aboutsummaryrefslogtreecommitdiff
path: root/clang
diff options
context:
space:
mode:
Diffstat (limited to 'clang')
-rw-r--r--clang/include/clang-c/Index.h29
-rw-r--r--clang/include/clang/AST/ASTConcept.h3
-rw-r--r--clang/include/clang/AST/ASTContext.h161
-rw-r--r--clang/include/clang/AST/ASTImporter.h18
-rw-r--r--clang/include/clang/AST/ASTImporterLookupTable.h16
-rw-r--r--clang/include/clang/AST/ASTNodeTraverser.h35
-rw-r--r--clang/include/clang/AST/Attr.h2
-rw-r--r--clang/include/clang/AST/CXXInheritance.h5
-rw-r--r--clang/include/clang/AST/CXXRecordDeclDefinitionBits.def4
-rw-r--r--clang/include/clang/AST/ComparisonCategories.h1
-rw-r--r--clang/include/clang/AST/ComputeDependence.h4
-rw-r--r--clang/include/clang/AST/Decl.h45
-rw-r--r--clang/include/clang/AST/DeclBase.h135
-rw-r--r--clang/include/clang/AST/DeclCXX.h424
-rw-r--r--clang/include/clang/AST/DeclContextInternals.h355
-rw-r--r--clang/include/clang/AST/DeclObjC.h4
-rw-r--r--clang/include/clang/AST/DeclTemplate.h7
-rw-r--r--clang/include/clang/AST/DeclarationName.h96
-rw-r--r--clang/include/clang/AST/DependenceFlags.h14
-rw-r--r--clang/include/clang/AST/EvaluatedExprVisitor.h19
-rw-r--r--clang/include/clang/AST/Expr.h110
-rw-r--r--clang/include/clang/AST/ExprCXX.h40
-rw-r--r--clang/include/clang/AST/ExprObjC.h16
-rw-r--r--clang/include/clang/AST/ExternalASTSource.h6
-rw-r--r--clang/include/clang/AST/IgnoreExpr.h14
-rw-r--r--clang/include/clang/AST/JSONNodeDumper.h5
-rw-r--r--clang/include/clang/AST/Mangle.h25
-rw-r--r--clang/include/clang/AST/MangleNumberingContext.h5
-rw-r--r--clang/include/clang/AST/OpenMPClause.h663
-rw-r--r--clang/include/clang/AST/OperationKinds.def3
-rw-r--r--clang/include/clang/AST/ParentMapContext.h3
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h168
-rw-r--r--clang/include/clang/AST/Redeclarable.h1
-rw-r--r--clang/include/clang/AST/Stmt.h8
-rw-r--r--clang/include/clang/AST/StmtDataCollectors.td2
-rw-r--r--clang/include/clang/AST/StmtIterator.h11
-rw-r--r--clang/include/clang/AST/StmtOpenMP.h1090
-rw-r--r--clang/include/clang/AST/TemplateBase.h6
-rw-r--r--clang/include/clang/AST/TextNodeDumper.h16
-rw-r--r--clang/include/clang/AST/Type.h19
-rw-r--r--clang/include/clang/AST/TypeProperties.td4
-rw-r--r--clang/include/clang/AST/VTableBuilder.h1
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchFinder.h11
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchers.h408
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchersInternal.h293
-rw-r--r--clang/include/clang/ASTMatchers/ASTMatchersMacros.h93
-rw-r--r--clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h6
-rw-r--r--clang/include/clang/ASTMatchers/Dynamic/Parser.h21
-rw-r--r--clang/include/clang/ASTMatchers/Dynamic/Registry.h25
-rw-r--r--clang/include/clang/ASTMatchers/Dynamic/VariantValue.h23
-rw-r--r--clang/include/clang/ASTMatchers/GtestMatchers.h46
-rw-r--r--clang/include/clang/Analysis/Analyses/CalledOnceCheck.h17
-rw-r--r--clang/include/clang/Analysis/AnalysisDeclContext.h2
-rw-r--r--clang/include/clang/Analysis/AnyCall.h10
-rw-r--r--clang/include/clang/Analysis/CFG.h18
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/DataflowValues.h3
-rw-r--r--clang/include/clang/Analysis/MacroExpansionContext.h127
-rw-r--r--clang/include/clang/Analysis/RetainSummaryManager.h8
-rw-r--r--clang/include/clang/Basic/ABI.h168
-rw-r--r--clang/include/clang/Basic/AddressSpaces.h7
-rw-r--r--clang/include/clang/Basic/Attr.td167
-rw-r--r--clang/include/clang/Basic/AttrDocs.td366
-rw-r--r--clang/include/clang/Basic/AttributeCommonInfo.h6
-rw-r--r--clang/include/clang/Basic/Builtins.def53
-rw-r--r--clang/include/clang/Basic/Builtins.h6
-rw-r--r--clang/include/clang/Basic/BuiltinsAArch64.def14
-rw-r--r--clang/include/clang/Basic/BuiltinsAMDGPU.def59
-rw-r--r--clang/include/clang/Basic/BuiltinsHexagon.def10
-rw-r--r--clang/include/clang/Basic/BuiltinsHexagonDep.def2702
-rw-r--r--clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def240
-rw-r--r--clang/include/clang/Basic/BuiltinsLe64.def18
-rw-r--r--clang/include/clang/Basic/BuiltinsNVPTX.def85
-rw-r--r--clang/include/clang/Basic/BuiltinsPPC.def317
-rw-r--r--clang/include/clang/Basic/BuiltinsRISCV.def63
-rw-r--r--clang/include/clang/Basic/BuiltinsSystemZ.def7
-rw-r--r--clang/include/clang/Basic/BuiltinsWebAssembly.def115
-rw-r--r--clang/include/clang/Basic/BuiltinsX86.def4
-rw-r--r--clang/include/clang/Basic/BuiltinsX86_64.def6
-rw-r--r--clang/include/clang/Basic/CodeGenOptions.def31
-rw-r--r--clang/include/clang/Basic/CodeGenOptions.h134
-rw-r--r--clang/include/clang/Basic/Cuda.h9
-rw-r--r--clang/include/clang/Basic/DarwinSDKInfo.h157
-rw-r--r--clang/include/clang/Basic/DebugInfoOptions.h1
-rw-r--r--clang/include/clang/Basic/DeclNodes.td5
-rw-r--r--clang/include/clang/Basic/Diagnostic.h39
-rw-r--r--clang/include/clang/Basic/DiagnosticASTKinds.td2
-rw-r--r--clang/include/clang/Basic/DiagnosticCategories.td1
-rw-r--r--clang/include/clang/Basic/DiagnosticCommonKinds.td24
-rw-r--r--clang/include/clang/Basic/DiagnosticDriverKinds.td49
-rw-r--r--clang/include/clang/Basic/DiagnosticFrontendKinds.td13
-rw-r--r--clang/include/clang/Basic/DiagnosticGroups.td118
-rw-r--r--clang/include/clang/Basic/DiagnosticIDs.h2
-rw-r--r--clang/include/clang/Basic/DiagnosticLexKinds.td9
-rw-r--r--clang/include/clang/Basic/DiagnosticOptions.def1
-rw-r--r--clang/include/clang/Basic/DiagnosticParseKinds.td81
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td337
-rw-r--r--clang/include/clang/Basic/DiagnosticSerializationKinds.td3
-rw-r--r--clang/include/clang/Basic/DirectoryEntry.h3
-rw-r--r--clang/include/clang/Basic/Features.def7
-rw-r--r--clang/include/clang/Basic/IdentifierTable.h19
-rw-r--r--clang/include/clang/Basic/LLVM.h3
-rw-r--r--clang/include/clang/Basic/LangOptions.def27
-rw-r--r--clang/include/clang/Basic/LangOptions.h67
-rw-r--r--clang/include/clang/Basic/LangStandard.h1
-rw-r--r--clang/include/clang/Basic/LangStandards.def4
-rw-r--r--clang/include/clang/Basic/Module.h26
-rw-r--r--clang/include/clang/Basic/NoSanitizeList.h50
-rw-r--r--clang/include/clang/Basic/OpenCLExtensions.def84
-rw-r--r--clang/include/clang/Basic/OpenCLImageTypes.def2
-rw-r--r--clang/include/clang/Basic/OpenCLOptions.h87
-rw-r--r--clang/include/clang/Basic/OpenMPKinds.h5
-rw-r--r--clang/include/clang/Basic/RISCVVTypes.def147
-rw-r--r--clang/include/clang/Basic/SanitizerBlacklist.h49
-rw-r--r--clang/include/clang/Basic/SanitizerSpecialCaseList.h2
-rw-r--r--clang/include/clang/Basic/Sanitizers.h23
-rw-r--r--clang/include/clang/Basic/SourceLocation.h48
-rw-r--r--clang/include/clang/Basic/SourceManager.h102
-rw-r--r--clang/include/clang/Basic/Specifiers.h13
-rw-r--r--clang/include/clang/Basic/StmtNodes.td10
-rw-r--r--clang/include/clang/Basic/TargetBuiltins.h29
-rw-r--r--clang/include/clang/Basic/TargetCXXABI.def129
-rw-r--r--clang/include/clang/Basic/TargetCXXABI.h200
-rw-r--r--clang/include/clang/Basic/TargetInfo.h115
-rw-r--r--clang/include/clang/Basic/Thunk.h188
-rw-r--r--clang/include/clang/Basic/TokenKinds.def20
-rw-r--r--clang/include/clang/Basic/XRayInstr.h5
-rw-r--r--clang/include/clang/Basic/arm_mve.td19
-rw-r--r--clang/include/clang/Basic/arm_neon.td57
-rw-r--r--clang/include/clang/Basic/arm_neon_incl.td1
-rw-r--r--clang/include/clang/Basic/arm_sve.td18
-rw-r--r--clang/include/clang/Basic/riscv_vector.td2112
-rw-r--r--clang/include/clang/CodeGen/BackendUtil.h3
-rw-r--r--clang/include/clang/CodeGen/CGFunctionInfo.h43
-rw-r--r--clang/include/clang/CodeGen/CodeGenAction.h3
-rw-r--r--clang/include/clang/CodeGen/SwiftCallingConv.h3
-rw-r--r--clang/include/clang/CrossTU/CrossTranslationUnit.h32
-rw-r--r--clang/include/clang/Driver/Action.h8
-rw-r--r--clang/include/clang/Driver/ClangOptionDocs.td2
-rw-r--r--clang/include/clang/Driver/DarwinSDKInfo.h41
-rw-r--r--clang/include/clang/Driver/Distro.h3
-rw-r--r--clang/include/clang/Driver/Driver.h46
-rw-r--r--clang/include/clang/Driver/InputInfo.h (renamed from clang/lib/Driver/InputInfo.h)4
-rw-r--r--clang/include/clang/Driver/Job.h29
-rw-r--r--clang/include/clang/Driver/Options.h2
-rw-r--r--clang/include/clang/Driver/Options.td1262
-rw-r--r--clang/include/clang/Driver/SanitizerArgs.h15
-rw-r--r--clang/include/clang/Driver/ToolChain.h45
-rw-r--r--clang/include/clang/Driver/Types.def1
-rw-r--r--clang/include/clang/Driver/Types.h11
-rw-r--r--clang/include/clang/Format/Format.h442
-rw-r--r--clang/include/clang/Frontend/CommandLineSourceLoc.h7
-rw-r--r--clang/include/clang/Frontend/CompilerInstance.h25
-rw-r--r--clang/include/clang/Frontend/CompilerInvocation.h148
-rw-r--r--clang/include/clang/Frontend/DependencyOutputOptions.h20
-rw-r--r--clang/include/clang/Frontend/FrontendAction.h5
-rw-r--r--clang/include/clang/Frontend/FrontendActions.h11
-rw-r--r--clang/include/clang/Frontend/FrontendOptions.h9
-rw-r--r--clang/include/clang/Frontend/PreprocessorOutputOptions.h2
-rw-r--r--clang/include/clang/Frontend/TextDiagnostic.h3
-rw-r--r--clang/include/clang/Index/CommentToXML.h1
-rw-r--r--clang/include/clang/Index/DeclOccurrence.h13
-rw-r--r--clang/include/clang/Index/IndexSymbol.h1
-rw-r--r--clang/include/clang/Index/IndexingOptions.h1
-rw-r--r--clang/include/clang/Interpreter/Interpreter.h71
-rw-r--r--clang/include/clang/Interpreter/PartialTranslationUnit.h37
-rw-r--r--clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h2
-rw-r--r--clang/include/clang/Lex/HeaderMap.h10
-rw-r--r--clang/include/clang/Lex/LiteralSupport.h1
-rw-r--r--clang/include/clang/Lex/MacroInfo.h2
-rw-r--r--clang/include/clang/Lex/ModuleLoader.h3
-rw-r--r--clang/include/clang/Lex/ModuleMap.h15
-rw-r--r--clang/include/clang/Lex/PPCallbacks.h62
-rw-r--r--clang/include/clang/Lex/PPConditionalDirectiveRecord.h8
-rw-r--r--clang/include/clang/Lex/PreprocessingRecord.h7
-rw-r--r--clang/include/clang/Lex/Preprocessor.h19
-rw-r--r--clang/include/clang/Lex/PreprocessorOptions.h4
-rw-r--r--clang/include/clang/Lex/Token.h4
-rw-r--r--clang/include/clang/Lex/VariadicMacroSupport.h10
-rw-r--r--clang/include/clang/Parse/Parser.h228
-rw-r--r--clang/include/clang/Rewrite/Core/RewriteRope.h9
-rw-r--r--clang/include/clang/Sema/AnalysisBasedWarnings.h8
-rw-r--r--clang/include/clang/Sema/DeclSpec.h6
-rw-r--r--clang/include/clang/Sema/ExternalSemaSource.h4
-rw-r--r--clang/include/clang/Sema/Initialization.h32
-rw-r--r--clang/include/clang/Sema/MultiplexExternalSemaSource.h2
-rw-r--r--clang/include/clang/Sema/Overload.h9
-rw-r--r--clang/include/clang/Sema/ParsedAttr.h75
-rw-r--r--clang/include/clang/Sema/Scope.h19
-rw-r--r--clang/include/clang/Sema/ScopeInfo.h22
-rw-r--r--clang/include/clang/Sema/Sema.h530
-rw-r--r--clang/include/clang/Sema/Template.h2
-rw-r--r--clang/include/clang/Serialization/ASTBitCodes.h3269
-rw-r--r--clang/include/clang/Serialization/ASTReader.h56
-rw-r--r--clang/include/clang/Serialization/ASTWriter.h14
-rw-r--r--clang/include/clang/Serialization/ModuleFile.h7
-rw-r--r--clang/include/clang/Serialization/ModuleFileExtension.h9
-rw-r--r--clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h4
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/Analyses.def7
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h69
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h306
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h5
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h14
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h41
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h9
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h59
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h53
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h1
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h3
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h3
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h325
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h2
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h42
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h22
-rw-r--r--clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h9
-rw-r--r--clang/include/clang/Tooling/ArgumentsAdjusters.h4
-rw-r--r--clang/include/clang/Tooling/CommonOptionsParser.h26
-rw-r--r--clang/include/clang/Tooling/CompilationDatabase.h6
-rw-r--r--clang/include/clang/Tooling/Core/Diagnostic.h33
-rw-r--r--clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h82
-rw-r--r--clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h67
-rw-r--r--clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h8
-rw-r--r--clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h145
-rw-r--r--clang/include/clang/Tooling/DiagnosticsYaml.h9
-rw-r--r--clang/include/clang/Tooling/NodeIntrospection.h101
-rw-r--r--clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h11
-rw-r--r--clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h1
-rw-r--r--clang/include/clang/Tooling/Syntax/Tokens.h8
-rw-r--r--clang/include/clang/Tooling/Tooling.h8
-rw-r--r--clang/include/clang/Tooling/Transformer/Parsing.h1
-rw-r--r--clang/include/clang/Tooling/Transformer/RangeSelector.h6
-rw-r--r--clang/include/clang/module.modulemap4
-rw-r--r--clang/lib/APINotes/APINotesYAMLCompiler.cpp6
-rw-r--r--clang/lib/ARCMigrate/FileRemapper.cpp11
-rw-r--r--clang/lib/ARCMigrate/ObjCMT.cpp22
-rw-r--r--clang/lib/ARCMigrate/PlistReporter.cpp2
-rw-r--r--clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp2
-rw-r--r--clang/lib/AST/ASTContext.cpp458
-rw-r--r--clang/lib/AST/ASTDiagnostic.cpp5
-rw-r--r--clang/lib/AST/ASTImporter.cpp277
-rw-r--r--clang/lib/AST/ASTImporterLookupTable.cpp17
-rw-r--r--clang/lib/AST/ASTStructuralEquivalence.cpp9
-rw-r--r--clang/lib/AST/ASTTypeTraits.cpp4
-rw-r--r--clang/lib/AST/AttrImpl.cpp51
-rw-r--r--clang/lib/AST/CXXABI.h5
-rw-r--r--clang/lib/AST/CXXInheritance.cpp13
-rw-r--r--clang/lib/AST/ComputeDependence.cpp8
-rw-r--r--clang/lib/AST/Decl.cpp69
-rw-r--r--clang/lib/AST/DeclBase.cpp92
-rw-r--r--clang/lib/AST/DeclCXX.cpp147
-rw-r--r--clang/lib/AST/DeclObjC.cpp8
-rw-r--r--clang/lib/AST/DeclPrinter.cpp103
-rw-r--r--clang/lib/AST/DeclTemplate.cpp55
-rw-r--r--clang/lib/AST/DeclarationName.cpp27
-rw-r--r--clang/lib/AST/Expr.cpp205
-rw-r--r--clang/lib/AST/ExprCXX.cpp27
-rw-r--r--clang/lib/AST/ExprClassification.cpp19
-rw-r--r--clang/lib/AST/ExprConcepts.cpp6
-rw-r--r--clang/lib/AST/ExprConstant.cpp219
-rw-r--r--clang/lib/AST/ExprObjC.cpp6
-rw-r--r--clang/lib/AST/ExternalASTMerger.cpp28
-rw-r--r--clang/lib/AST/Interp/Context.h1
-rw-r--r--clang/lib/AST/Interp/Interp.h3
-rw-r--r--clang/lib/AST/ItaniumCXXABI.cpp6
-rw-r--r--clang/lib/AST/ItaniumMangle.cpp612
-rw-r--r--clang/lib/AST/JSONNodeDumper.cpp71
-rw-r--r--clang/lib/AST/Mangle.cpp24
-rw-r--r--clang/lib/AST/MicrosoftCXXABI.cpp33
-rw-r--r--clang/lib/AST/MicrosoftMangle.cpp49
-rw-r--r--clang/lib/AST/NSAPI.cpp2
-rw-r--r--clang/lib/AST/NestedNameSpecifier.cpp24
-rw-r--r--clang/lib/AST/OpenMPClause.cpp178
-rw-r--r--clang/lib/AST/ParentMapContext.cpp134
-rw-r--r--clang/lib/AST/PrintfFormatString.cpp2
-rw-r--r--clang/lib/AST/QualTypeNames.cpp16
-rw-r--r--clang/lib/AST/RecordLayoutBuilder.cpp64
-rw-r--r--clang/lib/AST/Stmt.cpp25
-rw-r--r--clang/lib/AST/StmtOpenMP.cpp212
-rw-r--r--clang/lib/AST/StmtPrinter.cpp134
-rw-r--r--clang/lib/AST/StmtProfile.cpp81
-rw-r--r--clang/lib/AST/TemplateBase.cpp116
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp63
-rw-r--r--clang/lib/AST/Type.cpp27
-rw-r--r--clang/lib/AST/TypeLoc.cpp2
-rw-r--r--clang/lib/AST/TypePrinter.cpp58
-rw-r--r--clang/lib/AST/VTableBuilder.cpp2
-rw-r--r--clang/lib/ASTMatchers/ASTMatchFinder.cpp72
-rw-r--r--clang/lib/ASTMatchers/ASTMatchersInternal.cpp13
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp8
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Marshallers.cpp4
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Marshallers.h108
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Parser.cpp262
-rw-r--r--clang/lib/ASTMatchers/Dynamic/Registry.cpp154
-rw-r--r--clang/lib/ASTMatchers/Dynamic/VariantValue.cpp22
-rw-r--r--clang/lib/ASTMatchers/GtestMatchers.cpp236
-rw-r--r--clang/lib/Analysis/AnalysisDeclContext.cpp55
-rw-r--r--clang/lib/Analysis/BodyFarm.cpp101
-rw-r--r--clang/lib/Analysis/CFG.cpp13
-rw-r--r--clang/lib/Analysis/CalledOnceCheck.cpp242
-rw-r--r--clang/lib/Analysis/LiveVariables.cpp30
-rw-r--r--clang/lib/Analysis/MacroExpansionContext.cpp231
-rw-r--r--clang/lib/Analysis/PathDiagnostic.cpp2
-rw-r--r--clang/lib/Analysis/RetainSummaryManager.cpp34
-rw-r--r--clang/lib/Analysis/ThreadSafety.cpp235
-rw-r--r--clang/lib/Analysis/ThreadSafetyCommon.cpp3
-rw-r--r--clang/lib/Basic/Attributes.cpp8
-rw-r--r--clang/lib/Basic/Builtins.cpp16
-rw-r--r--clang/lib/Basic/CodeGenOptions.cpp8
-rw-r--r--clang/lib/Basic/Cuda.cpp22
-rw-r--r--clang/lib/Basic/DarwinSDKInfo.cpp131
-rw-r--r--clang/lib/Basic/DiagnosticIDs.cpp26
-rw-r--r--clang/lib/Basic/FileManager.cpp15
-rw-r--r--clang/lib/Basic/IdentifierTable.cpp43
-rw-r--r--clang/lib/Basic/LangOptions.cpp2
-rw-r--r--clang/lib/Basic/Module.cpp21
-rw-r--r--clang/lib/Basic/NoSanitizeList.cpp54
-rw-r--r--clang/lib/Basic/OpenCLOptions.cpp108
-rw-r--r--clang/lib/Basic/OpenMPKinds.cpp18
-rw-r--r--clang/lib/Basic/ProfileList.cpp1
-rw-r--r--clang/lib/Basic/SanitizerBlacklist.cpp59
-rw-r--r--clang/lib/Basic/Sanitizers.cpp61
-rw-r--r--clang/lib/Basic/SourceLocation.cpp2
-rw-r--r--clang/lib/Basic/SourceManager.cpp115
-rw-r--r--clang/lib/Basic/TargetInfo.cpp40
-rw-r--r--clang/lib/Basic/Targets.cpp62
-rw-r--r--clang/lib/Basic/Targets/AArch64.cpp52
-rw-r--r--clang/lib/Basic/Targets/AArch64.h5
-rw-r--r--clang/lib/Basic/Targets/AMDGPU.cpp31
-rw-r--r--clang/lib/Basic/Targets/AMDGPU.h5
-rw-r--r--clang/lib/Basic/Targets/ARM.cpp30
-rw-r--r--clang/lib/Basic/Targets/ARM.h2
-rw-r--r--clang/lib/Basic/Targets/AVR.cpp1
-rw-r--r--clang/lib/Basic/Targets/AVR.h1
-rw-r--r--clang/lib/Basic/Targets/BPF.cpp11
-rw-r--r--clang/lib/Basic/Targets/BPF.h20
-rw-r--r--clang/lib/Basic/Targets/Hexagon.cpp39
-rw-r--r--clang/lib/Basic/Targets/Le64.cpp9
-rw-r--r--clang/lib/Basic/Targets/Le64.h1
-rw-r--r--clang/lib/Basic/Targets/M68k.cpp236
-rw-r--r--clang/lib/Basic/Targets/M68k.h59
-rw-r--r--clang/lib/Basic/Targets/NVPTX.cpp8
-rw-r--r--clang/lib/Basic/Targets/NVPTX.h8
-rw-r--r--clang/lib/Basic/Targets/OSTargets.cpp2
-rw-r--r--clang/lib/Basic/Targets/OSTargets.h40
-rw-r--r--clang/lib/Basic/Targets/PPC.cpp247
-rw-r--r--clang/lib/Basic/Targets/PPC.h16
-rw-r--r--clang/lib/Basic/Targets/RISCV.cpp64
-rw-r--r--clang/lib/Basic/Targets/RISCV.h14
-rw-r--r--clang/lib/Basic/Targets/SPIR.h55
-rw-r--r--clang/lib/Basic/Targets/SystemZ.cpp6
-rw-r--r--clang/lib/Basic/Targets/SystemZ.h4
-rw-r--r--clang/lib/Basic/Targets/TCE.h5
-rw-r--r--clang/lib/Basic/Targets/WebAssembly.cpp29
-rw-r--r--clang/lib/Basic/Targets/WebAssembly.h15
-rw-r--r--clang/lib/Basic/Targets/X86.cpp37
-rw-r--r--clang/lib/Basic/Targets/X86.h39
-rw-r--r--clang/lib/Basic/Targets/XCore.cpp1
-rw-r--r--clang/lib/Basic/XRayInstr.cpp27
-rw-r--r--clang/lib/CodeGen/BackendUtil.cpp324
-rw-r--r--clang/lib/CodeGen/CGAtomic.cpp117
-rw-r--r--clang/lib/CodeGen/CGBlocks.cpp72
-rw-r--r--clang/lib/CodeGen/CGBuilder.h49
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp1822
-rw-r--r--clang/lib/CodeGen/CGCUDANV.cpp283
-rw-r--r--clang/lib/CodeGen/CGCUDARuntime.h32
-rw-r--r--clang/lib/CodeGen/CGCXX.cpp8
-rw-r--r--clang/lib/CodeGen/CGCall.cpp285
-rw-r--r--clang/lib/CodeGen/CGClass.cpp46
-rw-r--r--clang/lib/CodeGen/CGCleanup.cpp106
-rw-r--r--clang/lib/CodeGen/CGCoroutine.cpp13
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.cpp307
-rw-r--r--clang/lib/CodeGen/CGDebugInfo.h28
-rw-r--r--clang/lib/CodeGen/CGDecl.cpp115
-rw-r--r--clang/lib/CodeGen/CGDeclCXX.cpp151
-rw-r--r--clang/lib/CodeGen/CGException.cpp153
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp121
-rw-r--r--clang/lib/CodeGen/CGExprAgg.cpp44
-rw-r--r--clang/lib/CodeGen/CGExprCXX.cpp22
-rw-r--r--clang/lib/CodeGen/CGExprComplex.cpp1
-rw-r--r--clang/lib/CodeGen/CGExprConstant.cpp1
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp188
-rw-r--r--clang/lib/CodeGen/CGLoopInfo.cpp11
-rw-r--r--clang/lib/CodeGen/CGNonTrivialStruct.cpp14
-rw-r--r--clang/lib/CodeGen/CGObjC.cpp167
-rw-r--r--clang/lib/CodeGen/CGObjCGNU.cpp48
-rw-r--r--clang/lib/CodeGen/CGObjCMac.cpp27
-rw-r--r--clang/lib/CodeGen/CGObjCRuntime.cpp2
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp1473
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.h69
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp1273
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeGPU.h70
-rw-r--r--clang/lib/CodeGen/CGStmt.cpp246
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp925
-rw-r--r--clang/lib/CodeGen/CGVTables.cpp22
-rw-r--r--clang/lib/CodeGen/CodeGenAction.cpp215
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.cpp258
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.h149
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp654
-rw-r--r--clang/lib/CodeGen/CodeGenModule.h100
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.cpp8
-rw-r--r--clang/lib/CodeGen/CodeGenPGO.h4
-rw-r--r--clang/lib/CodeGen/CodeGenTypes.cpp11
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.cpp312
-rw-r--r--clang/lib/CodeGen/CoverageMappingGen.h3
-rw-r--r--clang/lib/CodeGen/EHScopeStack.h14
-rw-r--r--clang/lib/CodeGen/ItaniumCXXABI.cpp337
-rw-r--r--clang/lib/CodeGen/MicrosoftCXXABI.cpp81
-rw-r--r--clang/lib/CodeGen/ModuleBuilder.cpp2
-rw-r--r--clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp12
-rw-r--r--clang/lib/CodeGen/SanitizerMetadata.cpp10
-rw-r--r--clang/lib/CodeGen/TargetInfo.cpp293
-rw-r--r--clang/lib/CodeGen/TargetInfo.h11
-rw-r--r--clang/lib/CodeGen/VarBypassDetector.cpp2
-rw-r--r--clang/lib/CodeGen/VarBypassDetector.h2
-rw-r--r--clang/lib/CrossTU/CrossTranslationUnit.cpp48
-rw-r--r--clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp1
-rw-r--r--clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp279
-rw-r--r--clang/lib/Driver/Action.cpp4
-rw-r--r--clang/lib/Driver/Compilation.cpp7
-rw-r--r--clang/lib/Driver/DarwinSDKInfo.cpp43
-rw-r--r--clang/lib/Driver/Distro.cpp11
-rw-r--r--clang/lib/Driver/Driver.cpp431
-rw-r--r--clang/lib/Driver/Job.cpp59
-rw-r--r--clang/lib/Driver/SanitizerArgs.cpp151
-rw-r--r--clang/lib/Driver/Tool.cpp2
-rw-r--r--clang/lib/Driver/ToolChain.cpp301
-rw-r--r--clang/lib/Driver/ToolChains/AIX.cpp5
-rw-r--r--clang/lib/Driver/ToolChains/AIX.h11
-rw-r--r--clang/lib/Driver/ToolChains/AMDGPU.cpp383
-rw-r--r--clang/lib/Driver/ToolChains/AMDGPU.h29
-rw-r--r--clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp304
-rw-r--r--clang/lib/Driver/ToolChains/AMDGPUOpenMP.h106
-rw-r--r--clang/lib/Driver/ToolChains/AVR.cpp519
-rw-r--r--clang/lib/Driver/ToolChains/AVR.h5
-rw-r--r--clang/lib/Driver/ToolChains/Ananas.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/Arch/AArch64.cpp51
-rw-r--r--clang/lib/Driver/ToolChains/Arch/AArch64.h3
-rw-r--r--clang/lib/Driver/ToolChains/Arch/ARM.cpp304
-rw-r--r--clang/lib/Driver/ToolChains/Arch/ARM.h4
-rw-r--r--clang/lib/Driver/ToolChains/Arch/M68k.cpp125
-rw-r--r--clang/lib/Driver/ToolChains/Arch/M68k.h42
-rw-r--r--clang/lib/Driver/ToolChains/Arch/RISCV.cpp28
-rw-r--r--clang/lib/Driver/ToolChains/Arch/X86.cpp21
-rw-r--r--clang/lib/Driver/ToolChains/BareMetal.cpp14
-rw-r--r--clang/lib/Driver/ToolChains/BareMetal.h9
-rw-r--r--clang/lib/Driver/ToolChains/Clang.cpp872
-rw-r--r--clang/lib/Driver/ToolChains/Clang.h4
-rw-r--r--clang/lib/Driver/ToolChains/CloudABI.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.cpp235
-rw-r--r--clang/lib/Driver/ToolChains/CommonArgs.h17
-rw-r--r--clang/lib/Driver/ToolChains/Cuda.cpp102
-rw-r--r--clang/lib/Driver/ToolChains/Darwin.cpp263
-rw-r--r--clang/lib/Driver/ToolChains/Darwin.h45
-rw-r--r--clang/lib/Driver/ToolChains/Flang.cpp37
-rw-r--r--clang/lib/Driver/ToolChains/Flang.h15
-rw-r--r--clang/lib/Driver/ToolChains/FreeBSD.cpp30
-rw-r--r--clang/lib/Driver/ToolChains/Fuchsia.cpp75
-rw-r--r--clang/lib/Driver/ToolChains/Gnu.cpp307
-rw-r--r--clang/lib/Driver/ToolChains/Gnu.h18
-rw-r--r--clang/lib/Driver/ToolChains/HIP.cpp240
-rw-r--r--clang/lib/Driver/ToolChains/HIP.h12
-rw-r--r--clang/lib/Driver/ToolChains/Haiku.cpp4
-rw-r--r--clang/lib/Driver/ToolChains/Hexagon.cpp22
-rw-r--r--clang/lib/Driver/ToolChains/Hurd.cpp27
-rw-r--r--clang/lib/Driver/ToolChains/Hurd.h3
-rw-r--r--clang/lib/Driver/ToolChains/InterfaceStubs.cpp5
-rw-r--r--clang/lib/Driver/ToolChains/Linux.cpp357
-rw-r--r--clang/lib/Driver/ToolChains/Linux.h11
-rw-r--r--clang/lib/Driver/ToolChains/MSP430.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/MSP430.h2
-rw-r--r--clang/lib/Driver/ToolChains/MSVC.cpp476
-rw-r--r--clang/lib/Driver/ToolChains/MSVC.h33
-rw-r--r--clang/lib/Driver/ToolChains/MinGW.cpp5
-rw-r--r--clang/lib/Driver/ToolChains/Minix.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/Myriad.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/NaCl.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/NetBSD.cpp4
-rw-r--r--clang/lib/Driver/ToolChains/OpenBSD.cpp1
-rw-r--r--clang/lib/Driver/ToolChains/RISCVToolchain.cpp10
-rw-r--r--clang/lib/Driver/ToolChains/ROCm.h48
-rw-r--r--clang/lib/Driver/ToolChains/Solaris.cpp8
-rw-r--r--clang/lib/Driver/ToolChains/WebAssembly.cpp56
-rw-r--r--clang/lib/Driver/ToolChains/WebAssembly.h4
-rw-r--r--clang/lib/Driver/Types.cpp49
-rw-r--r--clang/lib/Edit/RewriteObjCFoundationAPI.cpp1
-rw-r--r--clang/lib/Format/BreakableToken.cpp114
-rw-r--r--clang/lib/Format/BreakableToken.h26
-rw-r--r--clang/lib/Format/ContinuationIndenter.cpp13
-rw-r--r--clang/lib/Format/Format.cpp298
-rw-r--r--clang/lib/Format/FormatToken.h24
-rw-r--r--clang/lib/Format/FormatTokenLexer.cpp117
-rw-r--r--clang/lib/Format/FormatTokenLexer.h3
-rw-r--r--clang/lib/Format/NamespaceEndCommentsFixer.cpp15
-rw-r--r--clang/lib/Format/SortJavaScriptImports.cpp190
-rw-r--r--[-rwxr-xr-x]clang/lib/Format/TokenAnnotator.cpp453
-rw-r--r--clang/lib/Format/TokenAnnotator.h14
-rw-r--r--clang/lib/Format/UnwrappedLineFormatter.cpp99
-rw-r--r--clang/lib/Format/UnwrappedLineFormatter.h1
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp221
-rw-r--r--clang/lib/Format/UnwrappedLineParser.h15
-rw-r--r--clang/lib/Format/UsingDeclarationsSorter.cpp4
-rw-r--r--clang/lib/Format/WhitespaceManager.cpp399
-rw-r--r--clang/lib/Format/WhitespaceManager.h107
-rw-r--r--clang/lib/Frontend/ASTUnit.cpp35
-rw-r--r--clang/lib/Frontend/CompilerInstance.cpp247
-rw-r--r--clang/lib/Frontend/CompilerInvocation.cpp2479
-rw-r--r--clang/lib/Frontend/CreateInvocationFromCommandLine.cpp11
-rw-r--r--clang/lib/Frontend/DependencyFile.cpp17
-rw-r--r--clang/lib/Frontend/DependencyGraph.cpp2
-rw-r--r--clang/lib/Frontend/DiagnosticRenderer.cpp7
-rw-r--r--clang/lib/Frontend/FrontendAction.cpp17
-rw-r--r--clang/lib/Frontend/FrontendActions.cpp85
-rw-r--r--clang/lib/Frontend/FrontendOptions.cpp1
-rw-r--r--clang/lib/Frontend/HeaderIncludeGen.cpp20
-rw-r--r--clang/lib/Frontend/InitHeaderSearch.cpp44
-rw-r--r--clang/lib/Frontend/InitPreprocessor.cpp63
-rw-r--r--clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp12
-rw-r--r--clang/lib/Frontend/ModuleDependencyCollector.cpp2
-rw-r--r--clang/lib/Frontend/PrecompiledPreamble.cpp10
-rw-r--r--clang/lib/Frontend/PrintPreprocessedOutput.cpp349
-rw-r--r--clang/lib/Frontend/Rewrite/FrontendActions.cpp6
-rw-r--r--clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp99
-rw-r--r--clang/lib/Frontend/Rewrite/RewriteObjC.cpp96
-rw-r--r--clang/lib/Frontend/TestModuleFileExtension.cpp10
-rw-r--r--clang/lib/Frontend/TestModuleFileExtension.h18
-rw-r--r--clang/lib/Frontend/TextDiagnostic.cpp25
-rw-r--r--clang/lib/Frontend/TextDiagnosticPrinter.cpp3
-rw-r--r--clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp2
-rw-r--r--clang/lib/Headers/__clang_cuda_math.h4
-rw-r--r--clang/lib/Headers/__clang_cuda_runtime_wrapper.h7
-rw-r--r--clang/lib/Headers/__clang_hip_cmath.h238
-rw-r--r--clang/lib/Headers/__clang_hip_libdevice_declares.h36
-rw-r--r--clang/lib/Headers/__clang_hip_math.h6
-rw-r--r--clang/lib/Headers/__clang_hip_runtime_wrapper.h87
-rw-r--r--clang/lib/Headers/altivec.h1584
-rw-r--r--clang/lib/Headers/amxintrin.h253
-rw-r--r--clang/lib/Headers/arm_acle.h55
-rw-r--r--clang/lib/Headers/avx512fintrin.h226
-rw-r--r--clang/lib/Headers/builtins.h16
-rw-r--r--clang/lib/Headers/cuda_wrappers/complex8
-rw-r--r--clang/lib/Headers/hexagon_circ_brev_intrinsics.h298
-rw-r--r--clang/lib/Headers/hexagon_protos.h8450
-rw-r--r--clang/lib/Headers/hexagon_types.h2653
-rw-r--r--clang/lib/Headers/hvx_hexagon_protos.h4392
-rw-r--r--clang/lib/Headers/immintrin.h10
-rw-r--r--clang/lib/Headers/intrin.h49
-rw-r--r--clang/lib/Headers/keylockerintrin.h30
-rw-r--r--clang/lib/Headers/opencl-c-base.h147
-rw-r--r--clang/lib/Headers/opencl-c.h860
-rw-r--r--clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h42
-rw-r--r--clang/lib/Headers/openmp_wrappers/complex6
-rw-r--r--clang/lib/Headers/openmp_wrappers/complex.h1
-rw-r--r--clang/lib/Headers/openmp_wrappers/new26
-rw-r--r--clang/lib/Headers/ppc_wrappers/xmmintrin.h9
-rw-r--r--clang/lib/Headers/uintrintrin.h7
-rw-r--r--clang/lib/Headers/vaesintrin.h41
-rw-r--r--clang/lib/Headers/vecintrin.h283
-rw-r--r--clang/lib/Headers/vpclmulqdqintrin.h2
-rw-r--r--clang/lib/Headers/wasm_simd128.h1045
-rw-r--r--clang/lib/Index/FileIndexRecord.cpp71
-rw-r--r--clang/lib/Index/FileIndexRecord.h23
-rw-r--r--clang/lib/Index/IndexBody.cpp12
-rw-r--r--clang/lib/Index/IndexDecl.cpp2
-rw-r--r--clang/lib/Index/IndexSymbol.cpp7
-rw-r--r--clang/lib/Index/IndexingAction.cpp111
-rw-r--r--clang/lib/Index/IndexingContext.cpp35
-rw-r--r--clang/lib/Index/IndexingContext.h2
-rw-r--r--clang/lib/Index/USRGeneration.cpp7
-rw-r--r--clang/lib/Interpreter/IncrementalExecutor.cpp63
-rw-r--r--clang/lib/Interpreter/IncrementalExecutor.h48
-rw-r--r--clang/lib/Interpreter/IncrementalParser.cpp292
-rw-r--r--clang/lib/Interpreter/IncrementalParser.h77
-rw-r--r--clang/lib/Interpreter/Interpreter.cpp225
-rw-r--r--clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp6
-rw-r--r--clang/lib/Lex/HeaderMap.cpp31
-rw-r--r--clang/lib/Lex/HeaderSearch.cpp19
-rw-r--r--clang/lib/Lex/Lexer.cpp118
-rw-r--r--clang/lib/Lex/LiteralSupport.cpp113
-rw-r--r--clang/lib/Lex/ModuleMap.cpp58
-rw-r--r--clang/lib/Lex/PPCaching.cpp2
-rw-r--r--clang/lib/Lex/PPConditionalDirectiveRecord.cpp22
-rw-r--r--clang/lib/Lex/PPDirectives.cpp215
-rw-r--r--clang/lib/Lex/PPExpressions.cpp16
-rw-r--r--clang/lib/Lex/PPMacroExpansion.cpp15
-rw-r--r--clang/lib/Lex/Pragma.cpp62
-rw-r--r--clang/lib/Lex/PreprocessingRecord.cpp17
-rw-r--r--clang/lib/Lex/Preprocessor.cpp20
-rw-r--r--clang/lib/Lex/TokenLexer.cpp21
-rw-r--r--clang/lib/Parse/ParseCXXInlineMethods.cpp1
-rw-r--r--clang/lib/Parse/ParseDecl.cpp180
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp428
-rw-r--r--clang/lib/Parse/ParseExpr.cpp51
-rw-r--r--clang/lib/Parse/ParseExprCXX.cpp328
-rw-r--r--clang/lib/Parse/ParseInit.cpp28
-rw-r--r--clang/lib/Parse/ParseObjc.cpp94
-rw-r--r--clang/lib/Parse/ParseOpenMP.cpp425
-rw-r--r--clang/lib/Parse/ParsePragma.cpp30
-rw-r--r--clang/lib/Parse/ParseStmt.cpp80
-rw-r--r--clang/lib/Parse/ParseStmtAsm.cpp17
-rw-r--r--clang/lib/Parse/ParseTentative.cpp2
-rw-r--r--clang/lib/Parse/Parser.cpp37
-rw-r--r--clang/lib/Rewrite/DeltaTree.cpp5
-rw-r--r--clang/lib/Sema/AnalysisBasedWarnings.cpp158
-rw-r--r--clang/lib/Sema/CodeCompleteConsumer.cpp2
-rw-r--r--clang/lib/Sema/DeclSpec.cpp5
-rw-r--r--clang/lib/Sema/JumpDiagnostics.cpp41
-rw-r--r--clang/lib/Sema/MultiplexExternalSemaSource.cpp3
-rw-r--r--clang/lib/Sema/OpenCLBuiltins.td560
-rw-r--r--clang/lib/Sema/ParsedAttr.cpp40
-rw-r--r--clang/lib/Sema/Sema.cpp367
-rw-r--r--clang/lib/Sema/SemaAccess.cpp35
-rw-r--r--clang/lib/Sema/SemaAttr.cpp94
-rw-r--r--clang/lib/Sema/SemaAvailability.cpp33
-rw-r--r--clang/lib/Sema/SemaCUDA.cpp156
-rw-r--r--clang/lib/Sema/SemaCXXScopeSpec.cpp40
-rw-r--r--clang/lib/Sema/SemaCast.cpp191
-rw-r--r--clang/lib/Sema/SemaChecking.cpp839
-rw-r--r--clang/lib/Sema/SemaCodeComplete.cpp216
-rw-r--r--clang/lib/Sema/SemaConcept.cpp62
-rw-r--r--clang/lib/Sema/SemaCoroutine.cpp59
-rw-r--r--clang/lib/Sema/SemaDecl.cpp742
-rw-r--r--clang/lib/Sema/SemaDeclAttr.cpp842
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp774
-rw-r--r--clang/lib/Sema/SemaDeclObjC.cpp15
-rw-r--r--clang/lib/Sema/SemaExceptionSpec.cpp18
-rw-r--r--clang/lib/Sema/SemaExpr.cpp873
-rw-r--r--clang/lib/Sema/SemaExprCXX.cpp403
-rw-r--r--clang/lib/Sema/SemaExprMember.cpp28
-rw-r--r--clang/lib/Sema/SemaExprObjC.cpp30
-rw-r--r--clang/lib/Sema/SemaFixItUtils.cpp4
-rw-r--r--clang/lib/Sema/SemaInit.cpp246
-rw-r--r--clang/lib/Sema/SemaLambda.cpp33
-rw-r--r--clang/lib/Sema/SemaLookup.cpp194
-rw-r--r--clang/lib/Sema/SemaObjCProperty.cpp44
-rw-r--r--clang/lib/Sema/SemaOpenMP.cpp2345
-rw-r--r--clang/lib/Sema/SemaOverload.cpp168
-rw-r--r--clang/lib/Sema/SemaPseudoObject.cpp6
-rw-r--r--clang/lib/Sema/SemaSYCL.cpp33
-rw-r--r--clang/lib/Sema/SemaStmt.cpp797
-rw-r--r--clang/lib/Sema/SemaStmtAsm.cpp21
-rw-r--r--clang/lib/Sema/SemaStmtAttr.cpp135
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp82
-rw-r--r--clang/lib/Sema/SemaTemplateDeduction.cpp34
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp17
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp346
-rw-r--r--clang/lib/Sema/SemaType.cpp295
-rw-r--r--clang/lib/Sema/TreeTransform.h528
-rw-r--r--clang/lib/Serialization/ASTCommon.cpp7
-rw-r--r--clang/lib/Serialization/ASTReader.cpp340
-rw-r--r--clang/lib/Serialization/ASTReaderDecl.cpp39
-rw-r--r--clang/lib/Serialization/ASTReaderStmt.cpp88
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp217
-rw-r--r--clang/lib/Serialization/ASTWriterDecl.cpp22
-rw-r--r--clang/lib/Serialization/ASTWriterStmt.cpp56
-rw-r--r--clang/lib/Serialization/ModuleFileExtension.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp10
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp10
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp38
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp136
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp80
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/Iterator.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp90
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp53
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp5
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp7
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp15
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp172
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h13
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SmartPtr.h4
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp376
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp202
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp111
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp20
-rw-r--r--clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp62
-rw-r--r--clang/lib/StaticAnalyzer/Core/BugReporter.cpp57
-rw-r--r--clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp1130
-rw-r--r--clang/lib/StaticAnalyzer/Core/CallEvent.cpp43
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerContext.cpp4
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp34
-rw-r--r--clang/lib/StaticAnalyzer/Core/CheckerManager.cpp29
-rw-r--r--clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp2
-rw-r--r--clang/lib/StaticAnalyzer/Core/CoreEngine.cpp6
-rw-r--r--clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp (renamed from clang/lib/StaticAnalyzer/Core/DynamicSize.cpp)51
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngine.cpp13
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp41
-rw-r--r--clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp31
-rw-r--r--clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp53
-rw-r--r--clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp77
-rw-r--r--clang/lib/StaticAnalyzer/Core/MemRegion.cpp12
-rw-r--r--clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp644
-rw-r--r--clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp1377
-rw-r--r--clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp22
-rw-r--r--clang/lib/StaticAnalyzer/Core/RegionStore.cpp9
-rw-r--r--clang/lib/StaticAnalyzer/Core/SValBuilder.cpp532
-rw-r--r--clang/lib/StaticAnalyzer/Core/SVals.cpp59
-rw-r--r--clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp8
-rw-r--r--clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp159
-rw-r--r--clang/lib/StaticAnalyzer/Core/Store.cpp49
-rw-r--r--clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp11
-rw-r--r--clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp109
-rw-r--r--clang/lib/Tooling/ArgumentsAdjusters.cpp21
-rw-r--r--clang/lib/Tooling/CommonOptionsParser.cpp3
-rw-r--r--clang/lib/Tooling/Core/Diagnostic.cpp5
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp43
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp9
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp71
-rw-r--r--clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp155
-rw-r--r--clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp209
-rw-r--r--clang/lib/Tooling/DumpTool/APIData.h31
-rw-r--r--clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp271
-rw-r--r--clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h53
-rw-r--r--clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp159
-rw-r--r--clang/lib/Tooling/EmptyNodeIntrospection.inc.in48
-rw-r--r--clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp7
-rw-r--r--clang/lib/Tooling/Inclusions/HeaderIncludes.cpp4
-rw-r--r--clang/lib/Tooling/InterpolatingCompilationDatabase.cpp41
-rw-r--r--clang/lib/Tooling/JSONCompilationDatabase.cpp2
-rw-r--r--clang/lib/Tooling/NodeIntrospection.cpp88
-rw-r--r--clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp18
-rw-r--r--clang/lib/Tooling/Syntax/BuildTree.cpp38
-rw-r--r--clang/lib/Tooling/Syntax/Tokens.cpp32
-rw-r--r--clang/lib/Tooling/Tooling.cpp68
-rw-r--r--clang/lib/Tooling/Transformer/RangeSelector.cpp11
-rw-r--r--clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp2
-rw-r--r--clang/lib/Tooling/Transformer/Stencil.cpp627
-rw-r--r--clang/tools/amdgpu-arch/AMDGPUArch.cpp78
-rw-r--r--clang/tools/clang-format/ClangFormat.cpp22
-rw-r--r--clang/tools/clang-repl/ClangRepl.cpp108
-rw-r--r--clang/tools/driver/cc1_main.cpp6
-rw-r--r--clang/tools/driver/cc1as_main.cpp61
-rw-r--r--clang/tools/driver/cc1gen_reproducer_main.cpp2
-rw-r--r--clang/tools/driver/driver.cpp97
-rw-r--r--clang/utils/TableGen/ClangAttrEmitter.cpp417
-rw-r--r--clang/utils/TableGen/ClangDiagnosticsEmitter.cpp219
-rw-r--r--clang/utils/TableGen/ClangOpcodesEmitter.cpp51
-rw-r--r--clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp464
-rw-r--r--clang/utils/TableGen/MveEmitter.cpp13
-rw-r--r--clang/utils/TableGen/NeonEmitter.cpp6
-rw-r--r--clang/utils/TableGen/RISCVVEmitter.cpp1269
-rw-r--r--clang/utils/TableGen/SveEmitter.cpp37
-rw-r--r--clang/utils/TableGen/TableGen.cpp24
-rw-r--r--clang/utils/TableGen/TableGenBackends.h6
764 files changed, 76228 insertions, 25014 deletions
diff --git a/clang/include/clang-c/Index.h b/clang/include/clang-c/Index.h
index e305283bbaf1..26844d1c74f3 100644
--- a/clang/include/clang-c/Index.h
+++ b/clang/include/clang-c/Index.h
@@ -33,7 +33,7 @@
* compatible, thus CINDEX_VERSION_MAJOR is expected to remain stable.
*/
#define CINDEX_VERSION_MAJOR 0
-#define CINDEX_VERSION_MINOR 61
+#define CINDEX_VERSION_MINOR 62
#define CINDEX_VERSION_ENCODE(major, minor) (((major)*10000) + ((minor)*1))
@@ -2568,7 +2568,31 @@ enum CXCursorKind {
*/
CXCursor_OMPScanDirective = 287,
- CXCursor_LastStmt = CXCursor_OMPScanDirective,
+ /** OpenMP tile directive.
+ */
+ CXCursor_OMPTileDirective = 288,
+
+ /** OpenMP canonical loop.
+ */
+ CXCursor_OMPCanonicalLoop = 289,
+
+ /** OpenMP interop directive.
+ */
+ CXCursor_OMPInteropDirective = 290,
+
+ /** OpenMP dispatch directive.
+ */
+ CXCursor_OMPDispatchDirective = 291,
+
+ /** OpenMP masked directive.
+ */
+ CXCursor_OMPMaskedDirective = 292,
+
+ /** OpenMP unroll directive.
+ */
+ CXCursor_OMPUnrollDirective = 293,
+
+ CXCursor_LastStmt = CXCursor_OMPUnrollDirective,
/**
* Cursor that represents the translation unit itself.
@@ -3394,6 +3418,7 @@ enum CXCallingConv {
CXCallingConv_PreserveMost = 14,
CXCallingConv_PreserveAll = 15,
CXCallingConv_AArch64VectorCall = 16,
+ CXCallingConv_SwiftAsync = 17,
CXCallingConv_Invalid = 100,
CXCallingConv_Unexposed = 200
diff --git a/clang/include/clang/AST/ASTConcept.h b/clang/include/clang/AST/ASTConcept.h
index 71bf14a87865..d0526f4fa5c5 100644
--- a/clang/include/clang/AST/ASTConcept.h
+++ b/clang/include/clang/AST/ASTConcept.h
@@ -14,12 +14,13 @@
#ifndef LLVM_CLANG_AST_ASTCONCEPT_H
#define LLVM_CLANG_AST_ASTCONCEPT_H
+
#include "clang/AST/Expr.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
-#include <string>
#include <utility>
+
namespace clang {
class ConceptDecl;
class ConceptSpecializationExpr;
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index ce47d54e44b0..34299581d89d 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -34,12 +34,13 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
+#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/ProfileList.h"
-#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/XRayLists.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
@@ -102,6 +103,7 @@ class DynTypedNode;
class DynTypedNodeList;
class Expr;
class GlobalDecl;
+class ItaniumMangleContext;
class MangleContext;
class MangleNumberingContext;
class MaterializeTemporaryExpr;
@@ -299,6 +301,10 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// This is lazily created. This is intentionally not serialized.
mutable llvm::StringMap<StringLiteral *> StringLiteralCache;
+ /// MD5 hash of CUID. It is calculated when first used and cached by this
+ /// data member.
+ mutable std::string CUIDHash;
+
/// Representation of a "canonical" template template parameter that
/// is used in canonical template names.
class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode {
@@ -453,6 +459,7 @@ private:
friend class ASTWriter;
template <class> friend class serialization::AbstractTypeReader;
friend class CXXRecordDecl;
+ friend class IncrementalParser;
/// A mapping to contain the template or declaration that
/// a variable declaration describes or was instantiated from,
@@ -512,6 +519,17 @@ private:
/// B<int> to the UnresolvedUsingDecl in B<T>.
llvm::DenseMap<NamedDecl *, NamedDecl *> InstantiatedFromUsingDecl;
+ /// Like InstantiatedFromUsingDecl, but for using-enum-declarations. Maps
+ /// from the instantiated using-enum to the templated decl from whence it
+ /// came.
+ /// Note that using-enum-declarations cannot be dependent and
+ /// thus will never be instantiated from an "unresolved"
+ /// version thereof (as with using-declarations), so each mapping is from
+ /// a (resolved) UsingEnumDecl to a (resolved) UsingEnumDecl.
+ llvm::DenseMap<UsingEnumDecl *, UsingEnumDecl *>
+ InstantiatedFromUsingEnumDecl;
+
+ /// Simlarly maps instantiated UsingShadowDecls to their origin.
llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>
InstantiatedFromUsingShadowDecl;
@@ -538,6 +556,9 @@ private:
/// need them (like static local vars).
llvm::MapVector<const NamedDecl *, unsigned> MangleNumbers;
llvm::MapVector<const VarDecl *, unsigned> StaticLocalNumbers;
+ /// Mapping the associated device lambda mangling number if present.
+ mutable llvm::DenseMap<const CXXRecordDecl *, unsigned>
+ DeviceLambdaManglingNumbers;
/// Mapping that stores parameterIndex values for ParmVarDecls when
/// that value exceeds the bitfield size of ParmVarDeclBits.ParameterIndex.
@@ -547,7 +568,7 @@ private:
ImportDecl *FirstLocalImport = nullptr;
ImportDecl *LastLocalImport = nullptr;
- TranslationUnitDecl *TUDecl;
+ TranslationUnitDecl *TUDecl = nullptr;
mutable ExternCContextDecl *ExternCContext = nullptr;
mutable BuiltinTemplateDecl *MakeIntegerSeqDecl = nullptr;
mutable BuiltinTemplateDecl *TypePackElementDecl = nullptr;
@@ -559,9 +580,9 @@ private:
/// this ASTContext object.
LangOptions &LangOpts;
- /// Blacklist object that is used by sanitizers to decide which
+ /// NoSanitizeList object that is used by sanitizers to decide which
/// entities should not be instrumented.
- std::unique_ptr<SanitizerBlacklist> SanitizerBL;
+ std::unique_ptr<NoSanitizeList> NoSanitizeL;
/// Function filtering mechanism to determine whether a given function
/// should be imbued with the XRay "always" or "never" attributes.
@@ -597,10 +618,14 @@ private:
std::unique_ptr<interp::Context> InterpContext;
std::unique_ptr<ParentMapContext> ParentMapCtx;
+ /// Keeps track of the deallocated DeclListNodes for future reuse.
+ DeclListNode *ListNodeFreeList = nullptr;
+
public:
IdentifierTable &Idents;
SelectorTable &Selectors;
Builtin::Context &BuiltinInfo;
+ const TranslationUnitKind TUKind;
mutable DeclarationNameTable DeclarationNames;
IntrusiveRefCntPtr<ExternalASTSource> ExternalSource;
ASTMutationListener *Listener = nullptr;
@@ -612,11 +637,22 @@ public:
ParentMapContext &getParentMapContext();
// A traversal scope limits the parts of the AST visible to certain analyses.
- // RecursiveASTVisitor::TraverseAST will only visit reachable nodes, and
+ // RecursiveASTVisitor only visits specified children of TranslationUnitDecl.
// getParents() will only observe reachable parent edges.
//
- // The scope is defined by a set of "top-level" declarations.
- // Initially, it is the entire TU: {getTranslationUnitDecl()}.
+ // The scope is defined by a set of "top-level" declarations which will be
+ // visible under the TranslationUnitDecl.
+ // Initially, it is the entire TU, represented by {getTranslationUnitDecl()}.
+ //
+ // After setTraversalScope({foo, bar}), the exposed AST looks like:
+ // TranslationUnitDecl
+ // - foo
+ // - ...
+ // - bar
+ // - ...
+ // All other siblings of foo and bar are pruned from the tree.
+ // (However they are still accessible via TranslationUnitDecl->decls())
+ //
// Changing the scope clears the parent cache, which is expensive to rebuild.
std::vector<Decl *> getTraversalScope() const { return TraversalScope; }
void setTraversalScope(const std::vector<Decl *> &);
@@ -648,6 +684,24 @@ public:
}
void Deallocate(void *Ptr) const {}
+ /// Allocates a \c DeclListNode or returns one from the \c ListNodeFreeList
+ /// pool.
+ DeclListNode *AllocateDeclListNode(clang::NamedDecl *ND) {
+ if (DeclListNode *Alloc = ListNodeFreeList) {
+ ListNodeFreeList = Alloc->Rest.dyn_cast<DeclListNode*>();
+ Alloc->D = ND;
+ Alloc->Rest = nullptr;
+ return Alloc;
+ }
+ return new (*this) DeclListNode(ND);
+ }
+ /// Deallcates a \c DeclListNode by returning it to the \c ListNodeFreeList
+ /// pool.
+ void DeallocateDeclListNode(DeclListNode *N) {
+ N->Rest = ListNodeFreeList;
+ ListNodeFreeList = N;
+ }
+
/// Return the total amount of physical memory allocated for representing
/// AST nodes and type information.
size_t getASTAllocatedMemory() const {
@@ -688,9 +742,7 @@ public:
return LangOpts.CPlusPlus || LangOpts.RecoveryAST;
}
- const SanitizerBlacklist &getSanitizerBlacklist() const {
- return *SanitizerBL;
- }
+ const NoSanitizeList &getNoSanitizeList() const { return *NoSanitizeL; }
const XRayFunctionFilter &getXRayFilter() const {
return *XRayFilter;
@@ -704,6 +756,11 @@ public:
return FullSourceLoc(Loc,SourceMgr);
}
+ /// Return the C++ ABI kind that should be used. The C++ ABI can be overriden
+ /// at compile time with `-fc++-abi=`. If this is not provided, we instead use
+ /// the default ABI set by the target.
+ TargetCXXABI::Kind getCXXABIKind() const;
+
/// All comments in this translation unit.
RawCommentList Comments;
@@ -853,30 +910,38 @@ public:
MemberSpecializationInfo *getInstantiatedFromStaticDataMember(
const VarDecl *Var);
- TemplateOrSpecializationInfo
- getTemplateOrSpecializationInfo(const VarDecl *Var);
-
/// Note that the static data member \p Inst is an instantiation of
/// the static data member template \p Tmpl of a class template.
void setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
TemplateSpecializationKind TSK,
SourceLocation PointOfInstantiation = SourceLocation());
+ TemplateOrSpecializationInfo
+ getTemplateOrSpecializationInfo(const VarDecl *Var);
+
void setTemplateOrSpecializationInfo(VarDecl *Inst,
TemplateOrSpecializationInfo TSI);
- /// If the given using decl \p Inst is an instantiation of a
- /// (possibly unresolved) using decl from a template instantiation,
- /// return it.
+ /// If the given using decl \p Inst is an instantiation of
+ /// another (possibly unresolved) using decl, return it.
NamedDecl *getInstantiatedFromUsingDecl(NamedDecl *Inst);
/// Remember that the using decl \p Inst is an instantiation
/// of the using decl \p Pattern of a class template.
void setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern);
+ /// If the given using-enum decl \p Inst is an instantiation of
+ /// another using-enum decl, return it.
+ UsingEnumDecl *getInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst);
+
+ /// Remember that the using enum decl \p Inst is an instantiation
+ /// of the using enum decl \p Pattern of a class template.
+ void setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
+ UsingEnumDecl *Pattern);
+
+ UsingShadowDecl *getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst);
void setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
UsingShadowDecl *Pattern);
- UsingShadowDecl *getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst);
FieldDecl *getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field);
@@ -959,7 +1024,18 @@ public:
/// Get the initializations to perform when importing a module, if any.
ArrayRef<Decl*> getModuleInitializers(Module *M);
- TranslationUnitDecl *getTranslationUnitDecl() const { return TUDecl; }
+ TranslationUnitDecl *getTranslationUnitDecl() const {
+ return TUDecl->getMostRecentDecl();
+ }
+ void addTranslationUnitDecl() {
+ assert(!TUDecl || TUKind == TU_Incremental);
+ TranslationUnitDecl *NewTUDecl = TranslationUnitDecl::Create(*this);
+ if (TraversalScope.empty() || TraversalScope.back() == TUDecl)
+ TraversalScope = {NewTUDecl};
+ if (TUDecl)
+ NewTUDecl->setPreviousDecl(TUDecl);
+ TUDecl = NewTUDecl;
+ }
ExternCContextDecl *getExternCContextDecl() const;
BuiltinTemplateDecl *getMakeIntegerSeqDecl() const;
@@ -1017,6 +1093,9 @@ public:
#define PPC_VECTOR_TYPE(Name, Id, Size) \
CanQualType Id##Ty;
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) \
+ CanQualType SingletonId;
+#include "clang/Basic/RISCVVTypes.def"
// Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand.
mutable QualType AutoDeductTy; // Deduction against 'auto'.
@@ -1029,11 +1108,12 @@ public:
// Implicitly-declared type 'struct _GUID'.
mutable TagDecl *MSGuidTagDecl = nullptr;
- /// Keep track of CUDA/HIP static device variables referenced by host code.
- llvm::DenseSet<const VarDecl *> CUDAStaticDeviceVarReferencedByHost;
+ /// Keep track of CUDA/HIP device-side variables ODR-used by host code.
+ llvm::DenseSet<const VarDecl *> CUDADeviceVarODRUsedByHost;
ASTContext(LangOptions &LOpts, SourceManager &SM, IdentifierTable &idents,
- SelectorTable &sels, Builtin::Context &builtins);
+ SelectorTable &sels, Builtin::Context &builtins,
+ TranslationUnitKind TUKind);
ASTContext(const ASTContext &) = delete;
ASTContext &operator=(const ASTContext &) = delete;
~ASTContext();
@@ -2320,6 +2400,12 @@ public:
/// If \p T is null pointer, assume the target in ASTContext.
MangleContext *createMangleContext(const TargetInfo *T = nullptr);
+ /// Creates a device mangle context to correctly mangle lambdas in a mixed
+ /// architecture compile by setting the lambda mangling number source to the
+ /// DeviceLambdaManglingNumber. Currently this asserts that the TargetInfo
+ /// (from the AuxTargetInfo) is a an itanium target.
+ MangleContext *createDeviceMangleContext(const TargetInfo &T);
+
void DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass,
SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const;
@@ -2420,7 +2506,7 @@ public:
const ObjCMethodDecl *MethodImp);
bool UnwrapSimilarTypes(QualType &T1, QualType &T2);
- bool UnwrapSimilarArrayTypes(QualType &T1, QualType &T2);
+ void UnwrapSimilarArrayTypes(QualType &T1, QualType &T2);
/// Determine if two types are similar, according to the C++ rules. That is,
/// determine if they are the same other than qualifiers on the initial
@@ -2719,6 +2805,14 @@ public:
// a given fixed point type.
QualType getCorrespondingUnsignedType(QualType T) const;
+ // Per C99 6.2.5p6, for every signed integer type, there is a corresponding
+ // unsigned integer type. This method takes an unsigned type, and returns the
+ // corresponding signed integer type.
+ // With the introduction of fixed point types in ISO N1169, this method also
+ // accepts fixed point types and returns the corresponding signed type for
+ // a given fixed point type.
+ QualType getCorrespondingSignedType(QualType T) const;
+
// Per ISO N1169, this method accepts fixed point types and returns the
// corresponding saturated type for a given fixed point type.
QualType getCorrespondingSaturatedType(QualType Ty) const;
@@ -3113,10 +3207,33 @@ public:
/// Whether a C++ static variable should be externalized.
bool shouldExternalizeStaticVar(const Decl *D) const;
+ StringRef getCUIDHash() const;
+
+ void AddSYCLKernelNamingDecl(const CXXRecordDecl *RD);
+ bool IsSYCLKernelNamingDecl(const NamedDecl *RD) const;
+ unsigned GetSYCLKernelNamingIndex(const NamedDecl *RD);
+ /// A SourceLocation to store whether we have evaluated a kernel name already,
+ /// and where it happened. If so, we need to diagnose an illegal use of the
+ /// builtin.
+ llvm::MapVector<const SYCLUniqueStableNameExpr *, std::string>
+ SYCLUniqueStableNameEvaluatedValues;
+
private:
/// All OMPTraitInfo objects live in this collection, one per
/// `pragma omp [begin] declare variant` directive.
SmallVector<std::unique_ptr<OMPTraitInfo>, 4> OMPTraitInfoVector;
+
+ /// A list of the (right now just lambda decls) declarations required to
+ /// name all the SYCL kernels in the translation unit, so that we can get the
+ /// correct kernel name, as well as implement
+ /// __builtin_sycl_unique_stable_name.
+ llvm::DenseMap<const DeclContext *,
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4>>
+ SYCLKernelNamingTypes;
+ std::unique_ptr<ItaniumMangleContext> SYCLKernelFilterContext;
+ void FilterSYCLKernelNamingDecls(
+ const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls);
};
/// Insertion operator for diagnostics.
diff --git a/clang/include/clang/AST/ASTImporter.h b/clang/include/clang/AST/ASTImporter.h
index a6d822ba2ea6..17e673a8471a 100644
--- a/clang/include/clang/AST/ASTImporter.h
+++ b/clang/include/clang/AST/ASTImporter.h
@@ -93,8 +93,6 @@ class TypeSourceInfo;
using NonEquivalentDeclSet = llvm::DenseSet<std::pair<Decl *, Decl *>>;
using ImportedCXXBaseSpecifierMap =
llvm::DenseMap<const CXXBaseSpecifier *, CXXBaseSpecifier *>;
- using FileIDImportHandlerType =
- std::function<void(FileID /*ToID*/, FileID /*FromID*/)>;
enum class ODRHandlingType { Conservative, Liberal };
@@ -220,8 +218,6 @@ class TypeSourceInfo;
};
private:
- FileIDImportHandlerType FileIDImportHandler;
-
std::shared_ptr<ASTImporterSharedState> SharedState = nullptr;
/// The path which we go through during the import of a given AST node.
@@ -324,14 +320,6 @@ class TypeSourceInfo;
virtual ~ASTImporter();
- /// Set a callback function for FileID import handling.
- /// The function is invoked when a FileID is imported from the From context.
- /// The imported FileID in the To context and the original FileID in the
- /// From context is passed to it.
- void setFileIDImportHandler(FileIDImportHandlerType H) {
- FileIDImportHandler = H;
- }
-
/// Whether the importer will perform a minimal import, creating
/// to-be-completed forward declarations when possible.
bool isMinimalImport() const { return Minimal; }
@@ -356,6 +344,12 @@ class TypeSourceInfo;
Import(ExprWithCleanups::CleanupObject From);
/// Import the given type from the "from" context into the "to"
+ /// context.
+ ///
+ /// \returns The equivalent type in the "to" context, or the import error.
+ llvm::Expected<const Type *> Import(const Type *FromT);
+
+ /// Import the given qualified type from the "from" context into the "to"
/// context. A null type is imported as a null type (no error).
///
/// \returns The equivalent type in the "to" context, or the import error.
diff --git a/clang/include/clang/AST/ASTImporterLookupTable.h b/clang/include/clang/AST/ASTImporterLookupTable.h
index 407478a51058..47dca2033839 100644
--- a/clang/include/clang/AST/ASTImporterLookupTable.h
+++ b/clang/include/clang/AST/ASTImporterLookupTable.h
@@ -63,8 +63,24 @@ public:
ASTImporterLookupTable(TranslationUnitDecl &TU);
void add(NamedDecl *ND);
void remove(NamedDecl *ND);
+ // Sometimes a declaration is created first with a temporarily value of decl
+ // context (often the translation unit) and later moved to the final context.
+ // This happens for declarations that are created before the final declaration
+ // context. In such cases the lookup table needs to be updated.
+ // (The declaration is in these cases not added to the temporary decl context,
+ // only its parent is set.)
+ // FIXME: It would be better to not add the declaration to the temporary
+ // context at all in the lookup table, but this requires big change in
+ // ASTImporter.
+ // The function should be called when the old context is definitely different
+ // from the new.
+ void update(NamedDecl *ND, DeclContext *OldDC);
using LookupResult = DeclList;
LookupResult lookup(DeclContext *DC, DeclarationName Name) const;
+ // Check if the `ND` is within the lookup table (with its current name) in
+ // context `DC`. This is intended for debug purposes when the DeclContext of a
+ // NamedDecl is changed.
+ bool contains(DeclContext *DC, NamedDecl *ND) const;
void dump(DeclContext *DC) const;
void dump() const;
};
diff --git a/clang/include/clang/AST/ASTNodeTraverser.h b/clang/include/clang/AST/ASTNodeTraverser.h
index bb5b0c73f028..18e7f491f222 100644
--- a/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/clang/include/clang/AST/ASTNodeTraverser.h
@@ -53,6 +53,7 @@ struct {
void Visit(const OMPClause *C);
void Visit(const BlockDecl::Capture &C);
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const concepts::Requirement *R);
void Visit(const APValue &Value, QualType Ty);
};
*/
@@ -141,7 +142,8 @@ public:
ConstStmtVisitor<Derived>::Visit(S);
// Some statements have custom mechanisms for dumping their children.
- if (isa<DeclStmt>(S) || isa<GenericSelectionExpr>(S))
+ if (isa<DeclStmt>(S) || isa<GenericSelectionExpr>(S) ||
+ isa<RequiresExpr>(S))
return;
if (Traversal == TK_IgnoreUnlessSpelledInSource &&
@@ -228,6 +230,28 @@ public:
});
}
+ void Visit(const concepts::Requirement *R) {
+ getNodeDelegate().AddChild([=] {
+ getNodeDelegate().Visit(R);
+ if (!R)
+ return;
+ if (auto *TR = dyn_cast<concepts::TypeRequirement>(R)) {
+ if (!TR->isSubstitutionFailure())
+ Visit(TR->getType()->getType().getTypePtr());
+ } else if (auto *ER = dyn_cast<concepts::ExprRequirement>(R)) {
+ if (!ER->isExprSubstitutionFailure())
+ Visit(ER->getExpr());
+ if (!ER->getReturnTypeRequirement().isEmpty())
+ Visit(ER->getReturnTypeRequirement()
+ .getTypeConstraint()
+ ->getImmediatelyDeclaredConstraint());
+ } else if (auto *NR = dyn_cast<concepts::NestedRequirement>(R)) {
+ if (!NR->isSubstitutionFailure())
+ Visit(NR->getConstraintExpr());
+ }
+ });
+ }
+
void Visit(const APValue &Value, QualType Ty) {
getNodeDelegate().AddChild([=] { getNodeDelegate().Visit(Value, Ty); });
}
@@ -438,6 +462,8 @@ public:
}
void VisitBindingDecl(const BindingDecl *D) {
+ if (Traversal == TK_IgnoreUnlessSpelledInSource)
+ return;
if (const auto *E = D->getBinding())
Visit(E);
}
@@ -687,6 +713,13 @@ public:
}
}
+ void VisitRequiresExpr(const RequiresExpr *E) {
+ for (auto *D : E->getLocalParameters())
+ Visit(D);
+ for (auto *R : E->getRequirements())
+ Visit(R);
+ }
+
void VisitLambdaExpr(const LambdaExpr *Node) {
if (Traversal == TK_IgnoreUnlessSpelledInSource) {
for (unsigned I = 0, N = Node->capture_size(); I != N; ++I) {
diff --git a/clang/include/clang/AST/Attr.h b/clang/include/clang/AST/Attr.h
index e453733ab92c..dbfecc125049 100644
--- a/clang/include/clang/AST/Attr.h
+++ b/clang/include/clang/AST/Attr.h
@@ -208,6 +208,8 @@ public:
switch (getKind()) {
case attr::SwiftContext:
return ParameterABI::SwiftContext;
+ case attr::SwiftAsyncContext:
+ return ParameterABI::SwiftAsyncContext;
case attr::SwiftErrorResult:
return ParameterABI::SwiftErrorResult;
case attr::SwiftIndirectResult:
diff --git a/clang/include/clang/AST/CXXInheritance.h b/clang/include/clang/AST/CXXInheritance.h
index 709f08bff82a..946b9e318baa 100644
--- a/clang/include/clang/AST/CXXInheritance.h
+++ b/clang/include/clang/AST/CXXInheritance.h
@@ -76,9 +76,8 @@ public:
CXXBasePath() = default;
- /// The set of declarations found inside this base class
- /// subobject.
- DeclContext::lookup_result Decls;
+ /// The declarations found inside this base class subobject.
+ DeclContext::lookup_iterator Decls;
void clear() {
SmallVectorImpl<CXXBasePathElement>::clear();
diff --git a/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def b/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
index d15d6698860f..9b270682f8cf 100644
--- a/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
+++ b/clang/include/clang/AST/CXXRecordDeclDefinitionBits.def
@@ -242,4 +242,8 @@ FIELD(HasDeclaredCopyConstructorWithConstParam, 1, MERGE_OR)
/// const-qualified reference parameter or a non-reference parameter.
FIELD(HasDeclaredCopyAssignmentWithConstParam, 1, MERGE_OR)
+/// Whether the destructor is no-return. Either explicitly, or if any
+/// base classes or fields have a no-return destructor
+FIELD(IsAnyDestructorNoReturn, 1, NO_MERGE)
+
#undef FIELD
diff --git a/clang/include/clang/AST/ComparisonCategories.h b/clang/include/clang/AST/ComparisonCategories.h
index 70a78964b8a0..b41e934142ee 100644
--- a/clang/include/clang/AST/ComparisonCategories.h
+++ b/clang/include/clang/AST/ComparisonCategories.h
@@ -19,6 +19,7 @@
#include "llvm/ADT/DenseMap.h"
#include <array>
#include <cassert>
+#include <vector>
namespace llvm {
class StringRef;
diff --git a/clang/include/clang/AST/ComputeDependence.h b/clang/include/clang/AST/ComputeDependence.h
index 04e8e2c7d2cc..8db09e6b57d0 100644
--- a/clang/include/clang/AST/ComputeDependence.h
+++ b/clang/include/clang/AST/ComputeDependence.h
@@ -71,6 +71,7 @@ class OverloadExpr;
class DependentScopeDeclRefExpr;
class CXXConstructExpr;
class CXXDefaultInitExpr;
+class CXXDefaultArgExpr;
class LambdaExpr;
class CXXUnresolvedConstructExpr;
class CXXDependentScopeMemberExpr;
@@ -78,6 +79,7 @@ class MaterializeTemporaryExpr;
class CXXFoldExpr;
class TypeTraitExpr;
class ConceptSpecializationExpr;
+class SYCLUniqueStableNameExpr;
class PredefinedExpr;
class CallExpr;
class OffsetOfExpr;
@@ -155,6 +157,7 @@ ExprDependence computeDependence(OverloadExpr *E, bool KnownDependent,
ExprDependence computeDependence(DependentScopeDeclRefExpr *E);
ExprDependence computeDependence(CXXConstructExpr *E);
ExprDependence computeDependence(CXXDefaultInitExpr *E);
+ExprDependence computeDependence(CXXDefaultArgExpr *E);
ExprDependence computeDependence(LambdaExpr *E,
bool ContainsUnexpandedParameterPack);
ExprDependence computeDependence(CXXUnresolvedConstructExpr *E);
@@ -165,6 +168,7 @@ ExprDependence computeDependence(TypeTraitExpr *E);
ExprDependence computeDependence(ConceptSpecializationExpr *E,
bool ValueDependent);
+ExprDependence computeDependence(SYCLUniqueStableNameExpr *E);
ExprDependence computeDependence(PredefinedExpr *E);
ExprDependence computeDependence(CallExpr *E, llvm::ArrayRef<Expr *> PreArgs);
ExprDependence computeDependence(OffsetOfExpr *E);
diff --git a/clang/include/clang/AST/Decl.h b/clang/include/clang/AST/Decl.h
index 47c282f0a63d..510bf8978985 100644
--- a/clang/include/clang/AST/Decl.h
+++ b/clang/include/clang/AST/Decl.h
@@ -79,7 +79,23 @@ class UnresolvedSetImpl;
class VarTemplateDecl;
/// The top declaration context.
-class TranslationUnitDecl : public Decl, public DeclContext {
+class TranslationUnitDecl : public Decl,
+ public DeclContext,
+ public Redeclarable<TranslationUnitDecl> {
+ using redeclarable_base = Redeclarable<TranslationUnitDecl>;
+
+ TranslationUnitDecl *getNextRedeclarationImpl() override {
+ return getNextRedeclaration();
+ }
+
+ TranslationUnitDecl *getPreviousDeclImpl() override {
+ return getPreviousDecl();
+ }
+
+ TranslationUnitDecl *getMostRecentDeclImpl() override {
+ return getMostRecentDecl();
+ }
+
ASTContext &Ctx;
/// The (most recently entered) anonymous namespace for this
@@ -91,6 +107,16 @@ class TranslationUnitDecl : public Decl, public DeclContext {
virtual void anchor();
public:
+ using redecl_range = redeclarable_base::redecl_range;
+ using redecl_iterator = redeclarable_base::redecl_iterator;
+
+ using redeclarable_base::getMostRecentDecl;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::isFirstDecl;
+ using redeclarable_base::redecls;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+
ASTContext &getASTContext() const { return Ctx; }
NamespaceDecl *getAnonymousNamespace() const { return AnonymousNamespace; }
@@ -356,6 +382,10 @@ public:
/// a C++ class.
bool isCXXInstanceMember() const;
+ /// Determine if the declaration obeys the reserved identifier rules of the
+ /// given language.
+ ReservedIdentifierStatus isReserved(const LangOptions &LangOpts) const;
+
/// Determine what kind of linkage this entity has.
///
/// This is not the linkage as defined by the standard or the codegen notion
@@ -579,6 +609,16 @@ public:
AnonOrFirstNamespaceAndInline.setInt(Inline);
}
+ /// Returns true if the inline qualifier for \c Name is redundant.
+ bool isRedundantInlineQualifierFor(DeclarationName Name) const {
+ if (!isInline())
+ return false;
+ auto X = lookup(Name);
+ auto Y = getParent()->lookup(Name);
+ return std::distance(X.begin(), X.end()) ==
+ std::distance(Y.begin(), Y.end());
+ }
+
/// Get the original (first) namespace declaration.
NamespaceDecl *getOriginalNamespace();
@@ -1480,6 +1520,9 @@ public:
NonParmVarDeclBits.EscapingByref = true;
}
+ /// Determines if this variable's alignment is dependent.
+ bool hasDependentAlignment() const;
+
/// Retrieve the variable declaration from which this variable could
/// be instantiated, if it is an instantiation (rather than a non-template).
VarDecl *getTemplateInstantiationPattern() const;
diff --git a/clang/include/clang/AST/DeclBase.h b/clang/include/clang/AST/DeclBase.h
index 15eb29f72539..482d2889a25a 100644
--- a/clang/include/clang/AST/DeclBase.h
+++ b/clang/include/clang/AST/DeclBase.h
@@ -1220,65 +1220,110 @@ public:
void print(raw_ostream &OS) const override;
};
+} // namespace clang
-/// The results of name lookup within a DeclContext. This is either a
-/// single result (with no stable storage) or a collection of results (with
-/// stable storage provided by the lookup table).
-class DeclContextLookupResult {
- using ResultTy = ArrayRef<NamedDecl *>;
-
- ResultTy Result;
-
- // If there is only one lookup result, it would be invalidated by
- // reallocations of the name table, so store it separately.
- NamedDecl *Single = nullptr;
-
- static NamedDecl *const SingleElementDummyList;
+// Required to determine the layout of PointerUnion<NamedDecl*> before the
+// NamedDecl definition is seen; it is first used in DeclListNode::operator*.
+namespace llvm {
+ template <> struct PointerLikeTypeTraits<::clang::NamedDecl *> {
+ static inline void *getAsVoidPointer(::clang::NamedDecl *P) { return P; }
+ static inline ::clang::NamedDecl *getFromVoidPointer(void *P) {
+ return static_cast<::clang::NamedDecl *>(P);
+ }
+ static constexpr int NumLowBitsAvailable = 3;
+ };
+}
+namespace clang {
+/// A list storing NamedDecls in the lookup tables.
+class DeclListNode {
+ friend class ASTContext; // allocate, deallocate nodes.
+ friend class StoredDeclsList;
public:
- DeclContextLookupResult() = default;
- DeclContextLookupResult(ArrayRef<NamedDecl *> Result)
- : Result(Result) {}
- DeclContextLookupResult(NamedDecl *Single)
- : Result(SingleElementDummyList), Single(Single) {}
-
- class iterator;
-
- using IteratorBase =
- llvm::iterator_adaptor_base<iterator, ResultTy::iterator,
- std::random_access_iterator_tag, NamedDecl *>;
-
- class iterator : public IteratorBase {
- value_type SingleElement;
+ using Decls = llvm::PointerUnion<NamedDecl*, DeclListNode*>;
+ class iterator {
+ friend class DeclContextLookupResult;
+ friend class StoredDeclsList;
+ Decls Ptr;
+ iterator(Decls Node) : Ptr(Node) { }
public:
- explicit iterator(pointer Pos, value_type Single = nullptr)
- : IteratorBase(Pos), SingleElement(Single) {}
+ using difference_type = ptrdiff_t;
+ using value_type = NamedDecl*;
+ using pointer = void;
+ using reference = value_type;
+ using iterator_category = std::forward_iterator_tag;
+
+ iterator() = default;
reference operator*() const {
- return SingleElement ? SingleElement : IteratorBase::operator*();
+ assert(Ptr && "dereferencing end() iterator");
+ if (DeclListNode *CurNode = Ptr.dyn_cast<DeclListNode*>())
+ return CurNode->D;
+ return Ptr.get<NamedDecl*>();
+ }
+ void operator->() const { } // Unsupported.
+ bool operator==(const iterator &X) const { return Ptr == X.Ptr; }
+ bool operator!=(const iterator &X) const { return Ptr != X.Ptr; }
+ inline iterator &operator++() { // ++It
+ assert(!Ptr.isNull() && "Advancing empty iterator");
+
+ if (DeclListNode *CurNode = Ptr.dyn_cast<DeclListNode*>())
+ Ptr = CurNode->Rest;
+ else
+ Ptr = nullptr;
+ return *this;
+ }
+ iterator operator++(int) { // It++
+ iterator temp = *this;
+ ++(*this);
+ return temp;
}
+    // Enables the pattern: for (iterator I = ..., E = I.end(); I != E; ++I)
+ iterator end() { return iterator(); }
};
+private:
+ NamedDecl *D = nullptr;
+ Decls Rest = nullptr;
+ DeclListNode(NamedDecl *ND) : D(ND) {}
+};
+/// The results of name lookup within a DeclContext.
+class DeclContextLookupResult {
+ using Decls = DeclListNode::Decls;
+
+  /// The (possibly empty) list of declarations found by the lookup.
+ Decls Result;
+
+public:
+ DeclContextLookupResult() = default;
+ DeclContextLookupResult(Decls Result) : Result(Result) {}
+
+ using iterator = DeclListNode::iterator;
using const_iterator = iterator;
- using pointer = iterator::pointer;
using reference = iterator::reference;
- iterator begin() const { return iterator(Result.begin(), Single); }
- iterator end() const { return iterator(Result.end(), Single); }
+ iterator begin() { return iterator(Result); }
+ iterator end() { return iterator(); }
+ const_iterator begin() const {
+ return const_cast<DeclContextLookupResult*>(this)->begin();
+ }
+ const_iterator end() const { return iterator(); }
+
+ bool empty() const { return Result.isNull(); }
+ bool isSingleResult() const { return Result.dyn_cast<NamedDecl*>(); }
+ reference front() const { return *begin(); }
- bool empty() const { return Result.empty(); }
- pointer data() const { return Single ? &Single : Result.data(); }
- size_t size() const { return Single ? 1 : Result.size(); }
- reference front() const { return Single ? Single : Result.front(); }
- reference back() const { return Single ? Single : Result.back(); }
- reference operator[](size_t N) const { return Single ? Single : Result[N]; }
+ // Find the first declaration of the given type in the list. Note that this
+ // is not in general the earliest-declared declaration, and should only be
+ // used when it's not possible for there to be more than one match or where
+ // it doesn't matter which one is found.
+ template<class T> T *find_first() const {
+ for (auto *D : *this)
+ if (T *Decl = dyn_cast<T>(D))
+ return Decl;
- // FIXME: Remove this from the interface
- DeclContextLookupResult slice(size_t N) const {
- DeclContextLookupResult Sliced = Result.slice(N);
- Sliced.Single = Single;
- return Sliced;
+ return nullptr;
}
};
@@ -2333,7 +2378,7 @@ public:
using udir_iterator_base =
llvm::iterator_adaptor_base<udir_iterator, lookup_iterator,
- std::random_access_iterator_tag,
+ typename lookup_iterator::iterator_category,
UsingDirectiveDecl *>;
struct udir_iterator : udir_iterator_base {
diff --git a/clang/include/clang/AST/DeclCXX.h b/clang/include/clang/AST/DeclCXX.h
index e32101bb2276..0d5ad40fc19e 100644
--- a/clang/include/clang/AST/DeclCXX.h
+++ b/clang/include/clang/AST/DeclCXX.h
@@ -69,6 +69,7 @@ class FriendDecl;
class FunctionTemplateDecl;
class IdentifierInfo;
class MemberSpecializationInfo;
+class BaseUsingDecl;
class TemplateDecl;
class TemplateParameterList;
class UsingDecl;
@@ -1479,7 +1480,7 @@ public:
/// Returns true if the class destructor, or any implicitly invoked
/// destructors are marked noreturn.
- bool isAnyDestructorNoReturn() const;
+ bool isAnyDestructorNoReturn() const { return data().IsAnyDestructorNoReturn; }
/// If the class is a local class [class.local], returns
/// the enclosing function declaration.
@@ -1735,6 +1736,12 @@ public:
getLambdaData().HasKnownInternalLinkage = HasKnownInternalLinkage;
}
+ /// Set the device side mangling number.
+ void setDeviceLambdaManglingNumber(unsigned Num) const;
+
+ /// Retrieve the device side mangling number.
+ unsigned getDeviceLambdaManglingNumber() const;
+
/// Returns the inheritance model used for this record.
MSInheritanceModel getMSInheritanceModel() const;
@@ -1780,6 +1787,7 @@ public:
static bool classofKind(Kind K) {
return K >= firstCXXRecord && K <= lastCXXRecord;
}
+ void markAbstract() { data().Abstract = true; }
};
/// Store information needed for an explicit specifier.
@@ -1846,15 +1854,17 @@ private:
CXXDeductionGuideDecl(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
ExplicitSpecifier ES,
const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, SourceLocation EndLocation)
+ TypeSourceInfo *TInfo, SourceLocation EndLocation,
+ CXXConstructorDecl *Ctor)
: FunctionDecl(CXXDeductionGuide, C, DC, StartLoc, NameInfo, T, TInfo,
SC_None, false, ConstexprSpecKind::Unspecified),
- ExplicitSpec(ES) {
+ Ctor(Ctor), ExplicitSpec(ES) {
if (EndLocation.isValid())
setRangeEnd(EndLocation);
setIsCopyDeductionCandidate(false);
}
+ CXXConstructorDecl *Ctor;
ExplicitSpecifier ExplicitSpec;
void setExplicitSpecifier(ExplicitSpecifier ES) { ExplicitSpec = ES; }
@@ -1865,7 +1875,8 @@ public:
static CXXDeductionGuideDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, SourceLocation EndLocation);
+ TypeSourceInfo *TInfo, SourceLocation EndLocation,
+ CXXConstructorDecl *Ctor = nullptr);
static CXXDeductionGuideDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -1880,6 +1891,12 @@ public:
return getDeclName().getCXXDeductionGuideTemplate();
}
+ /// Get the constructor from which this deduction guide was generated, if
+ /// this is an implicit deduction guide.
+ CXXConstructorDecl *getCorrespondingConstructor() const {
+ return Ctor;
+ }
+
void setIsCopyDeductionCandidate(bool isCDC = true) {
FunctionDeclBits.IsCopyDeductionCandidate = isCDC;
}
@@ -2160,6 +2177,10 @@ class CXXCtorInitializer final {
llvm::PointerUnion<TypeSourceInfo *, FieldDecl *, IndirectFieldDecl *>
Initializee;
+ /// The argument used to initialize the base or member, which may
+ /// end up constructing an object (when multiple arguments are involved).
+ Stmt *Init;
+
/// The source location for the field name or, for a base initializer
/// pack expansion, the location of the ellipsis.
///
@@ -2168,10 +2189,6 @@ class CXXCtorInitializer final {
/// Initializee points to the CXXConstructorDecl (to allow loop detection).
SourceLocation MemberOrEllipsisLocation;
- /// The argument used to initialize the base or member, which may
- /// end up constructing an object (when multiple arguments are involved).
- Stmt *Init;
-
/// Location of the left paren of the ctor-initializer.
SourceLocation LParenLoc;
@@ -2261,7 +2278,8 @@ public:
// For a pack expansion, returns the location of the ellipsis.
SourceLocation getEllipsisLoc() const {
- assert(isPackExpansion() && "Initializer is not a pack expansion");
+ if (!isPackExpansion())
+ return {};
return MemberOrEllipsisLocation;
}
@@ -2418,12 +2436,12 @@ class CXXConstructorDecl final
: ExplicitSpecKind::ResolvedFalse);
}
- enum TraillingAllocKind {
+ enum TrailingAllocKind {
TAKInheritsConstructor = 1,
TAKHasTailExplicit = 1 << 1,
};
- uint64_t getTraillingAllocKind() const {
+ uint64_t getTrailingAllocKind() const {
return numTrailingObjects(OverloadToken<InheritedConstructor>()) |
(numTrailingObjects(OverloadToken<ExplicitSpecifier>()) << 1);
}
@@ -3146,21 +3164,27 @@ public:
}
};
-/// Represents a shadow declaration introduced into a scope by a
-/// (resolved) using declaration.
+/// Represents a shadow declaration implicitly introduced into a scope by a
+/// (resolved) using-declaration or using-enum-declaration to achieve
+/// the desired lookup semantics.
///
-/// For example,
+/// For example:
/// \code
/// namespace A {
/// void foo();
+/// void foo(int);
+/// struct foo {};
+/// enum bar { bar1, bar2 };
/// }
/// namespace B {
-/// using A::foo; // <- a UsingDecl
-/// // Also creates a UsingShadowDecl for A::foo() in B
+/// // add a UsingDecl and three UsingShadowDecls (named foo) to B.
+/// using A::foo;
+/// // adds UsingEnumDecl and two UsingShadowDecls (named bar1 and bar2) to B.
+/// using enum A::bar;
/// }
/// \endcode
class UsingShadowDecl : public NamedDecl, public Redeclarable<UsingShadowDecl> {
- friend class UsingDecl;
+ friend class BaseUsingDecl;
/// The referenced declaration.
NamedDecl *Underlying = nullptr;
@@ -3187,7 +3211,8 @@ class UsingShadowDecl : public NamedDecl, public Redeclarable<UsingShadowDecl> {
protected:
UsingShadowDecl(Kind K, ASTContext &C, DeclContext *DC, SourceLocation Loc,
- UsingDecl *Using, NamedDecl *Target);
+ DeclarationName Name, BaseUsingDecl *Introducer,
+ NamedDecl *Target);
UsingShadowDecl(Kind K, ASTContext &C, EmptyShell);
public:
@@ -3195,9 +3220,10 @@ public:
friend class ASTDeclWriter;
static UsingShadowDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation Loc, UsingDecl *Using,
- NamedDecl *Target) {
- return new (C, DC) UsingShadowDecl(UsingShadow, C, DC, Loc, Using, Target);
+ SourceLocation Loc, DeclarationName Name,
+ BaseUsingDecl *Introducer, NamedDecl *Target) {
+ return new (C, DC)
+ UsingShadowDecl(UsingShadow, C, DC, Loc, Name, Introducer, Target);
}
static UsingShadowDecl *CreateDeserialized(ASTContext &C, unsigned ID);
@@ -3235,8 +3261,9 @@ public:
~(IDNS_OrdinaryFriend | IDNS_TagFriend | IDNS_LocalExtern);
}
- /// Gets the using declaration to which this declaration is tied.
- UsingDecl *getUsingDecl() const;
+ /// Gets the (written or instantiated) using declaration that introduced this
+ /// declaration.
+ BaseUsingDecl *getIntroducer() const;
/// The next using shadow declaration contained in the shadow decl
/// chain of the using declaration which introduced this decl.
@@ -3250,6 +3277,180 @@ public:
}
};
+/// Represents a C++ declaration that introduces decls from somewhere else. It
+/// provides a set of the shadow decls so introduced.
+
+class BaseUsingDecl : public NamedDecl {
+ /// The first shadow declaration of the shadow decl chain associated
+ /// with this using declaration.
+ ///
+ /// The bool member of the pair is a bool flag a derived type may use
+ /// (UsingDecl makes use of it).
+ llvm::PointerIntPair<UsingShadowDecl *, 1, bool> FirstUsingShadow;
+
+protected:
+ BaseUsingDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName N)
+ : NamedDecl(DK, DC, L, N), FirstUsingShadow(nullptr, 0) {}
+
+private:
+ void anchor() override;
+
+protected:
+ /// A bool flag for use by a derived type
+ bool getShadowFlag() const { return FirstUsingShadow.getInt(); }
+
+ /// A bool flag a derived type may set
+ void setShadowFlag(bool V) { FirstUsingShadow.setInt(V); }
+
+public:
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+
+ /// Iterates through the using shadow declarations associated with
+ /// this using declaration.
+ class shadow_iterator {
+ /// The current using shadow declaration.
+ UsingShadowDecl *Current = nullptr;
+
+ public:
+ using value_type = UsingShadowDecl *;
+ using reference = UsingShadowDecl *;
+ using pointer = UsingShadowDecl *;
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
+
+ shadow_iterator() = default;
+ explicit shadow_iterator(UsingShadowDecl *C) : Current(C) {}
+
+ reference operator*() const { return Current; }
+ pointer operator->() const { return Current; }
+
+ shadow_iterator &operator++() {
+ Current = Current->getNextUsingShadowDecl();
+ return *this;
+ }
+
+ shadow_iterator operator++(int) {
+ shadow_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(shadow_iterator x, shadow_iterator y) {
+ return x.Current == y.Current;
+ }
+ friend bool operator!=(shadow_iterator x, shadow_iterator y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ using shadow_range = llvm::iterator_range<shadow_iterator>;
+
+ shadow_range shadows() const {
+ return shadow_range(shadow_begin(), shadow_end());
+ }
+
+ shadow_iterator shadow_begin() const {
+ return shadow_iterator(FirstUsingShadow.getPointer());
+ }
+
+ shadow_iterator shadow_end() const { return shadow_iterator(); }
+
+ /// Return the number of shadowed declarations associated with this
+ /// using declaration.
+ unsigned shadow_size() const {
+ return std::distance(shadow_begin(), shadow_end());
+ }
+
+ void addShadowDecl(UsingShadowDecl *S);
+ void removeShadowDecl(UsingShadowDecl *S);
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == Using || K == UsingEnum; }
+};
+
+/// Represents a C++ using-declaration.
+///
+/// For example:
+/// \code
+/// using someNameSpace::someIdentifier;
+/// \endcode
+class UsingDecl : public BaseUsingDecl, public Mergeable<UsingDecl> {
+ /// The source location of the 'using' keyword itself.
+ SourceLocation UsingLocation;
+
+ /// The nested-name-specifier that precedes the name.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// Provides source/type location info for the declaration name
+ /// embedded in the ValueDecl base class.
+ DeclarationNameLoc DNLoc;
+
+ UsingDecl(DeclContext *DC, SourceLocation UL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo, bool HasTypenameKeyword)
+ : BaseUsingDecl(Using, DC, NameInfo.getLoc(), NameInfo.getName()),
+ UsingLocation(UL), QualifierLoc(QualifierLoc),
+ DNLoc(NameInfo.getInfo()) {
+ setShadowFlag(HasTypenameKeyword);
+ }
+
+ void anchor() override;
+
+public:
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+
+ /// Return the source location of the 'using' keyword.
+ SourceLocation getUsingLoc() const { return UsingLocation; }
+
+ /// Set the source location of the 'using' keyword.
+ void setUsingLoc(SourceLocation L) { UsingLocation = L; }
+
+ /// Retrieve the nested-name-specifier that qualifies the name,
+ /// with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// Retrieve the nested-name-specifier that qualifies the name.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ DeclarationNameInfo getNameInfo() const {
+ return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
+ }
+
+ /// Return true if it is a C++03 access declaration (no 'using').
+ bool isAccessDeclaration() const { return UsingLocation.isInvalid(); }
+
+ /// Return true if the using declaration has 'typename'.
+ bool hasTypename() const { return getShadowFlag(); }
+
+ /// Sets whether the using declaration has 'typename'.
+ void setTypename(bool TN) { setShadowFlag(TN); }
+
+ static UsingDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool HasTypenameKeyword);
+
+ static UsingDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const override LLVM_READONLY;
+
+ /// Retrieves the canonical declaration of this declaration.
+ UsingDecl *getCanonicalDecl() override {
+ return cast<UsingDecl>(getFirstDecl());
+ }
+ const UsingDecl *getCanonicalDecl() const {
+ return cast<UsingDecl>(getFirstDecl());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == Using; }
+};
+
/// Represents a shadow constructor declaration introduced into a
/// class by a C++11 using-declaration that names a constructor.
///
@@ -3280,7 +3481,8 @@ class ConstructorUsingShadowDecl final : public UsingShadowDecl {
ConstructorUsingShadowDecl(ASTContext &C, DeclContext *DC, SourceLocation Loc,
UsingDecl *Using, NamedDecl *Target,
bool TargetInVirtualBase)
- : UsingShadowDecl(ConstructorUsingShadow, C, DC, Loc, Using,
+ : UsingShadowDecl(ConstructorUsingShadow, C, DC, Loc,
+ Using->getDeclName(), Using,
Target->getUnderlyingDecl()),
NominatedBaseClassShadowDecl(
dyn_cast<ConstructorUsingShadowDecl>(Target)),
@@ -3313,6 +3515,12 @@ public:
static ConstructorUsingShadowDecl *CreateDeserialized(ASTContext &C,
unsigned ID);
+ /// Override the UsingShadowDecl's getIntroducer, returning the UsingDecl that
+ /// introduced this.
+ UsingDecl *getIntroducer() const {
+ return cast<UsingDecl>(UsingShadowDecl::getIntroducer());
+ }
+
/// Returns the parent of this using shadow declaration, which
/// is the class in which this is declared.
//@{
@@ -3360,37 +3568,27 @@ public:
static bool classofKind(Kind K) { return K == ConstructorUsingShadow; }
};
-/// Represents a C++ using-declaration.
+/// Represents a C++ using-enum-declaration.
///
/// For example:
/// \code
-/// using someNameSpace::someIdentifier;
+/// using enum SomeEnumTag;
/// \endcode
-class UsingDecl : public NamedDecl, public Mergeable<UsingDecl> {
+
+class UsingEnumDecl : public BaseUsingDecl, public Mergeable<UsingEnumDecl> {
/// The source location of the 'using' keyword itself.
SourceLocation UsingLocation;
- /// The nested-name-specifier that precedes the name.
- NestedNameSpecifierLoc QualifierLoc;
-
- /// Provides source/type location info for the declaration name
- /// embedded in the ValueDecl base class.
- DeclarationNameLoc DNLoc;
+ /// Location of the 'enum' keyword.
+ SourceLocation EnumLocation;
- /// The first shadow declaration of the shadow decl chain associated
- /// with this using declaration.
- ///
- /// The bool member of the pair store whether this decl has the \c typename
- /// keyword.
- llvm::PointerIntPair<UsingShadowDecl *, 1, bool> FirstUsingShadow;
+  /// The enum named by this using-enum-declaration.
+ EnumDecl *Enum;
- UsingDecl(DeclContext *DC, SourceLocation UL,
- NestedNameSpecifierLoc QualifierLoc,
- const DeclarationNameInfo &NameInfo, bool HasTypenameKeyword)
- : NamedDecl(Using, DC, NameInfo.getLoc(), NameInfo.getName()),
- UsingLocation(UL), QualifierLoc(QualifierLoc),
- DNLoc(NameInfo.getInfo()), FirstUsingShadow(nullptr, HasTypenameKeyword) {
- }
+ UsingEnumDecl(DeclContext *DC, DeclarationName DN, SourceLocation UL,
+ SourceLocation EL, SourceLocation NL, EnumDecl *ED)
+ : BaseUsingDecl(UsingEnum, DC, NL, DN), UsingLocation(UL),
+ EnumLocation(EL), Enum(ED) {}
void anchor() override;
@@ -3398,109 +3596,35 @@ public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
- /// Return the source location of the 'using' keyword.
+ /// The source location of the 'using' keyword.
SourceLocation getUsingLoc() const { return UsingLocation; }
-
- /// Set the source location of the 'using' keyword.
void setUsingLoc(SourceLocation L) { UsingLocation = L; }
- /// Retrieve the nested-name-specifier that qualifies the name,
- /// with source-location information.
- NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
-
- /// Retrieve the nested-name-specifier that qualifies the name.
- NestedNameSpecifier *getQualifier() const {
- return QualifierLoc.getNestedNameSpecifier();
- }
-
- DeclarationNameInfo getNameInfo() const {
- return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
- }
-
- /// Return true if it is a C++03 access declaration (no 'using').
- bool isAccessDeclaration() const { return UsingLocation.isInvalid(); }
-
- /// Return true if the using declaration has 'typename'.
- bool hasTypename() const { return FirstUsingShadow.getInt(); }
-
- /// Sets whether the using declaration has 'typename'.
- void setTypename(bool TN) { FirstUsingShadow.setInt(TN); }
-
- /// Iterates through the using shadow declarations associated with
- /// this using declaration.
- class shadow_iterator {
- /// The current using shadow declaration.
- UsingShadowDecl *Current = nullptr;
-
- public:
- using value_type = UsingShadowDecl *;
- using reference = UsingShadowDecl *;
- using pointer = UsingShadowDecl *;
- using iterator_category = std::forward_iterator_tag;
- using difference_type = std::ptrdiff_t;
-
- shadow_iterator() = default;
- explicit shadow_iterator(UsingShadowDecl *C) : Current(C) {}
-
- reference operator*() const { return Current; }
- pointer operator->() const { return Current; }
-
- shadow_iterator& operator++() {
- Current = Current->getNextUsingShadowDecl();
- return *this;
- }
-
- shadow_iterator operator++(int) {
- shadow_iterator tmp(*this);
- ++(*this);
- return tmp;
- }
-
- friend bool operator==(shadow_iterator x, shadow_iterator y) {
- return x.Current == y.Current;
- }
- friend bool operator!=(shadow_iterator x, shadow_iterator y) {
- return x.Current != y.Current;
- }
- };
-
- using shadow_range = llvm::iterator_range<shadow_iterator>;
-
- shadow_range shadows() const {
- return shadow_range(shadow_begin(), shadow_end());
- }
+ /// The source location of the 'enum' keyword.
+ SourceLocation getEnumLoc() const { return EnumLocation; }
+ void setEnumLoc(SourceLocation L) { EnumLocation = L; }
- shadow_iterator shadow_begin() const {
- return shadow_iterator(FirstUsingShadow.getPointer());
- }
-
- shadow_iterator shadow_end() const { return shadow_iterator(); }
+public:
+ EnumDecl *getEnumDecl() const { return Enum; }
- /// Return the number of shadowed declarations associated with this
- /// using declaration.
- unsigned shadow_size() const {
- return std::distance(shadow_begin(), shadow_end());
- }
+ static UsingEnumDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingL, SourceLocation EnumL,
+ SourceLocation NameL, EnumDecl *ED);
- void addShadowDecl(UsingShadowDecl *S);
- void removeShadowDecl(UsingShadowDecl *S);
-
- static UsingDecl *Create(ASTContext &C, DeclContext *DC,
- SourceLocation UsingL,
- NestedNameSpecifierLoc QualifierLoc,
- const DeclarationNameInfo &NameInfo,
- bool HasTypenameKeyword);
-
- static UsingDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+ static UsingEnumDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
/// Retrieves the canonical declaration of this declaration.
- UsingDecl *getCanonicalDecl() override { return getFirstDecl(); }
- const UsingDecl *getCanonicalDecl() const { return getFirstDecl(); }
+ UsingEnumDecl *getCanonicalDecl() override {
+ return cast<UsingEnumDecl>(getFirstDecl());
+ }
+ const UsingEnumDecl *getCanonicalDecl() const {
+ return cast<UsingEnumDecl>(getFirstDecl());
+ }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
- static bool classofKind(Kind K) { return K == Using; }
+ static bool classofKind(Kind K) { return K == UsingEnum; }
};
/// Represents a pack of using declarations that a single
@@ -3759,6 +3883,28 @@ public:
static bool classofKind(Kind K) { return K == UnresolvedUsingTypename; }
};
+/// This node is generated when a using-declaration that was annotated with
+/// __attribute__((using_if_exists)) failed to resolve to a known declaration.
+/// In that case, Sema builds a UsingShadowDecl whose target is an instance of
+/// this declaration, adding it to the current scope. Referring to this
+/// declaration in any way is an error.
+class UnresolvedUsingIfExistsDecl final : public NamedDecl {
+ UnresolvedUsingIfExistsDecl(DeclContext *DC, SourceLocation Loc,
+ DeclarationName Name);
+
+ void anchor() override;
+
+public:
+ static UnresolvedUsingIfExistsDecl *Create(ASTContext &Ctx, DeclContext *DC,
+ SourceLocation Loc,
+ DeclarationName Name);
+ static UnresolvedUsingIfExistsDecl *CreateDeserialized(ASTContext &Ctx,
+ unsigned ID);
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == Decl::UnresolvedUsingIfExists; }
+};
+
/// Represents a C++11 static_assert declaration.
class StaticAssertDecl : public Decl {
llvm::PointerIntPair<Expr *, 1, bool> AssertExprAndFailed;
@@ -3811,7 +3957,7 @@ public:
/// DecompositionDecl of type 'int (&)[3]'.
class BindingDecl : public ValueDecl {
/// The declaration that this binding binds to part of.
- LazyDeclPtr Decomp;
+ ValueDecl *Decomp;
/// The binding represented by this declaration. References to this
/// declaration are effectively equivalent to this expression (except
/// that it is only evaluated once at the point of declaration of the
@@ -3837,7 +3983,7 @@ public:
/// Get the decomposition declaration that this binding represents a
/// decomposition of.
- ValueDecl *getDecomposedDecl() const;
+ ValueDecl *getDecomposedDecl() const { return Decomp; }
/// Get the variable (if any) that holds the value of evaluating the binding.
/// Only present for user-defined bindings for tuple-like types.
diff --git a/clang/include/clang/AST/DeclContextInternals.h b/clang/include/clang/AST/DeclContextInternals.h
index e6a4cd4381e4..2eef2343b750 100644
--- a/clang/include/clang/AST/DeclContextInternals.h
+++ b/clang/include/clang/AST/DeclContextInternals.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_AST_DECLCONTEXTINTERNALS_H
#define LLVM_CLANG_AST_DECLCONTEXTINTERNALS_H
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/DeclCXX.h"
@@ -21,8 +22,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
-#include "llvm/ADT/SmallVector.h"
-#include <algorithm>
#include <cassert>
namespace clang {
@@ -31,231 +30,287 @@ class DependentDiagnostic;
/// An array of decls optimized for the common case of only containing
/// one entry.
-struct StoredDeclsList {
- /// When in vector form, this is what the Data pointer points to.
- using DeclsTy = SmallVector<NamedDecl *, 4>;
+class StoredDeclsList {
+ using Decls = DeclListNode::Decls;
/// A collection of declarations, with a flag to indicate if we have
/// further external declarations.
- using DeclsAndHasExternalTy = llvm::PointerIntPair<DeclsTy *, 1, bool>;
+ using DeclsAndHasExternalTy = llvm::PointerIntPair<Decls, 1, bool>;
/// The stored data, which will be either a pointer to a NamedDecl,
- /// or a pointer to a vector with a flag to indicate if there are further
+ /// or a pointer to a list with a flag to indicate if there are further
/// external declarations.
- llvm::PointerUnion<NamedDecl *, DeclsAndHasExternalTy> Data;
+ DeclsAndHasExternalTy Data;
+
+ template<typename Fn>
+ void erase_if(Fn ShouldErase) {
+ Decls List = Data.getPointer();
+ if (!List)
+ return;
+ ASTContext &C = getASTContext();
+ DeclListNode::Decls NewHead = nullptr;
+ DeclListNode::Decls *NewLast = nullptr;
+ DeclListNode::Decls *NewTail = &NewHead;
+ while (true) {
+ if (!ShouldErase(*DeclListNode::iterator(List))) {
+ NewLast = NewTail;
+ *NewTail = List;
+ if (auto *Node = List.dyn_cast<DeclListNode*>()) {
+ NewTail = &Node->Rest;
+ List = Node->Rest;
+ } else {
+ break;
+ }
+ } else if (DeclListNode *N = List.dyn_cast<DeclListNode*>()) {
+ List = N->Rest;
+ C.DeallocateDeclListNode(N);
+ } else {
+ // We're discarding the last declaration in the list. The last node we
+ // want to keep (if any) will be of the form DeclListNode(D, <rest>);
+ // replace it with just D.
+ if (NewLast) {
+ DeclListNode *Node = NewLast->get<DeclListNode*>();
+ *NewLast = Node->D;
+ C.DeallocateDeclListNode(Node);
+ }
+ break;
+ }
+ }
+ Data.setPointer(NewHead);
+
+ assert(llvm::find_if(getLookupResult(), ShouldErase) ==
+ getLookupResult().end() && "Still exists!");
+ }
+
+ void erase(NamedDecl *ND) {
+ erase_if([ND](NamedDecl *D) { return D == ND; });
+ }
public:
StoredDeclsList() = default;
StoredDeclsList(StoredDeclsList &&RHS) : Data(RHS.Data) {
- RHS.Data = (NamedDecl *)nullptr;
+ RHS.Data.setPointer(nullptr);
+ RHS.Data.setInt(0);
+ }
+
+ void MaybeDeallocList() {
+ if (isNull())
+ return;
+ // If this is a list-form, free the list.
+ ASTContext &C = getASTContext();
+ Decls List = Data.getPointer();
+ while (DeclListNode *ToDealloc = List.dyn_cast<DeclListNode *>()) {
+ List = ToDealloc->Rest;
+ C.DeallocateDeclListNode(ToDealloc);
+ }
}
~StoredDeclsList() {
- // If this is a vector-form, free the vector.
- if (DeclsTy *Vector = getAsVector())
- delete Vector;
+ MaybeDeallocList();
}
StoredDeclsList &operator=(StoredDeclsList &&RHS) {
- if (DeclsTy *Vector = getAsVector())
- delete Vector;
+ MaybeDeallocList();
+
Data = RHS.Data;
- RHS.Data = (NamedDecl *)nullptr;
+ RHS.Data.setPointer(nullptr);
+ RHS.Data.setInt(0);
return *this;
}
- bool isNull() const { return Data.isNull(); }
+ bool isNull() const { return Data.getPointer().isNull(); }
- NamedDecl *getAsDecl() const {
- return Data.dyn_cast<NamedDecl *>();
+ ASTContext &getASTContext() {
+ assert(!isNull() && "No ASTContext.");
+ if (NamedDecl *ND = getAsDecl())
+ return ND->getASTContext();
+ return getAsList()->D->getASTContext();
}
- DeclsAndHasExternalTy getAsVectorAndHasExternal() const {
- return Data.dyn_cast<DeclsAndHasExternalTy>();
+ DeclsAndHasExternalTy getAsListAndHasExternal() const { return Data; }
+
+ NamedDecl *getAsDecl() const {
+ return getAsListAndHasExternal().getPointer().dyn_cast<NamedDecl *>();
}
- DeclsTy *getAsVector() const {
- return getAsVectorAndHasExternal().getPointer();
+ DeclListNode *getAsList() const {
+ return getAsListAndHasExternal().getPointer().dyn_cast<DeclListNode*>();
}
bool hasExternalDecls() const {
- return getAsVectorAndHasExternal().getInt();
+ return getAsListAndHasExternal().getInt();
}
void setHasExternalDecls() {
- if (DeclsTy *Vec = getAsVector())
- Data = DeclsAndHasExternalTy(Vec, true);
- else {
- DeclsTy *VT = new DeclsTy();
- if (NamedDecl *OldD = getAsDecl())
- VT->push_back(OldD);
- Data = DeclsAndHasExternalTy(VT, true);
- }
- }
-
- void setOnlyValue(NamedDecl *ND) {
- assert(!getAsVector() && "Not inline");
- Data = ND;
- // Make sure that Data is a plain NamedDecl* so we can use its address
- // at getLookupResult.
- assert(*(NamedDecl **)&Data == ND &&
- "PointerUnion mangles the NamedDecl pointer!");
+ Data.setInt(1);
}
void remove(NamedDecl *D) {
assert(!isNull() && "removing from empty list");
- if (NamedDecl *Singleton = getAsDecl()) {
- assert(Singleton == D && "list is different singleton");
- (void)Singleton;
- Data = (NamedDecl *)nullptr;
- return;
- }
-
- DeclsTy &Vec = *getAsVector();
- DeclsTy::iterator I = llvm::find(Vec, D);
- assert(I != Vec.end() && "list does not contain decl");
- Vec.erase(I);
-
- assert(llvm::find(Vec, D) == Vec.end() && "list still contains decl");
+ erase(D);
}
- /// Remove any declarations which were imported from an external
- /// AST source.
+ /// Remove any declarations which were imported from an external AST source.
void removeExternalDecls() {
- if (isNull()) {
- // Nothing to do.
- } else if (NamedDecl *Singleton = getAsDecl()) {
- if (Singleton->isFromASTFile())
- *this = StoredDeclsList();
- } else {
- DeclsTy &Vec = *getAsVector();
- Vec.erase(std::remove_if(Vec.begin(), Vec.end(),
- [](Decl *D) { return D->isFromASTFile(); }),
- Vec.end());
- // Don't have any external decls any more.
- Data = DeclsAndHasExternalTy(&Vec, false);
- }
+ erase_if([](NamedDecl *ND) { return ND->isFromASTFile(); });
+
+ // Don't have any pending external decls any more.
+ Data.setInt(0);
}
- /// getLookupResult - Return an array of all the decls that this list
- /// represents.
- DeclContext::lookup_result getLookupResult() {
- if (isNull())
- return DeclContext::lookup_result();
+ void replaceExternalDecls(ArrayRef<NamedDecl*> Decls) {
+ // Remove all declarations that are either external or are replaced with
+ // external declarations.
+ erase_if([Decls](NamedDecl *ND) {
+ if (ND->isFromASTFile())
+ return true;
+ for (NamedDecl *D : Decls)
+ if (D->declarationReplaces(ND, /*IsKnownNewer=*/false))
+ return true;
+ return false;
+ });
- // If we have a single NamedDecl, return it.
- if (NamedDecl *ND = getAsDecl()) {
- assert(!isNull() && "Empty list isn't allowed");
+ // Don't have any pending external decls any more.
+ Data.setInt(0);
+
+ if (Decls.empty())
+ return;
+
+ // Convert Decls into a list, in order.
+ ASTContext &C = Decls.front()->getASTContext();
+ DeclListNode::Decls DeclsAsList = Decls.back();
+ for (size_t I = Decls.size() - 1; I != 0; --I) {
+ DeclListNode *Node = C.AllocateDeclListNode(Decls[I - 1]);
+ Node->Rest = DeclsAsList;
+ DeclsAsList = Node;
+ }
- // Data is a raw pointer to a NamedDecl*, return it.
- return DeclContext::lookup_result(ND);
+ DeclListNode::Decls Head = Data.getPointer();
+ if (Head.isNull()) {
+ Data.setPointer(DeclsAsList);
+ return;
}
- assert(getAsVector() && "Must have a vector at this point");
- DeclsTy &Vector = *getAsVector();
+ // Find the end of the existing list.
+ // FIXME: It would be possible to preserve information from erase_if to
+ // avoid this rescan looking for the end of the list.
+ DeclListNode::Decls *Tail = &Head;
+ while (DeclListNode *Node = Tail->dyn_cast<DeclListNode *>())
+ Tail = &Node->Rest;
+
+ // Append the Decls.
+ DeclListNode *Node = C.AllocateDeclListNode(Tail->get<NamedDecl *>());
+ Node->Rest = DeclsAsList;
+ *Tail = Node;
+ Data.setPointer(Head);
+ }
- // Otherwise, we have a range result.
- return DeclContext::lookup_result(Vector);
+ /// Return an array of all the decls that this list represents.
+ DeclContext::lookup_result getLookupResult() const {
+ return DeclContext::lookup_result(Data.getPointer());
}
- /// HandleRedeclaration - If this is a redeclaration of an existing decl,
- /// replace the old one with D and return true. Otherwise return false.
- bool HandleRedeclaration(NamedDecl *D, bool IsKnownNewer) {
+ /// If this is a redeclaration of an existing decl, replace the old one with
+ /// D. Otherwise, append D.
+ void addOrReplaceDecl(NamedDecl *D) {
+ const bool IsKnownNewer = true;
+
+ if (isNull()) {
+ Data.setPointer(D);
+ return;
+ }
+
// Most decls only have one entry in their list, special case it.
if (NamedDecl *OldD = getAsDecl()) {
- if (!D->declarationReplaces(OldD, IsKnownNewer))
- return false;
- setOnlyValue(D);
- return true;
+ if (D->declarationReplaces(OldD, IsKnownNewer)) {
+ Data.setPointer(D);
+ return;
+ }
+
+ // Add D after OldD.
+ ASTContext &C = D->getASTContext();
+ DeclListNode *Node = C.AllocateDeclListNode(OldD);
+ Node->Rest = D;
+ Data.setPointer(Node);
+ return;
}
+ // FIXME: Move the assert before the single decl case when we fix the
+ // duplication coming from the ASTReader reading builtin types.
+ assert(!llvm::is_contained(getLookupResult(), D) && "Already exists!");
// Determine if this declaration is actually a redeclaration.
- DeclsTy &Vec = *getAsVector();
- for (DeclsTy::iterator OD = Vec.begin(), ODEnd = Vec.end();
- OD != ODEnd; ++OD) {
- NamedDecl *OldD = *OD;
- if (D->declarationReplaces(OldD, IsKnownNewer)) {
- *OD = D;
- return true;
+ for (DeclListNode *N = getAsList(); /*return in loop*/;
+ N = N->Rest.dyn_cast<DeclListNode *>()) {
+ if (D->declarationReplaces(N->D, IsKnownNewer)) {
+ N->D = D;
+ return;
+ }
+ if (auto *ND = N->Rest.dyn_cast<NamedDecl *>()) {
+ if (D->declarationReplaces(ND, IsKnownNewer)) {
+ N->Rest = D;
+ return;
+ }
+
+ // Add D after ND.
+ ASTContext &C = D->getASTContext();
+ DeclListNode *Node = C.AllocateDeclListNode(ND);
+ N->Rest = Node;
+ Node->Rest = D;
+ return;
}
}
-
- return false;
}
- /// AddSubsequentDecl - This is called on the second and later decl when it is
- /// not a redeclaration to merge it into the appropriate place in our list.
- void AddSubsequentDecl(NamedDecl *D) {
- assert(!isNull() && "don't AddSubsequentDecl when we have no decls");
+ /// Add a declaration to the list without checking if it replaces anything.
+ void prependDeclNoReplace(NamedDecl *D) {
+ if (isNull()) {
+ Data.setPointer(D);
+ return;
+ }
- // If this is the second decl added to the list, convert this to vector
- // form.
- if (NamedDecl *OldD = getAsDecl()) {
- DeclsTy *VT = new DeclsTy();
- VT->push_back(OldD);
- Data = DeclsAndHasExternalTy(VT, false);
+ ASTContext &C = D->getASTContext();
+ DeclListNode *Node = C.AllocateDeclListNode(D);
+ Node->Rest = Data.getPointer();
+ Data.setPointer(Node);
+ }
+
+ LLVM_DUMP_METHOD void dump() const {
+ Decls D = Data.getPointer();
+ if (!D) {
+ llvm::errs() << "<null>\n";
+ return;
}
- DeclsTy &Vec = *getAsVector();
-
- // Using directives end up in a special entry which contains only
- // other using directives, so all this logic is wasted for them.
- // But avoiding the logic wastes time in the far-more-common case
- // that we're *not* adding a new using directive.
-
- // Tag declarations always go at the end of the list so that an
- // iterator which points at the first tag will start a span of
- // decls that only contains tags.
- if (D->hasTagIdentifierNamespace())
- Vec.push_back(D);
-
- // Resolved using declarations go at the front of the list so that
- // they won't show up in other lookup results. Unresolved using
- // declarations (which are always in IDNS_Using | IDNS_Ordinary)
- // follow that so that the using declarations will be contiguous.
- else if (D->getIdentifierNamespace() & Decl::IDNS_Using) {
- DeclsTy::iterator I = Vec.begin();
- if (D->getIdentifierNamespace() != Decl::IDNS_Using) {
- while (I != Vec.end() &&
- (*I)->getIdentifierNamespace() == Decl::IDNS_Using)
- ++I;
+ while (true) {
+ if (auto *Node = D.dyn_cast<DeclListNode*>()) {
+ llvm::errs() << '[' << Node->D << "] -> ";
+ D = Node->Rest;
+ } else {
+ llvm::errs() << '[' << D.get<NamedDecl*>() << "]\n";
+ return;
}
- Vec.insert(I, D);
-
- // All other declarations go at the end of the list, but before any
- // tag declarations. But we can be clever about tag declarations
- // because there can only ever be one in a scope.
- } else if (!Vec.empty() && Vec.back()->hasTagIdentifierNamespace()) {
- NamedDecl *TagD = Vec.back();
- Vec.back() = D;
- Vec.push_back(TagD);
- } else
- Vec.push_back(D);
+ }
}
};
class StoredDeclsMap
: public llvm::SmallDenseMap<DeclarationName, StoredDeclsList, 4> {
-public:
- static void DestroyAll(StoredDeclsMap *Map, bool Dependent);
-
-private:
friend class ASTContext; // walks the chain deleting these
friend class DeclContext;
llvm::PointerIntPair<StoredDeclsMap*, 1> Previous;
+public:
+ static void DestroyAll(StoredDeclsMap *Map, bool Dependent);
};
class DependentStoredDeclsMap : public StoredDeclsMap {
-public:
- DependentStoredDeclsMap() = default;
-
-private:
friend class DeclContext; // iterates over diagnostics
friend class DependentDiagnostic;
DependentDiagnostic *FirstDiagnostic = nullptr;
+public:
+ DependentStoredDeclsMap() = default;
};
} // namespace clang
diff --git a/clang/include/clang/AST/DeclObjC.h b/clang/include/clang/AST/DeclObjC.h
index b1bce069920c..6bb9cdf67034 100644
--- a/clang/include/clang/AST/DeclObjC.h
+++ b/clang/include/clang/AST/DeclObjC.h
@@ -852,9 +852,7 @@ public:
bool isClassProperty() const {
return PropertyAttributes & ObjCPropertyAttribute::kind_class;
}
- bool isDirectProperty() const {
- return PropertyAttributes & ObjCPropertyAttribute::kind_direct;
- }
+ bool isDirectProperty() const;
ObjCPropertyQueryKind getQueryKind() const {
return isClassProperty() ? ObjCPropertyQueryKind::OBJC_PR_query_class :
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h
index 7fbf6294970e..cbaa287f225a 100644
--- a/clang/include/clang/AST/DeclTemplate.h
+++ b/clang/include/clang/AST/DeclTemplate.h
@@ -160,9 +160,7 @@ public:
/// Determine whether this template parameter list contains an
/// unexpanded parameter pack.
- bool containsUnexpandedParameterPack() const {
- return ContainsUnexpandedParameterPack;
- }
+ bool containsUnexpandedParameterPack() const;
/// Determine whether this template parameter list contains a parameter pack.
bool hasParameterPack() const {
@@ -204,6 +202,9 @@ public:
bool OmitTemplateKW = false) const;
void print(raw_ostream &Out, const ASTContext &Context,
const PrintingPolicy &Policy, bool OmitTemplateKW = false) const;
+
+ static bool shouldIncludeTypeForArgument(const TemplateParameterList *TPL,
+ unsigned Idx);
};
/// Stores a list of template parameters and the associated
diff --git a/clang/include/clang/AST/DeclarationName.h b/clang/include/clang/AST/DeclarationName.h
index 3cb0a02ff49b..38da6fc727fb 100644
--- a/clang/include/clang/AST/DeclarationName.h
+++ b/clang/include/clang/AST/DeclarationName.h
@@ -647,7 +647,7 @@ public:
/// DeclarationNameLoc - Additional source/type location info
/// for a declaration name. Needs a DeclarationName in order
/// to be interpreted correctly.
-struct DeclarationNameLoc {
+class DeclarationNameLoc {
// The source location for identifier stored elsewhere.
// struct {} Identifier;
@@ -660,13 +660,13 @@ struct DeclarationNameLoc {
// The location (if any) of the operator keyword is stored elsewhere.
struct CXXOpName {
- unsigned BeginOpNameLoc;
- unsigned EndOpNameLoc;
+ SourceLocation::UIntTy BeginOpNameLoc;
+ SourceLocation::UIntTy EndOpNameLoc;
};
// The location (if any) of the operator keyword is stored elsewhere.
struct CXXLitOpName {
- unsigned OpNameLoc;
+ SourceLocation::UIntTy OpNameLoc;
};
// struct {} CXXUsingDirective;
@@ -679,10 +679,78 @@ struct DeclarationNameLoc {
struct CXXLitOpName CXXLiteralOperatorName;
};
- DeclarationNameLoc(DeclarationName Name);
+ void setNamedTypeLoc(TypeSourceInfo *TInfo) { NamedType.TInfo = TInfo; }
+
+ void setCXXOperatorNameRange(SourceRange Range) {
+ CXXOperatorName.BeginOpNameLoc = Range.getBegin().getRawEncoding();
+ CXXOperatorName.EndOpNameLoc = Range.getEnd().getRawEncoding();
+ }
+ void setCXXLiteralOperatorNameLoc(SourceLocation Loc) {
+ CXXLiteralOperatorName.OpNameLoc = Loc.getRawEncoding();
+ }
+
+public:
+ DeclarationNameLoc(DeclarationName Name);
// FIXME: this should go away once all DNLocs are properly initialized.
DeclarationNameLoc() { memset((void*) this, 0, sizeof(*this)); }
+
+ /// Returns the source type info. Assumes that the object stores location
+ /// information of a constructor, destructor or conversion operator.
+ TypeSourceInfo *getNamedTypeInfo() const { return NamedType.TInfo; }
+
+ /// Return the beginning location of the getCXXOperatorNameRange() range.
+ SourceLocation getCXXOperatorNameBeginLoc() const {
+ return SourceLocation::getFromRawEncoding(CXXOperatorName.BeginOpNameLoc);
+ }
+
+ /// Return the end location of the getCXXOperatorNameRange() range.
+ SourceLocation getCXXOperatorNameEndLoc() const {
+ return SourceLocation::getFromRawEncoding(CXXOperatorName.EndOpNameLoc);
+ }
+
+ /// Return the range of the operator name (without the operator keyword).
+ /// Assumes that the object stores location information of a (non-literal)
+ /// operator.
+ SourceRange getCXXOperatorNameRange() const {
+ return SourceRange(getCXXOperatorNameBeginLoc(),
+ getCXXOperatorNameEndLoc());
+ }
+
+ /// Return the location of the literal operator name (without the operator
+ /// keyword). Assumes that the object stores location information of a literal
+ /// operator.
+ SourceLocation getCXXLiteralOperatorNameLoc() const {
+ return SourceLocation::getFromRawEncoding(CXXLiteralOperatorName.OpNameLoc);
+ }
+
+ /// Construct location information for a constructor, destructor or conversion
+ /// operator.
+ static DeclarationNameLoc makeNamedTypeLoc(TypeSourceInfo *TInfo) {
+ DeclarationNameLoc DNL;
+ DNL.setNamedTypeLoc(TInfo);
+ return DNL;
+ }
+
+ /// Construct location information for a non-literal C++ operator.
+ static DeclarationNameLoc makeCXXOperatorNameLoc(SourceLocation BeginLoc,
+ SourceLocation EndLoc) {
+ return makeCXXOperatorNameLoc(SourceRange(BeginLoc, EndLoc));
+ }
+
+ /// Construct location information for a non-literal C++ operator.
+ static DeclarationNameLoc makeCXXOperatorNameLoc(SourceRange Range) {
+ DeclarationNameLoc DNL;
+ DNL.setCXXOperatorNameRange(Range);
+ return DNL;
+ }
+
+ /// Construct location information for a literal C++ operator.
+ static DeclarationNameLoc makeCXXLiteralOperatorNameLoc(SourceLocation Loc) {
+ DeclarationNameLoc DNL;
+ DNL.setCXXLiteralOperatorNameLoc(Loc);
+ return DNL;
+ }
};
/// DeclarationNameInfo - A collector data type for bundling together
@@ -722,7 +790,6 @@ public:
void setLoc(SourceLocation L) { NameLoc = L; }
const DeclarationNameLoc &getInfo() const { return LocInfo; }
- DeclarationNameLoc &getInfo() { return LocInfo; }
void setInfo(const DeclarationNameLoc &Info) { LocInfo = Info; }
/// getNamedTypeInfo - Returns the source type info associated to
@@ -732,7 +799,7 @@ public:
Name.getNameKind() != DeclarationName::CXXDestructorName &&
Name.getNameKind() != DeclarationName::CXXConversionFunctionName)
return nullptr;
- return LocInfo.NamedType.TInfo;
+ return LocInfo.getNamedTypeInfo();
}
/// setNamedTypeInfo - Sets the source type info associated to
@@ -741,7 +808,7 @@ public:
assert(Name.getNameKind() == DeclarationName::CXXConstructorName ||
Name.getNameKind() == DeclarationName::CXXDestructorName ||
Name.getNameKind() == DeclarationName::CXXConversionFunctionName);
- LocInfo.NamedType.TInfo = TInfo;
+ LocInfo = DeclarationNameLoc::makeNamedTypeLoc(TInfo);
}
/// getCXXOperatorNameRange - Gets the range of the operator name
@@ -749,18 +816,14 @@ public:
SourceRange getCXXOperatorNameRange() const {
if (Name.getNameKind() != DeclarationName::CXXOperatorName)
return SourceRange();
- return SourceRange(
- SourceLocation::getFromRawEncoding(LocInfo.CXXOperatorName.BeginOpNameLoc),
- SourceLocation::getFromRawEncoding(LocInfo.CXXOperatorName.EndOpNameLoc)
- );
+ return LocInfo.getCXXOperatorNameRange();
}
/// setCXXOperatorNameRange - Sets the range of the operator name
/// (without the operator keyword). Assumes it is a C++ operator.
void setCXXOperatorNameRange(SourceRange R) {
assert(Name.getNameKind() == DeclarationName::CXXOperatorName);
- LocInfo.CXXOperatorName.BeginOpNameLoc = R.getBegin().getRawEncoding();
- LocInfo.CXXOperatorName.EndOpNameLoc = R.getEnd().getRawEncoding();
+ LocInfo = DeclarationNameLoc::makeCXXOperatorNameLoc(R);
}
/// getCXXLiteralOperatorNameLoc - Returns the location of the literal
@@ -769,8 +832,7 @@ public:
SourceLocation getCXXLiteralOperatorNameLoc() const {
if (Name.getNameKind() != DeclarationName::CXXLiteralOperatorName)
return SourceLocation();
- return SourceLocation::
- getFromRawEncoding(LocInfo.CXXLiteralOperatorName.OpNameLoc);
+ return LocInfo.getCXXLiteralOperatorNameLoc();
}
/// setCXXLiteralOperatorNameLoc - Sets the location of the literal
@@ -778,7 +840,7 @@ public:
/// Assumes it is a literal operator.
void setCXXLiteralOperatorNameLoc(SourceLocation Loc) {
assert(Name.getNameKind() == DeclarationName::CXXLiteralOperatorName);
- LocInfo.CXXLiteralOperatorName.OpNameLoc = Loc.getRawEncoding();
+ LocInfo = DeclarationNameLoc::makeCXXLiteralOperatorNameLoc(Loc);
}
/// Determine whether this name involves a template parameter.
diff --git a/clang/include/clang/AST/DependenceFlags.h b/clang/include/clang/AST/DependenceFlags.h
index ca96b65574bd..62efdb4ce6e4 100644
--- a/clang/include/clang/AST/DependenceFlags.h
+++ b/clang/include/clang/AST/DependenceFlags.h
@@ -128,6 +128,9 @@ public:
// Type depends on a runtime value (variable-length array).
VariablyModified = 32,
+ // Dependence that is propagated syntactically, regardless of semantics.
+ Syntactic = UnexpandedPack | Instantiation | Error,
+
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/VariablyModified)
};
@@ -165,6 +168,13 @@ public:
translate(D, TNDependence::Dependent, Dependent) |
translate(D, TNDependence::Error, Error)) {}
+ /// Extract only the syntactic portions of this type's dependence.
+ Dependence syntactic() {
+ Dependence Result = *this;
+ Result.V &= Syntactic;
+ return Result;
+ }
+
TypeDependence type() const {
return translate(V, UnexpandedPack, TypeDependence::UnexpandedPack) |
translate(V, Instantiation, TypeDependence::Instantiation) |
@@ -256,6 +266,10 @@ inline TypeDependence toTypeDependence(TemplateArgumentDependence D) {
return Dependence(D).type();
}
+inline TypeDependence toSyntacticDependence(TypeDependence D) {
+ return Dependence(D).syntactic().type();
+}
+
inline NestedNameSpecifierDependence
toNestedNameSpecifierDependendence(TypeDependence D) {
return Dependence(D).nestedNameSpecifier();
diff --git a/clang/include/clang/AST/EvaluatedExprVisitor.h b/clang/include/clang/AST/EvaluatedExprVisitor.h
index 2f6c314b4111..2991f2859ac4 100644
--- a/clang/include/clang/AST/EvaluatedExprVisitor.h
+++ b/clang/include/clang/AST/EvaluatedExprVisitor.h
@@ -32,6 +32,9 @@ protected:
const ASTContext &Context;
public:
+ // Return whether this visitor should recurse into discarded statements for a
+ // 'constexpr-if'.
+ bool shouldVisitDiscardedStmt() const { return true; }
#define PTR(CLASS) typename Ptr<CLASS>::type
explicit EvaluatedExprVisitorBase(const ASTContext &Context) : Context(Context) { }
@@ -83,7 +86,7 @@ public:
void VisitCallExpr(PTR(CallExpr) CE) {
if (!CE->isUnevaluatedBuiltinCall(Context))
- return static_cast<ImplClass*>(this)->VisitExpr(CE);
+ return getDerived().VisitExpr(CE);
}
void VisitLambdaExpr(PTR(LambdaExpr) LE) {
@@ -103,6 +106,20 @@ public:
this->Visit(SubStmt);
}
+ void VisitIfStmt(PTR(IfStmt) If) {
+ if (!getDerived().shouldVisitDiscardedStmt()) {
+ if (auto SubStmt = If->getNondiscardedCase(Context)) {
+ if (*SubStmt)
+ this->Visit(*SubStmt);
+ return;
+ }
+ }
+
+ getDerived().VisitStmt(If);
+ }
+
+ ImplClass &getDerived() { return *static_cast<ImplClass *>(this); }
+
#undef PTR
};
diff --git a/clang/include/clang/AST/Expr.h b/clang/include/clang/AST/Expr.h
index a44d06967431..06164411cc2d 100644
--- a/clang/include/clang/AST/Expr.h
+++ b/clang/include/clang/AST/Expr.h
@@ -266,13 +266,11 @@ public:
/// C++11 divides the concept of "r-value" into pure r-values
/// ("pr-values") and so-called expiring values ("x-values"), which
/// identify specific objects that can be safely cannibalized for
- /// their resources. This is an unfortunate abuse of terminology on
- /// the part of the C++ committee. In Clang, when we say "r-value",
- /// we generally mean a pr-value.
+ /// their resources.
bool isLValue() const { return getValueKind() == VK_LValue; }
- bool isRValue() const { return getValueKind() == VK_RValue; }
+ bool isPRValue() const { return getValueKind() == VK_PRValue; }
bool isXValue() const { return getValueKind() == VK_XValue; }
- bool isGLValue() const { return getValueKind() != VK_RValue; }
+ bool isGLValue() const { return getValueKind() != VK_PRValue; }
enum LValueClassification {
LV_Valid,
@@ -425,7 +423,7 @@ public:
? VK_LValue
: (RT->getPointeeType()->isFunctionType()
? VK_LValue : VK_XValue));
- return VK_RValue;
+ return VK_PRValue;
}
/// getValueKind - The value kind that this expression produces.
@@ -1588,8 +1586,8 @@ public:
// type should be IntTy
CharacterLiteral(unsigned value, CharacterKind kind, QualType type,
SourceLocation l)
- : Expr(CharacterLiteralClass, type, VK_RValue, OK_Ordinary), Value(value),
- Loc(l) {
+ : Expr(CharacterLiteralClass, type, VK_PRValue, OK_Ordinary),
+ Value(value), Loc(l) {
CharacterLiteralBits.Kind = kind;
setDependence(ExprDependence::None);
}
@@ -1615,6 +1613,8 @@ public:
return T->getStmtClass() == CharacterLiteralClass;
}
+ static void print(unsigned val, CharacterKind Kind, raw_ostream &OS);
+
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
@@ -1707,7 +1707,7 @@ class ImaginaryLiteral : public Expr {
Stmt *Val;
public:
ImaginaryLiteral(Expr *val, QualType Ty)
- : Expr(ImaginaryLiteralClass, Ty, VK_RValue, OK_Ordinary), Val(val) {
+ : Expr(ImaginaryLiteralClass, Ty, VK_PRValue, OK_Ordinary), Val(val) {
setDependence(ExprDependence::None);
}
@@ -2037,6 +2037,64 @@ public:
}
};
+// This represents a use of the __builtin_sycl_unique_stable_name, which takes a
+// type-id, and at CodeGen time emits a unique string representation of the
+// type in a way that permits us to properly encode information about the SYCL
+// kernels.
+class SYCLUniqueStableNameExpr final : public Expr {
+ friend class ASTStmtReader;
+ SourceLocation OpLoc, LParen, RParen;
+ TypeSourceInfo *TypeInfo;
+
+ SYCLUniqueStableNameExpr(EmptyShell Empty, QualType ResultTy);
+ SYCLUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen,
+ SourceLocation RParen, QualType ResultTy,
+ TypeSourceInfo *TSI);
+
+ void setTypeSourceInfo(TypeSourceInfo *Ty) { TypeInfo = Ty; }
+
+ void setLocation(SourceLocation L) { OpLoc = L; }
+ void setLParenLocation(SourceLocation L) { LParen = L; }
+ void setRParenLocation(SourceLocation L) { RParen = L; }
+
+public:
+ TypeSourceInfo *getTypeSourceInfo() { return TypeInfo; }
+
+ const TypeSourceInfo *getTypeSourceInfo() const { return TypeInfo; }
+
+ static SYCLUniqueStableNameExpr *
+ Create(const ASTContext &Ctx, SourceLocation OpLoc, SourceLocation LParen,
+ SourceLocation RParen, TypeSourceInfo *TSI);
+
+ static SYCLUniqueStableNameExpr *CreateEmpty(const ASTContext &Ctx);
+
+ SourceLocation getBeginLoc() const { return getLocation(); }
+ SourceLocation getEndLoc() const { return RParen; }
+ SourceLocation getLocation() const { return OpLoc; }
+ SourceLocation getLParenLocation() const { return LParen; }
+ SourceLocation getRParenLocation() const { return RParen; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SYCLUniqueStableNameExprClass;
+ }
+
+ // Iterators
+ child_range children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+
+ const_child_range children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ // Convenience function to generate the name of the currently stored type.
+ std::string ComputeName(ASTContext &Context) const;
+
+ // Get the generated name of the type. Note that this only works after all
+ // kernels have been instantiated.
+ static std::string ComputeName(ASTContext &Context, QualType Ty);
+};
+
/// ParenExpr - This represents a parenthesized expression, e.g. "(1)". This
/// AST node is only formed if full location information is requested.
class ParenExpr : public Expr {
@@ -2487,7 +2545,8 @@ public:
UnaryExprOrTypeTraitExpr(UnaryExprOrTypeTrait ExprKind, TypeSourceInfo *TInfo,
QualType resultType, SourceLocation op,
SourceLocation rp)
- : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary),
+ : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_PRValue,
+ OK_Ordinary),
OpLoc(op), RParenLoc(rp) {
assert(ExprKind <= UETT_Last && "invalid enum value!");
UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
@@ -2928,11 +2987,22 @@ public:
}
/// setArg - Set the specified argument.
+  /// Note: the dependence bits might be stale after calling this setter; it
+  /// is the *caller*'s responsibility to recompute them by calling
+  /// computeDependence().
void setArg(unsigned Arg, Expr *ArgExpr) {
assert(Arg < getNumArgs() && "Arg access out of range!");
getArgs()[Arg] = ArgExpr;
}
+ /// Compute and set dependence bits.
+ void computeDependence() {
+ setDependence(clang::computeDependence(
+ this, llvm::makeArrayRef(
+ reinterpret_cast<Expr **>(getTrailingStmts() + PREARGS_START),
+ getNumPreArgs())));
+ }
+
/// Reduce the number of arguments in this call expression. This is used for
/// example during error recovery to drop extra arguments. There is no way
/// to perform the opposite because: 1.) We don't track how much storage
@@ -4227,7 +4297,7 @@ class AddrLabelExpr : public Expr {
public:
AddrLabelExpr(SourceLocation AALoc, SourceLocation LLoc, LabelDecl *L,
QualType t)
- : Expr(AddrLabelExprClass, t, VK_RValue, OK_Ordinary), AmpAmpLoc(AALoc),
+ : Expr(AddrLabelExprClass, t, VK_PRValue, OK_Ordinary), AmpAmpLoc(AALoc),
LabelLoc(LLoc), Label(L) {
setDependence(ExprDependence::None);
}
@@ -4272,7 +4342,7 @@ class StmtExpr : public Expr {
public:
StmtExpr(CompoundStmt *SubStmt, QualType T, SourceLocation LParenLoc,
SourceLocation RParenLoc, unsigned TemplateDepth)
- : Expr(StmtExprClass, T, VK_RValue, OK_Ordinary), SubStmt(SubStmt),
+ : Expr(StmtExprClass, T, VK_PRValue, OK_Ordinary), SubStmt(SubStmt),
LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
setDependence(computeDependence(this, TemplateDepth));
// FIXME: A templated statement expression should have an associated
@@ -4522,7 +4592,7 @@ class GNUNullExpr : public Expr {
public:
GNUNullExpr(QualType Ty, SourceLocation Loc)
- : Expr(GNUNullExprClass, Ty, VK_RValue, OK_Ordinary), TokenLoc(Loc) {
+ : Expr(GNUNullExprClass, Ty, VK_PRValue, OK_Ordinary), TokenLoc(Loc) {
setDependence(ExprDependence::None);
}
@@ -4557,7 +4627,7 @@ class VAArgExpr : public Expr {
public:
VAArgExpr(SourceLocation BLoc, Expr *e, TypeSourceInfo *TInfo,
SourceLocation RPLoc, QualType t, bool IsMS)
- : Expr(VAArgExprClass, t, VK_RValue, OK_Ordinary), Val(e),
+ : Expr(VAArgExprClass, t, VK_PRValue, OK_Ordinary), Val(e),
TInfo(TInfo, IsMS), BuiltinLoc(BLoc), RParenLoc(RPLoc) {
setDependence(computeDependence(this));
}
@@ -5249,7 +5319,7 @@ public:
class NoInitExpr : public Expr {
public:
explicit NoInitExpr(QualType ty)
- : Expr(NoInitExprClass, ty, VK_RValue, OK_Ordinary) {
+ : Expr(NoInitExprClass, ty, VK_PRValue, OK_Ordinary) {
setDependence(computeDependence(this));
}
@@ -5345,7 +5415,7 @@ class ArrayInitLoopExpr : public Expr {
public:
explicit ArrayInitLoopExpr(QualType T, Expr *CommonInit, Expr *ElementInit)
- : Expr(ArrayInitLoopExprClass, T, VK_RValue, OK_Ordinary),
+ : Expr(ArrayInitLoopExprClass, T, VK_PRValue, OK_Ordinary),
SubExprs{CommonInit, ElementInit} {
setDependence(computeDependence(this));
}
@@ -5396,7 +5466,7 @@ class ArrayInitIndexExpr : public Expr {
public:
explicit ArrayInitIndexExpr(QualType T)
- : Expr(ArrayInitIndexExprClass, T, VK_RValue, OK_Ordinary) {
+ : Expr(ArrayInitIndexExprClass, T, VK_PRValue, OK_Ordinary) {
setDependence(ExprDependence::None);
}
@@ -5429,7 +5499,7 @@ public:
class ImplicitValueInitExpr : public Expr {
public:
explicit ImplicitValueInitExpr(QualType ty)
- : Expr(ImplicitValueInitExprClass, ty, VK_RValue, OK_Ordinary) {
+ : Expr(ImplicitValueInitExprClass, ty, VK_PRValue, OK_Ordinary) {
setDependence(computeDependence(this));
}
@@ -5834,7 +5904,7 @@ public:
ExtVectorElementExpr(QualType ty, ExprValueKind VK, Expr *base,
IdentifierInfo &accessor, SourceLocation loc)
: Expr(ExtVectorElementExprClass, ty, VK,
- (VK == VK_RValue ? OK_Ordinary : OK_VectorComponent)),
+ (VK == VK_PRValue ? OK_Ordinary : OK_VectorComponent)),
Base(base), Accessor(&accessor), AccessorLoc(loc) {
setDependence(computeDependence(this));
}
@@ -5891,7 +5961,7 @@ protected:
BlockDecl *TheBlock;
public:
BlockExpr(BlockDecl *BD, QualType ty)
- : Expr(BlockExprClass, ty, VK_RValue, OK_Ordinary), TheBlock(BD) {
+ : Expr(BlockExprClass, ty, VK_PRValue, OK_Ordinary), TheBlock(BD) {
setDependence(computeDependence(this));
}
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index fbeeb4004f7d..161287adce4c 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -721,7 +721,7 @@ public:
class CXXBoolLiteralExpr : public Expr {
public:
CXXBoolLiteralExpr(bool Val, QualType Ty, SourceLocation Loc)
- : Expr(CXXBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary) {
+ : Expr(CXXBoolLiteralExprClass, Ty, VK_PRValue, OK_Ordinary) {
CXXBoolLiteralExprBits.Value = Val;
CXXBoolLiteralExprBits.Loc = Loc;
setDependence(ExprDependence::None);
@@ -759,7 +759,7 @@ public:
class CXXNullPtrLiteralExpr : public Expr {
public:
CXXNullPtrLiteralExpr(QualType Ty, SourceLocation Loc)
- : Expr(CXXNullPtrLiteralExprClass, Ty, VK_RValue, OK_Ordinary) {
+ : Expr(CXXNullPtrLiteralExprClass, Ty, VK_PRValue, OK_Ordinary) {
CXXNullPtrLiteralExprBits.Loc = Loc;
setDependence(ExprDependence::None);
}
@@ -799,7 +799,7 @@ public:
friend class ASTStmtReader;
CXXStdInitializerListExpr(QualType Ty, Expr *SubExpr)
- : Expr(CXXStdInitializerListExprClass, Ty, VK_RValue, OK_Ordinary),
+ : Expr(CXXStdInitializerListExprClass, Ty, VK_PRValue, OK_Ordinary),
SubExpr(SubExpr) {
setDependence(computeDependence(this));
}
@@ -1142,7 +1142,7 @@ public:
class CXXThisExpr : public Expr {
public:
CXXThisExpr(SourceLocation L, QualType Ty, bool IsImplicit)
- : Expr(CXXThisExprClass, Ty, VK_RValue, OK_Ordinary) {
+ : Expr(CXXThisExprClass, Ty, VK_PRValue, OK_Ordinary) {
CXXThisExprBits.IsImplicit = IsImplicit;
CXXThisExprBits.Loc = L;
setDependence(computeDependence(this));
@@ -1191,7 +1191,7 @@ public:
// null if not present.
CXXThrowExpr(Expr *Operand, QualType Ty, SourceLocation Loc,
bool IsThrownVariableInScope)
- : Expr(CXXThrowExprClass, Ty, VK_RValue, OK_Ordinary), Operand(Operand) {
+ : Expr(CXXThrowExprClass, Ty, VK_PRValue, OK_Ordinary), Operand(Operand) {
CXXThrowExprBits.ThrowLoc = Loc;
CXXThrowExprBits.IsThrownVariableInScope = IsThrownVariableInScope;
setDependence(computeDependence(this));
@@ -1257,7 +1257,7 @@ class CXXDefaultArgExpr final : public Expr {
Param->getDefaultArg()->getObjectKind()),
Param(Param), UsedContext(UsedContext) {
CXXDefaultArgExprBits.Loc = Loc;
- setDependence(ExprDependence::None);
+ setDependence(computeDependence(this));
}
public:
@@ -1414,7 +1414,7 @@ class CXXBindTemporaryExpr : public Expr {
Stmt *SubExpr = nullptr;
CXXBindTemporaryExpr(CXXTemporary *temp, Expr *SubExpr)
- : Expr(CXXBindTemporaryExprClass, SubExpr->getType(), VK_RValue,
+ : Expr(CXXBindTemporaryExprClass, SubExpr->getType(), VK_PRValue,
OK_Ordinary),
Temp(temp), SubExpr(SubExpr) {
setDependence(computeDependence(this));
@@ -1669,7 +1669,7 @@ public:
CXXInheritedCtorInitExpr(SourceLocation Loc, QualType T,
CXXConstructorDecl *Ctor, bool ConstructsVirtualBase,
bool InheritedFromVirtualBase)
- : Expr(CXXInheritedCtorInitExprClass, T, VK_RValue, OK_Ordinary),
+ : Expr(CXXInheritedCtorInitExprClass, T, VK_PRValue, OK_Ordinary),
Constructor(Ctor), Loc(Loc),
ConstructsVirtualBase(ConstructsVirtualBase),
InheritedFromVirtualBase(InheritedFromVirtualBase) {
@@ -2100,7 +2100,7 @@ public:
/// expression.
CXXScalarValueInitExpr(QualType Type, TypeSourceInfo *TypeInfo,
SourceLocation RParenLoc)
- : Expr(CXXScalarValueInitExprClass, Type, VK_RValue, OK_Ordinary),
+ : Expr(CXXScalarValueInitExprClass, Type, VK_PRValue, OK_Ordinary),
TypeInfo(TypeInfo) {
CXXScalarValueInitExprBits.RParenLoc = RParenLoc;
setDependence(computeDependence(this));
@@ -2408,7 +2408,7 @@ public:
CXXDeleteExpr(QualType Ty, bool GlobalDelete, bool ArrayForm,
bool ArrayFormAsWritten, bool UsualArrayDeleteWantsSize,
FunctionDecl *OperatorDelete, Expr *Arg, SourceLocation Loc)
- : Expr(CXXDeleteExprClass, Ty, VK_RValue, OK_Ordinary),
+ : Expr(CXXDeleteExprClass, Ty, VK_PRValue, OK_Ordinary),
OperatorDelete(OperatorDelete), Argument(Arg) {
CXXDeleteExprBits.GlobalDelete = GlobalDelete;
CXXDeleteExprBits.ArrayForm = ArrayForm;
@@ -2775,7 +2775,7 @@ public:
ArrayTypeTraitExpr(SourceLocation loc, ArrayTypeTrait att,
TypeSourceInfo *queried, uint64_t value, Expr *dimension,
SourceLocation rparen, QualType ty)
- : Expr(ArrayTypeTraitExprClass, ty, VK_RValue, OK_Ordinary), ATT(att),
+ : Expr(ArrayTypeTraitExprClass, ty, VK_PRValue, OK_Ordinary), ATT(att),
Value(value), Dimension(dimension), Loc(loc), RParen(rparen),
QueriedType(queried) {
assert(att <= ATT_Last && "invalid enum value!");
@@ -2841,7 +2841,7 @@ public:
ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et, Expr *queried,
bool value, SourceLocation rparen, QualType resultType)
- : Expr(ExpressionTraitExprClass, resultType, VK_RValue, OK_Ordinary),
+ : Expr(ExpressionTraitExprClass, resultType, VK_PRValue, OK_Ordinary),
ET(et), Value(value), Loc(loc), RParen(rparen),
QueriedExpression(queried) {
assert(et <= ET_Last && "invalid enum value!");
@@ -4003,7 +4003,7 @@ class CXXNoexceptExpr : public Expr {
public:
CXXNoexceptExpr(QualType Ty, Expr *Operand, CanThrowResult Val,
SourceLocation Keyword, SourceLocation RParen)
- : Expr(CXXNoexceptExprClass, Ty, VK_RValue, OK_Ordinary),
+ : Expr(CXXNoexceptExprClass, Ty, VK_PRValue, OK_Ordinary),
Operand(Operand), Range(Keyword, RParen) {
CXXNoexceptExprBits.Value = Val == CT_Cannot;
setDependence(computeDependence(this, Val));
@@ -4161,7 +4161,7 @@ class SizeOfPackExpr final
SourceLocation PackLoc, SourceLocation RParenLoc,
Optional<unsigned> Length,
ArrayRef<TemplateArgument> PartialArgs)
- : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary),
+ : Expr(SizeOfPackExprClass, SizeType, VK_PRValue, OK_Ordinary),
OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
Length(Length ? *Length : PartialArgs.size()), Pack(Pack) {
assert((!Length || PartialArgs.empty()) &&
@@ -4528,9 +4528,7 @@ public:
/// Determine whether this materialized temporary is bound to an
/// lvalue reference; otherwise, it's bound to an rvalue reference.
- bool isBoundToLvalueReference() const {
- return getValueKind() == VK_LValue;
- }
+ bool isBoundToLvalueReference() const { return isLValue(); }
/// Determine whether this temporary object is usable in constant
/// expressions, as specified in C++20 [expr.const]p4.
@@ -4593,8 +4591,8 @@ public:
SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Opcode,
SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc,
Optional<unsigned> NumExpansions)
- : Expr(CXXFoldExprClass, T, VK_RValue, OK_Ordinary), LParenLoc(LParenLoc),
- EllipsisLoc(EllipsisLoc), RParenLoc(RParenLoc),
+ : Expr(CXXFoldExprClass, T, VK_PRValue, OK_Ordinary),
+ LParenLoc(LParenLoc), EllipsisLoc(EllipsisLoc), RParenLoc(RParenLoc),
NumExpansions(NumExpansions ? *NumExpansions + 1 : 0), Opcode(Opcode) {
SubExprs[SubExpr::Callee] = Callee;
SubExprs[SubExpr::LHS] = LHS;
@@ -4704,7 +4702,7 @@ public:
CoroutineSuspendExpr(StmtClass SC, SourceLocation KeywordLoc, QualType Ty,
Expr *Common)
- : Expr(SC, Ty, VK_RValue, OK_Ordinary), KeywordLoc(KeywordLoc) {
+ : Expr(SC, Ty, VK_PRValue, OK_Ordinary), KeywordLoc(KeywordLoc) {
assert(Common->isTypeDependent() && Ty->isDependentType() &&
"wrong constructor for non-dependent co_await/co_yield expression");
SubExprs[SubExpr::Common] = Common;
@@ -4808,7 +4806,7 @@ class DependentCoawaitExpr : public Expr {
public:
DependentCoawaitExpr(SourceLocation KeywordLoc, QualType Ty, Expr *Op,
UnresolvedLookupExpr *OpCoawait)
- : Expr(DependentCoawaitExprClass, Ty, VK_RValue, OK_Ordinary),
+ : Expr(DependentCoawaitExprClass, Ty, VK_PRValue, OK_Ordinary),
KeywordLoc(KeywordLoc) {
// NOTE: A co_await expression is dependent on the coroutines promise
// type and may be dependent even when the `Op` expression is not.
diff --git a/clang/include/clang/AST/ExprObjC.h b/clang/include/clang/AST/ExprObjC.h
index 17eec5172697..b0f057dbaa02 100644
--- a/clang/include/clang/AST/ExprObjC.h
+++ b/clang/include/clang/AST/ExprObjC.h
@@ -55,7 +55,7 @@ class ObjCStringLiteral : public Expr {
public:
ObjCStringLiteral(StringLiteral *SL, QualType T, SourceLocation L)
- : Expr(ObjCStringLiteralClass, T, VK_RValue, OK_Ordinary), String(SL),
+ : Expr(ObjCStringLiteralClass, T, VK_PRValue, OK_Ordinary), String(SL),
AtLoc(L) {
setDependence(ExprDependence::None);
}
@@ -91,7 +91,7 @@ class ObjCBoolLiteralExpr : public Expr {
public:
ObjCBoolLiteralExpr(bool val, QualType Ty, SourceLocation l)
- : Expr(ObjCBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary), Value(val),
+ : Expr(ObjCBoolLiteralExprClass, Ty, VK_PRValue, OK_Ordinary), Value(val),
Loc(l) {
setDependence(ExprDependence::None);
}
@@ -134,7 +134,7 @@ public:
friend class ASTStmtReader;
ObjCBoxedExpr(Expr *E, QualType T, ObjCMethodDecl *method, SourceRange R)
- : Expr(ObjCBoxedExprClass, T, VK_RValue, OK_Ordinary), SubExpr(E),
+ : Expr(ObjCBoxedExprClass, T, VK_PRValue, OK_Ordinary), SubExpr(E),
BoxingMethod(method), Range(R) {
setDependence(computeDependence(this));
}
@@ -458,7 +458,7 @@ class ObjCSelectorExpr : public Expr {
public:
ObjCSelectorExpr(QualType T, Selector selInfo, SourceLocation at,
SourceLocation rp)
- : Expr(ObjCSelectorExprClass, T, VK_RValue, OK_Ordinary),
+ : Expr(ObjCSelectorExprClass, T, VK_PRValue, OK_Ordinary),
SelName(selInfo), AtLoc(at), RParenLoc(rp) {
setDependence(ExprDependence::None);
}
@@ -511,7 +511,7 @@ public:
ObjCProtocolExpr(QualType T, ObjCProtocolDecl *protocol, SourceLocation at,
SourceLocation protoLoc, SourceLocation rp)
- : Expr(ObjCProtocolExprClass, T, VK_RValue, OK_Ordinary),
+ : Expr(ObjCProtocolExprClass, T, VK_PRValue, OK_Ordinary),
TheProtocol(protocol), AtLoc(at), ProtoLoc(protoLoc), RParenLoc(rp) {
setDependence(ExprDependence::None);
}
@@ -1638,8 +1638,8 @@ public:
ObjCBridgedCastExpr(SourceLocation LParenLoc, ObjCBridgeCastKind Kind,
CastKind CK, SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo, Expr *Operand)
- : ExplicitCastExpr(ObjCBridgedCastExprClass, TSInfo->getType(), VK_RValue,
- CK, Operand, 0, false, TSInfo),
+ : ExplicitCastExpr(ObjCBridgedCastExprClass, TSInfo->getType(),
+ VK_PRValue, CK, Operand, 0, false, TSInfo),
LParenLoc(LParenLoc), BridgeKeywordLoc(BridgeKeywordLoc), Kind(Kind) {}
/// Construct an empty Objective-C bridged cast.
@@ -1692,7 +1692,7 @@ class ObjCAvailabilityCheckExpr : public Expr {
public:
ObjCAvailabilityCheckExpr(VersionTuple VersionToCheck, SourceLocation AtLoc,
SourceLocation RParen, QualType Ty)
- : Expr(ObjCAvailabilityCheckExprClass, Ty, VK_RValue, OK_Ordinary),
+ : Expr(ObjCAvailabilityCheckExprClass, Ty, VK_PRValue, OK_Ordinary),
VersionToCheck(VersionToCheck), AtLoc(AtLoc), RParen(RParen) {
setDependence(ExprDependence::None);
}
diff --git a/clang/include/clang/AST/ExternalASTSource.h b/clang/include/clang/AST/ExternalASTSource.h
index caae0770931b..b1851afcda37 100644
--- a/clang/include/clang/AST/ExternalASTSource.h
+++ b/clang/include/clang/AST/ExternalASTSource.h
@@ -24,14 +24,12 @@
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
-#include <string>
#include <utility>
namespace clang {
@@ -462,10 +460,10 @@ public:
} // namespace clang
-/// Specialize PointerLikeTypeTraits to allow LazyGenerationalUpdatePtr to be
-/// placed into a PointerUnion.
namespace llvm {
+/// Specialize PointerLikeTypeTraits to allow LazyGenerationalUpdatePtr to be
+/// placed into a PointerUnion.
template<typename Owner, typename T,
void (clang::ExternalASTSource::*Update)(Owner)>
struct PointerLikeTypeTraits<
diff --git a/clang/include/clang/AST/IgnoreExpr.h b/clang/include/clang/AST/IgnoreExpr.h
index 1c2b538e5b63..a7e9b07bef6c 100644
--- a/clang/include/clang/AST/IgnoreExpr.h
+++ b/clang/include/clang/AST/IgnoreExpr.h
@@ -41,7 +41,7 @@ template <typename... FnTys> Expr *IgnoreExprNodes(Expr *E, FnTys &&... Fns) {
template <typename... FnTys>
const Expr *IgnoreExprNodes(const Expr *E, FnTys &&...Fns) {
- return const_cast<Expr *>(IgnoreExprNodes(E, std::forward<FnTys>(Fns)...));
+ return IgnoreExprNodes(const_cast<Expr *>(E), std::forward<FnTys>(Fns)...);
}
inline Expr *IgnoreImplicitCastsSingleStep(Expr *E) {
@@ -121,6 +121,18 @@ inline Expr *IgnoreImplicitSingleStep(Expr *E) {
return E;
}
+inline Expr *IgnoreElidableImplicitConstructorSingleStep(Expr *E) {
+ auto *CCE = dyn_cast<CXXConstructExpr>(E);
+ if (CCE && CCE->isElidable() && !isa<CXXTemporaryObjectExpr>(CCE)) {
+ unsigned NumArgs = CCE->getNumArgs();
+ if ((NumArgs == 1 ||
+ (NumArgs > 1 && CCE->getArg(1)->isDefaultArgument())) &&
+ !CCE->getArg(0)->isDefaultArgument() && !CCE->isListInitialization())
+ return CCE->getArg(0);
+ }
+ return E;
+}
+
inline Expr *IgnoreImplicitAsWrittenSingleStep(Expr *E) {
if (auto *ICE = dyn_cast<ImplicitCastExpr>(E))
return ICE->getSubExprAsWritten();
diff --git a/clang/include/clang/AST/JSONNodeDumper.h b/clang/include/clang/AST/JSONNodeDumper.h
index 4e7162992418..a96e21993e20 100644
--- a/clang/include/clang/AST/JSONNodeDumper.h
+++ b/clang/include/clang/AST/JSONNodeDumper.h
@@ -21,6 +21,7 @@
#include "clang/AST/AttrVisitor.h"
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentVisitor.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/Type.h"
@@ -204,6 +205,7 @@ public:
void Visit(const OMPClause *C);
void Visit(const BlockDecl::Capture &C);
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const concepts::Requirement *R);
void Visit(const APValue &Value, QualType Ty);
void VisitTypedefType(const TypedefType *TT);
@@ -234,6 +236,7 @@ public:
void VisitUsingDirectiveDecl(const UsingDirectiveDecl *UDD);
void VisitNamespaceAliasDecl(const NamespaceAliasDecl *NAD);
void VisitUsingDecl(const UsingDecl *UD);
+ void VisitUsingEnumDecl(const UsingEnumDecl *UED);
void VisitUsingShadowDecl(const UsingShadowDecl *USD);
void VisitVarDecl(const VarDecl *VD);
void VisitFieldDecl(const FieldDecl *FD);
@@ -263,6 +266,7 @@ public:
void VisitBlockDecl(const BlockDecl *D);
void VisitDeclRefExpr(const DeclRefExpr *DRE);
+ void VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E);
void VisitPredefinedExpr(const PredefinedExpr *PE);
void VisitUnaryOperator(const UnaryOperator *UO);
void VisitBinaryOperator(const BinaryOperator *BO);
@@ -288,6 +292,7 @@ public:
void VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *BTE);
void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *MTE);
void VisitCXXDependentScopeMemberExpr(const CXXDependentScopeMemberExpr *ME);
+ void VisitRequiresExpr(const RequiresExpr *RE);
void VisitObjCEncodeExpr(const ObjCEncodeExpr *OEE);
void VisitObjCMessageExpr(const ObjCMessageExpr *OME);
diff --git a/clang/include/clang/AST/Mangle.h b/clang/include/clang/AST/Mangle.h
index 0e8d6dd53d8a..7d02f08e0120 100644
--- a/clang/include/clang/AST/Mangle.h
+++ b/clang/include/clang/AST/Mangle.h
@@ -89,6 +89,17 @@ public:
return Result.first->second;
}
+ uint64_t getAnonymousStructIdForDebugInfo(const NamedDecl *D) {
+ llvm::DenseMap<const NamedDecl *, uint64_t>::iterator Result =
+ AnonStructIds.find(D);
+ // The decl should already be inserted, but return 0 in case it is not.
+ if (Result == AnonStructIds.end())
+ return 0;
+ return Result->second;
+ }
+
+ virtual std::string getLambdaString(const CXXRecordDecl *Lambda) = 0;
+
/// @name Mangler Entry Points
/// @{
@@ -96,6 +107,12 @@ public:
virtual bool shouldMangleCXXName(const NamedDecl *D) = 0;
virtual bool shouldMangleStringLiteral(const StringLiteral *SL) = 0;
+ virtual bool isUniqueInternalLinkageDecl(const NamedDecl *ND) {
+ return false;
+ }
+
+ virtual void needsUniqueInternalLinkageNames() { }
+
// FIXME: consider replacing raw_ostream & with something like SmallString &.
void mangleName(GlobalDecl GD, raw_ostream &);
virtual void mangleCXXName(GlobalDecl GD, raw_ostream &) = 0;
@@ -153,6 +170,8 @@ public:
class ItaniumMangleContext : public MangleContext {
public:
+ using DiscriminatorOverrideTy =
+ llvm::Optional<unsigned> (*)(ASTContext &, const NamedDecl *);
explicit ItaniumMangleContext(ASTContext &C, DiagnosticsEngine &D)
: MangleContext(C, D, MK_Itanium) {}
@@ -175,12 +194,18 @@ public:
virtual void mangleDynamicStermFinalizer(const VarDecl *D, raw_ostream &) = 0;
+ // This has to live here, otherwise the CXXNameMangler won't have access to
+ // it.
+ virtual DiscriminatorOverrideTy getDiscriminatorOverride() const = 0;
static bool classof(const MangleContext *C) {
return C->getKind() == MK_Itanium;
}
static ItaniumMangleContext *create(ASTContext &Context,
DiagnosticsEngine &Diags);
+ static ItaniumMangleContext *create(ASTContext &Context,
+ DiagnosticsEngine &Diags,
+ DiscriminatorOverrideTy Discriminator);
};
class MicrosoftMangleContext : public MangleContext {
diff --git a/clang/include/clang/AST/MangleNumberingContext.h b/clang/include/clang/AST/MangleNumberingContext.h
index f1ca6a05dbaf..eb33759682d6 100644
--- a/clang/include/clang/AST/MangleNumberingContext.h
+++ b/clang/include/clang/AST/MangleNumberingContext.h
@@ -52,6 +52,11 @@ public:
/// this context.
virtual unsigned getManglingNumber(const TagDecl *TD,
unsigned MSLocalManglingNumber) = 0;
+
+ /// Retrieve the mangling number of a new lambda expression with the
+ /// given call operator within the device context. No device number is
+  /// assigned if there is no associated device numbering context.
+ virtual unsigned getDeviceManglingNumber(const CXXMethodDecl *) { return 0; }
};
} // end namespace clang
diff --git a/clang/include/clang/AST/OpenMPClause.h b/clang/include/clang/AST/OpenMPClause.h
index 877c1d87d8ac..aaddcfa307da 100644
--- a/clang/include/clang/AST/OpenMPClause.h
+++ b/clang/include/clang/AST/OpenMPClause.h
@@ -794,6 +794,208 @@ public:
}
};
+/// This represents the 'sizes' clause in the '#pragma omp tile' directive.
+///
+/// \code
+/// #pragma omp tile sizes(5,5)
+/// for (int i = 0; i < 64; ++i)
+/// for (int j = 0; j < 64; ++j)
+/// \endcode
+class OMPSizesClause final
+ : public OMPClause,
+ private llvm::TrailingObjects<OMPSizesClause, Expr *> {
+ friend class OMPClauseReader;
+ friend class llvm::TrailingObjects<OMPSizesClause, Expr *>;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Number of tile sizes in the clause.
+ unsigned NumSizes;
+
+ /// Build an empty clause.
+ explicit OMPSizesClause(int NumSizes)
+ : OMPClause(llvm::omp::OMPC_sizes, SourceLocation(), SourceLocation()),
+ NumSizes(NumSizes) {}
+
+public:
+ /// Build a 'sizes' AST node.
+ ///
+ /// \param C Context of the AST.
+ /// \param StartLoc Location of the 'sizes' identifier.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Location of ')'.
+ /// \param Sizes Content of the clause.
+ static OMPSizesClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc,
+ ArrayRef<Expr *> Sizes);
+
+ /// Build an empty 'sizes' AST node for deserialization.
+ ///
+ /// \param C Context of the AST.
+ /// \param NumSizes Number of items in the clause.
+ static OMPSizesClause *CreateEmpty(const ASTContext &C, unsigned NumSizes);
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns the number of list items.
+ unsigned getNumSizes() const { return NumSizes; }
+
+ /// Returns the tile size expressions.
+ MutableArrayRef<Expr *> getSizesRefs() {
+ return MutableArrayRef<Expr *>(static_cast<OMPSizesClause *>(this)
+ ->template getTrailingObjects<Expr *>(),
+ NumSizes);
+ }
+ ArrayRef<Expr *> getSizesRefs() const {
+ return ArrayRef<Expr *>(static_cast<const OMPSizesClause *>(this)
+ ->template getTrailingObjects<Expr *>(),
+ NumSizes);
+ }
+
+ /// Sets the tile size expressions.
+ void setSizesRefs(ArrayRef<Expr *> VL) {
+ assert(VL.size() == NumSizes);
+ std::copy(VL.begin(), VL.end(),
+ static_cast<OMPSizesClause *>(this)
+ ->template getTrailingObjects<Expr *>());
+ }
+
+ child_range children() {
+ MutableArrayRef<Expr *> Sizes = getSizesRefs();
+ return child_range(reinterpret_cast<Stmt **>(Sizes.begin()),
+ reinterpret_cast<Stmt **>(Sizes.end()));
+ }
+ const_child_range children() const {
+ ArrayRef<Expr *> Sizes = getSizesRefs();
+ return const_child_range(reinterpret_cast<Stmt *const *>(Sizes.begin()),
+ reinterpret_cast<Stmt *const *>(Sizes.end()));
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_sizes;
+ }
+};
+
+/// Representation of the 'full' clause of the '#pragma omp unroll' directive.
+///
+/// \code
+/// #pragma omp unroll full
+/// for (int i = 0; i < 64; ++i)
+/// \endcode
+class OMPFullClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Build an empty clause.
+ explicit OMPFullClause() : OMPClause(llvm::omp::OMPC_full, {}, {}) {}
+
+public:
+ /// Build an AST node for a 'full' clause.
+ ///
+ /// \param C Context of the AST.
+ /// \param StartLoc Starting location of the clause.
+ /// \param EndLoc Ending location of the clause.
+ static OMPFullClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc);
+
+ /// Build an empty 'full' AST node for deserialization.
+ ///
+ /// \param C Context of the AST.
+ static OMPFullClause *CreateEmpty(const ASTContext &C);
+
+ child_range children() { return {child_iterator(), child_iterator()}; }
+ const_child_range children() const {
+ return {const_child_iterator(), const_child_iterator()};
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_full;
+ }
+};
+
+/// Representation of the 'partial' clause of the '#pragma omp unroll'
+/// directive.
+///
+/// \code
+/// #pragma omp unroll partial(4)
+/// for (int i = start; i < end; ++i)
+/// \endcode
+class OMPPartialClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Optional argument to the clause (unroll factor).
+ Stmt *Factor;
+
+ /// Build an empty clause.
+ explicit OMPPartialClause() : OMPClause(llvm::omp::OMPC_partial, {}, {}) {}
+
+ /// Set the unroll factor.
+ void setFactor(Expr *E) { Factor = E; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build an AST node for a 'partial' clause.
+ ///
+ /// \param C Context of the AST.
+ /// \param StartLoc Location of the 'partial' identifier.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Location of ')'.
+ /// \param Factor Clause argument.
+ static OMPPartialClause *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc, Expr *Factor);
+
+ /// Build an empty 'partial' AST node for deserialization.
+ ///
+ /// \param C Context of the AST.
+ static OMPPartialClause *CreateEmpty(const ASTContext &C);
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns the argument of the clause or nullptr if not set.
+ Expr *getFactor() const { return cast_or_null<Expr>(Factor); }
+
+ child_range children() { return child_range(&Factor, &Factor + 1); }
+ const_child_range children() const {
+ return const_child_range(&Factor, &Factor + 1);
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_partial;
+ }
+};
+
/// This represents 'collapse' clause in the '#pragma omp ...'
/// directive.
///
@@ -5261,14 +5463,14 @@ public:
if (!(--RemainingLists)) {
++DeclCur;
++NumListsCur;
- if (SupportsMapper)
- ++MapperCur;
RemainingLists = *NumListsCur;
assert(RemainingLists && "No lists in the following declaration??");
}
}
++ListSizeCur;
+ if (SupportsMapper)
+ ++MapperCur;
return *this;
}
};
@@ -7272,15 +7474,244 @@ public:
}
};
+/// This represents the 'init' clause in '#pragma omp ...' directives.
+///
+/// \code
+/// #pragma omp interop init(target:obj)
+/// \endcode
+class OMPInitClause final
+ : public OMPVarListClause<OMPInitClause>,
+ private llvm::TrailingObjects<OMPInitClause, Expr *> {
+ friend class OMPClauseReader;
+ friend OMPVarListClause;
+ friend TrailingObjects;
+
+ /// Location of interop variable.
+ SourceLocation VarLoc;
+
+ bool IsTarget = false;
+ bool IsTargetSync = false;
+
+ void setInteropVar(Expr *E) { varlist_begin()[0] = E; }
+
+ void setIsTarget(bool V) { IsTarget = V; }
+
+ void setIsTargetSync(bool V) { IsTargetSync = V; }
+
+ /// Sets the location of the interop variable.
+ void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }
+
+ /// Build 'init' clause.
+ ///
+ /// \param IsTarget Uses the 'target' interop-type.
+ /// \param IsTargetSync Uses the 'targetsync' interop-type.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param VarLoc Location of the interop variable.
+ /// \param EndLoc Ending location of the clause.
+ /// \param N Number of expressions.
+ OMPInitClause(bool IsTarget, bool IsTargetSync, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation VarLoc,
+ SourceLocation EndLoc, unsigned N)
+ : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, StartLoc,
+ LParenLoc, EndLoc, N),
+ VarLoc(VarLoc), IsTarget(IsTarget), IsTargetSync(IsTargetSync) {}
+
+ /// Build an empty clause.
+ OMPInitClause(unsigned N)
+ : OMPVarListClause<OMPInitClause>(llvm::omp::OMPC_init, SourceLocation(),
+ SourceLocation(), SourceLocation(), N) {
+ }
+
+public:
+ /// Creates a fully specified clause.
+ ///
+ /// \param C AST context.
+ /// \param InteropVar The interop variable.
+ /// \param PrefExprs The list of preference expressions.
+ /// \param IsTarget Uses the 'target' interop-type.
+ /// \param IsTargetSync Uses the 'targetsync' interop-type.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param VarLoc Location of the interop variable.
+ /// \param EndLoc Ending location of the clause.
+ static OMPInitClause *Create(const ASTContext &C, Expr *InteropVar,
+ ArrayRef<Expr *> PrefExprs, bool IsTarget,
+ bool IsTargetSync, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation VarLoc,
+ SourceLocation EndLoc);
+
+ /// Creates an empty clause with \a N expressions.
+ ///
+ /// \param C AST context.
+ /// \param N Number of expression items.
+ static OMPInitClause *CreateEmpty(const ASTContext &C, unsigned N);
+
+ /// Returns the location of the interop variable.
+ SourceLocation getVarLoc() const { return VarLoc; }
+
+ /// Returns the interop variable.
+ Expr *getInteropVar() { return varlist_begin()[0]; }
+ const Expr *getInteropVar() const { return varlist_begin()[0]; }
+
+  /// Returns true if interop-type 'target' is used.
+ bool getIsTarget() const { return IsTarget; }
+
+  /// Returns true if interop-type 'targetsync' is used.
+ bool getIsTargetSync() const { return IsTargetSync; }
+
+ child_range children() {
+ return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
+ reinterpret_cast<Stmt **>(varlist_end()));
+ }
+
+ const_child_range children() const {
+ auto Children = const_cast<OMPInitClause *>(this)->children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ using prefs_iterator = MutableArrayRef<Expr *>::iterator;
+ using const_prefs_iterator = ArrayRef<const Expr *>::iterator;
+ using prefs_range = llvm::iterator_range<prefs_iterator>;
+ using const_prefs_range = llvm::iterator_range<const_prefs_iterator>;
+
+ prefs_range prefs() {
+ return prefs_range(reinterpret_cast<Expr **>(std::next(varlist_begin())),
+ reinterpret_cast<Expr **>(varlist_end()));
+ }
+
+ const_prefs_range prefs() const {
+ auto Prefs = const_cast<OMPInitClause *>(this)->prefs();
+ return const_prefs_range(Prefs.begin(), Prefs.end());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_init;
+ }
+};
+
+/// This represents the 'use' clause in '#pragma omp ...' directives.
+///
+/// \code
+/// #pragma omp interop use(obj)
+/// \endcode
+class OMPUseClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Location of interop variable.
+ SourceLocation VarLoc;
+
+ /// The interop variable.
+ Stmt *InteropVar = nullptr;
+
+ /// Set the interop variable.
+ void setInteropVar(Expr *E) { InteropVar = E; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Sets the location of the interop variable.
+ void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }
+
+public:
+  /// Build 'use' clause with an interop variable expression \a InteropVar.
+ ///
+ /// \param InteropVar The interop variable.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param VarLoc Location of the interop variable.
+ /// \param EndLoc Ending location of the clause.
+ OMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation VarLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_use, StartLoc, EndLoc), LParenLoc(LParenLoc),
+ VarLoc(VarLoc), InteropVar(InteropVar) {}
+
+ /// Build an empty clause.
+ OMPUseClause()
+ : OMPClause(llvm::omp::OMPC_use, SourceLocation(), SourceLocation()) {}
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns the location of the interop variable.
+ SourceLocation getVarLoc() const { return VarLoc; }
+
+ /// Returns the interop variable.
+ Expr *getInteropVar() const { return cast<Expr>(InteropVar); }
+
+ child_range children() { return child_range(&InteropVar, &InteropVar + 1); }
+
+ const_child_range children() const {
+ return const_child_range(&InteropVar, &InteropVar + 1);
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_use;
+ }
+};
+
/// This represents 'destroy' clause in the '#pragma omp depobj'
-/// directive.
+/// directive or the '#pragma omp interop' directive.
///
/// \code
/// #pragma omp depobj(a) destroy
+/// #pragma omp interop destroy(obj)
/// \endcode
-/// In this example directive '#pragma omp depobj' has 'destroy' clause.
+/// In these examples directives '#pragma omp depobj' and '#pragma omp interop'
+/// have a 'destroy' clause. The 'interop' directive includes an object.
class OMPDestroyClause final : public OMPClause {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Location of interop variable.
+ SourceLocation VarLoc;
+
+ /// The interop variable.
+ Stmt *InteropVar = nullptr;
+
+ /// Set the interop variable.
+ void setInteropVar(Expr *E) { InteropVar = E; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+ /// Sets the location of the interop variable.
+ void setVarLoc(SourceLocation Loc) { VarLoc = Loc; }
+
public:
+ /// Build 'destroy' clause with an interop variable expression \a InteropVar.
+ ///
+ /// \param InteropVar The interop variable.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param VarLoc Location of the interop variable.
+ /// \param EndLoc Ending location of the clause.
+ OMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation VarLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc),
+ LParenLoc(LParenLoc), VarLoc(VarLoc), InteropVar(InteropVar) {}
+
/// Build 'destroy' clause.
///
/// \param StartLoc Starting location of the clause.
@@ -7293,11 +7724,24 @@ public:
: OMPClause(llvm::omp::OMPC_destroy, SourceLocation(), SourceLocation()) {
}
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns the location of the interop variable.
+ SourceLocation getVarLoc() const { return VarLoc; }
+
+ /// Returns the interop variable.
+ Expr *getInteropVar() const { return cast_or_null<Expr>(InteropVar); }
+
child_range children() {
+ if (InteropVar)
+ return child_range(&InteropVar, &InteropVar + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
+ if (InteropVar)
+ return const_child_range(&InteropVar, &InteropVar + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
@@ -7313,6 +7757,146 @@ public:
}
};
+/// This represents 'novariants' clause in the '#pragma omp ...' directive.
+///
+/// \code
+/// #pragma omp dispatch novariants(a > 5)
+/// \endcode
+/// In this example directive '#pragma omp dispatch' has simple 'novariants'
+/// clause with condition 'a > 5'.
+class OMPNovariantsClause final : public OMPClause,
+ public OMPClauseWithPreInit {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Condition of the 'novariants' clause.
+ Stmt *Condition = nullptr;
+
+ /// Set condition.
+ void setCondition(Expr *Cond) { Condition = Cond; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'novariants' clause with condition \a Cond.
+ ///
+ /// \param Cond Condition of the clause.
+ /// \param HelperCond Helper condition for the construct.
+ /// \param CaptureRegion Innermost OpenMP region where expressions in this
+ /// clause must be captured.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPNovariantsClause(Expr *Cond, Stmt *HelperCond,
+ OpenMPDirectiveKind CaptureRegion,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_novariants, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
+ setPreInitStmt(HelperCond, CaptureRegion);
+ }
+
+ /// Build an empty clause.
+ OMPNovariantsClause()
+ : OMPClause(llvm::omp::OMPC_novariants, SourceLocation(),
+ SourceLocation()),
+ OMPClauseWithPreInit(this) {}
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns condition.
+ Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
+
+ child_range children() { return child_range(&Condition, &Condition + 1); }
+
+ const_child_range children() const {
+ return const_child_range(&Condition, &Condition + 1);
+ }
+
+ child_range used_children();
+ const_child_range used_children() const {
+ auto Children = const_cast<OMPNovariantsClause *>(this)->used_children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_novariants;
+ }
+};
+
+/// This represents 'nocontext' clause in the '#pragma omp ...' directive.
+///
+/// \code
+/// #pragma omp dispatch nocontext(a > 5)
+/// \endcode
+/// In this example directive '#pragma omp dispatch' has simple 'nocontext'
+/// clause with condition 'a > 5'.
+class OMPNocontextClause final : public OMPClause, public OMPClauseWithPreInit {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Condition of the 'nocontext' clause.
+ Stmt *Condition = nullptr;
+
+ /// Set condition.
+ void setCondition(Expr *Cond) { Condition = Cond; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'nocontext' clause with condition \a Cond.
+ ///
+ /// \param Cond Condition of the clause.
+ /// \param HelperCond Helper condition for the construct.
+ /// \param CaptureRegion Innermost OpenMP region where expressions in this
+ /// clause must be captured.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPNocontextClause(Expr *Cond, Stmt *HelperCond,
+ OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_nocontext, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) {
+ setPreInitStmt(HelperCond, CaptureRegion);
+ }
+
+ /// Build an empty clause.
+ OMPNocontextClause()
+ : OMPClause(llvm::omp::OMPC_nocontext, SourceLocation(),
+ SourceLocation()),
+ OMPClauseWithPreInit(this) {}
+
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Returns condition.
+ Expr *getCondition() const { return cast_or_null<Expr>(Condition); }
+
+ child_range children() { return child_range(&Condition, &Condition + 1); }
+
+ const_child_range children() const {
+ return const_child_range(&Condition, &Condition + 1);
+ }
+
+ child_range used_children();
+ const_child_range used_children() const {
+ auto Children = const_cast<OMPNocontextClause *>(this)->used_children();
+ return const_child_range(Children.begin(), Children.end());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_nocontext;
+ }
+};
+
/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
@@ -7749,6 +8333,77 @@ public:
}
};
+/// This represents 'filter' clause in the '#pragma omp ...' directive.
+///
+/// \code
+/// #pragma omp masked filter(tid)
+/// \endcode
+/// In this example directive '#pragma omp masked' has 'filter' clause with
+/// thread id.
+class OMPFilterClause final : public OMPClause, public OMPClauseWithPreInit {
+ friend class OMPClauseReader;
+
+ /// Location of '('.
+ SourceLocation LParenLoc;
+
+ /// Expression of the 'filter' clause.
+ Stmt *ThreadID = nullptr;
+
+ /// Sets the thread identifier.
+ void setThreadID(Expr *TID) { ThreadID = TID; }
+
+ /// Sets the location of '('.
+ void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
+
+public:
+ /// Build 'filter' clause with thread-id \a ThreadID.
+ ///
+ /// \param ThreadID Thread identifier.
+ /// \param HelperE Helper expression associated with this clause.
+ /// \param CaptureRegion Innermost OpenMP region where expressions in this
+ /// clause must be captured.
+ /// \param StartLoc Starting location of the clause.
+ /// \param LParenLoc Location of '('.
+ /// \param EndLoc Ending location of the clause.
+ OMPFilterClause(Expr *ThreadID, Stmt *HelperE,
+ OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
+ SourceLocation LParenLoc, SourceLocation EndLoc)
+ : OMPClause(llvm::omp::OMPC_filter, StartLoc, EndLoc),
+ OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadID(ThreadID) {
+ setPreInitStmt(HelperE, CaptureRegion);
+ }
+
+ /// Build an empty clause.
+ OMPFilterClause()
+ : OMPClause(llvm::omp::OMPC_filter, SourceLocation(), SourceLocation()),
+ OMPClauseWithPreInit(this) {}
+ /// Returns the location of '('.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// Return thread identifier.
+ Expr *getThreadID() { return cast<Expr>(ThreadID); }
+
+ /// Return thread identifier.
+ Expr *getThreadID() const { return cast<Expr>(ThreadID); }
+
+ child_range children() { return child_range(&ThreadID, &ThreadID + 1); }
+
+ const_child_range children() const {
+ return const_child_range(&ThreadID, &ThreadID + 1);
+ }
+
+ child_range used_children() {
+ return child_range(child_iterator(), child_iterator());
+ }
+ const_child_range used_children() const {
+ return const_child_range(const_child_iterator(), const_child_iterator());
+ }
+
+ static bool classof(const OMPClause *T) {
+ return T->getClauseKind() == llvm::omp::OMPC_filter;
+ }
+};
+
/// This class implements a simple visitor for OMPClause
/// subclasses.
template<class ImplClass, template <typename> class Ptr, typename RetTy>
diff --git a/clang/include/clang/AST/OperationKinds.def b/clang/include/clang/AST/OperationKinds.def
index 7c82ab6e57ef..b05b9d81569e 100644
--- a/clang/include/clang/AST/OperationKinds.def
+++ b/clang/include/clang/AST/OperationKinds.def
@@ -181,6 +181,9 @@ CAST_OPERATION(PointerToBoolean)
/// (void) malloc(2048)
CAST_OPERATION(ToVoid)
+/// CK_MatrixCast - A cast between matrix types of the same dimensions.
+CAST_OPERATION(MatrixCast)
+
/// CK_VectorSplat - A conversion from an arithmetic type to a
/// vector of that element type. Fills all elements ("splats") with
/// the source value.
diff --git a/clang/include/clang/AST/ParentMapContext.h b/clang/include/clang/AST/ParentMapContext.h
index a0412380a864..2edbc987850d 100644
--- a/clang/include/clang/AST/ParentMapContext.h
+++ b/clang/include/clang/AST/ParentMapContext.h
@@ -64,9 +64,10 @@ public:
Expr *traverseIgnored(Expr *E) const;
DynTypedNode traverseIgnored(const DynTypedNode &N) const;
+ class ParentMap;
+
private:
ASTContext &ASTCtx;
- class ParentMap;
TraversalKind Traversal = TK_AsIs;
std::unique_ptr<ParentMap> Parents;
};
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index 505ea700fd0e..9bfa5b9c2326 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -186,17 +186,18 @@ public:
/// code, e.g., implicit constructors and destructors.
bool shouldVisitImplicitCode() const { return false; }
+ /// Return whether this visitor should recurse into lambda body
+ bool shouldVisitLambdaBody() const { return true; }
+
/// Return whether this visitor should traverse post-order.
bool shouldTraversePostOrder() const { return false; }
- /// Recursively visits an entire AST, starting from the top-level Decls
- /// in the AST traversal scope (by default, the TranslationUnitDecl).
+ /// Recursively visits an entire AST, starting from the TranslationUnitDecl.
/// \returns false if visitation was terminated early.
bool TraverseAST(ASTContext &AST) {
- for (Decl *D : AST.getTraversalScope())
- if (!getDerived().TraverseDecl(D))
- return false;
- return true;
+ // Currently just an alias for TraverseDecl(TUDecl), but kept in case
+ // we change the implementation again.
+ return getDerived().TraverseDecl(AST.getTranslationUnitDecl());
}
/// Recursively visit a statement or expression, by
@@ -478,6 +479,8 @@ private:
template <typename T>
bool TraverseDeclTemplateParameterLists(T *D);
+ bool TraverseTemplateTypeParamDeclConstraints(const TemplateTypeParmDecl *D);
+
bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
unsigned Count);
bool TraverseArrayTypeLocHelper(ArrayTypeLoc TL);
@@ -655,8 +658,14 @@ bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
// As a syntax visitor, by default we want to ignore declarations for
// implicit declarations (ones not typed explicitly by the user).
- if (!getDerived().shouldVisitImplicitCode() && D->isImplicit())
+ if (!getDerived().shouldVisitImplicitCode() && D->isImplicit()) {
+ // For an implicit template type parameter, its type constraints are not
+ // implicit and are not represented anywhere else. We still need to visit
+ // them.
+ if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(D))
+ return TraverseTemplateTypeParamDeclConstraints(TTPD);
return true;
+ }
switch (D->getKind()) {
#define ABSTRACT_DECL(DECL)
@@ -1484,12 +1493,24 @@ DEF_TRAVERSE_DECL(StaticAssertDecl, {
TRY_TO(TraverseStmt(D->getMessage()));
})
-DEF_TRAVERSE_DECL(
- TranslationUnitDecl,
- {// Code in an unnamed namespace shows up automatically in
- // decls_begin()/decls_end(). Thus we don't need to recurse on
- // D->getAnonymousNamespace().
- })
+DEF_TRAVERSE_DECL(TranslationUnitDecl, {
+ // Code in an unnamed namespace shows up automatically in
+ // decls_begin()/decls_end(). Thus we don't need to recurse on
+ // D->getAnonymousNamespace().
+
+ // If the traversal scope is set, then consider them to be the children of
+ // the TUDecl, rather than traversing (and loading?) all top-level decls.
+ auto Scope = D->getASTContext().getTraversalScope();
+ bool HasLimitedScope =
+ Scope.size() != 1 || !isa<TranslationUnitDecl>(Scope.front());
+ if (HasLimitedScope) {
+ ShouldVisitChildren = false; // we'll do that here instead
+ for (auto *Child : Scope) {
+ if (!canIgnoreChildDeclWhileTraversingDeclContext(Child))
+ TRY_TO(TraverseDecl(Child));
+ }
+ }
+})
DEF_TRAVERSE_DECL(PragmaCommentDecl, {})
@@ -1582,6 +1603,8 @@ DEF_TRAVERSE_DECL(UsingDecl, {
TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
})
+DEF_TRAVERSE_DECL(UsingEnumDecl, {})
+
DEF_TRAVERSE_DECL(UsingPackDecl, {})
DEF_TRAVERSE_DECL(UsingDirectiveDecl, {
@@ -1776,10 +1799,9 @@ DEF_TRAVERSE_DECL(BuiltinTemplateDecl, {
TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
})
-DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
- // D is the "T" in something like "template<typename T> class vector;"
- if (D->getTypeForDecl())
- TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateTypeParamDeclConstraints(
+ const TemplateTypeParmDecl *D) {
if (const auto *TC = D->getTypeConstraint()) {
if (Expr *IDC = TC->getImmediatelyDeclaredConstraint()) {
TRY_TO(TraverseStmt(IDC));
@@ -1791,6 +1813,14 @@ DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
TRY_TO(TraverseConceptReference(*TC));
}
}
+ return true;
+}
+
+DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
+ // D is the "T" in something like "template<typename T> class vector;"
+ if (D->getTypeForDecl())
+ TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
+ TRY_TO(TraverseTemplateTypeParamDeclConstraints(D));
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
TRY_TO(TraverseTypeLoc(D->getDefaultArgumentInfo()->getTypeLoc()));
})
@@ -1828,6 +1858,8 @@ DEF_TRAVERSE_DECL(UnresolvedUsingTypenameDecl, {
// source.
})
+DEF_TRAVERSE_DECL(UnresolvedUsingIfExistsDecl, {})
+
DEF_TRAVERSE_DECL(EnumDecl, {
TRY_TO(TraverseDeclTemplateParameterLists(D));
@@ -2057,6 +2089,15 @@ bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
// by clang.
(!D->isDefaulted() || getDerived().shouldVisitImplicitCode());
+ if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (const CXXRecordDecl *RD = MD->getParent()) {
+ if (RD->isLambda() &&
+ declaresSameEntity(RD->getLambdaCallOperator(), MD)) {
+ VisitBody = VisitBody && getDerived().shouldVisitLambdaBody();
+ }
+ }
+ }
+
if (VisitBody) {
TRY_TO(TraverseStmt(D->getBody())); // Function body.
}
@@ -2611,7 +2652,16 @@ DEF_TRAVERSE_STMT(ObjCMessageExpr, {
TRY_TO(TraverseTypeLoc(TInfo->getTypeLoc()));
})
-DEF_TRAVERSE_STMT(ObjCPropertyRefExpr, {})
+DEF_TRAVERSE_STMT(ObjCPropertyRefExpr, {
+ if (S->isClassReceiver()) {
+ ObjCInterfaceDecl *IDecl = S->getClassReceiver();
+ QualType Type = IDecl->getASTContext().getObjCInterfaceType(IDecl);
+ ObjCInterfaceLocInfo Data;
+ Data.NameLoc = S->getReceiverLocation();
+ Data.NameEndLoc = Data.NameLoc;
+ TRY_TO(TraverseTypeLoc(TypeLoc(Type, &Data)));
+ }
+})
DEF_TRAVERSE_STMT(ObjCSubscriptRefExpr, {})
DEF_TRAVERSE_STMT(ObjCProtocolExpr, {})
DEF_TRAVERSE_STMT(ObjCSelectorExpr, {})
@@ -2624,6 +2674,9 @@ DEF_TRAVERSE_STMT(ObjCBridgedCastExpr, {
DEF_TRAVERSE_STMT(ObjCAvailabilityCheckExpr, {})
DEF_TRAVERSE_STMT(ParenExpr, {})
DEF_TRAVERSE_STMT(ParenListExpr, {})
+DEF_TRAVERSE_STMT(SYCLUniqueStableNameExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+})
DEF_TRAVERSE_STMT(PredefinedExpr, {})
DEF_TRAVERSE_STMT(ShuffleVectorExpr, {})
DEF_TRAVERSE_STMT(ConvertVectorExpr, {})
@@ -2775,6 +2828,14 @@ bool RecursiveASTVisitor<Derived>::TraverseOMPExecutableDirective(
return true;
}
+DEF_TRAVERSE_STMT(OMPCanonicalLoop, {
+ if (!getDerived().shouldVisitImplicitCode()) {
+ // Visit only the syntactical loop.
+ TRY_TO(TraverseStmt(S->getLoopStmt()));
+ ShouldVisitChildren = false;
+ }
+})
+
template <typename Derived>
bool
RecursiveASTVisitor<Derived>::TraverseOMPLoopDirective(OMPLoopDirective *S) {
@@ -2787,6 +2848,12 @@ DEF_TRAVERSE_STMT(OMPParallelDirective,
DEF_TRAVERSE_STMT(OMPSimdDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPTileDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPUnrollDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
DEF_TRAVERSE_STMT(OMPForDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
@@ -2945,6 +3012,15 @@ DEF_TRAVERSE_STMT(OMPTargetTeamsDistributeParallelForSimdDirective,
DEF_TRAVERSE_STMT(OMPTargetTeamsDistributeSimdDirective,
{ TRY_TO(TraverseOMPExecutableDirective(S)); })
+DEF_TRAVERSE_STMT(OMPInteropDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPDispatchDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
+DEF_TRAVERSE_STMT(OMPMaskedDirective,
+ { TRY_TO(TraverseOMPExecutableDirective(S)); })
+
// OpenMP clauses.
template <typename Derived>
bool RecursiveASTVisitor<Derived>::TraverseOMPClause(OMPClause *C) {
@@ -3028,6 +3104,24 @@ bool RecursiveASTVisitor<Derived>::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
}
template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPSizesClause(OMPSizesClause *C) {
+ for (Expr *E : C->getSizesRefs())
+ TRY_TO(TraverseStmt(E));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPFullClause(OMPFullClause *C) {
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPPartialClause(OMPPartialClause *C) {
+ TRY_TO(TraverseStmt(C->getFactor()));
+ return true;
+}
+
+template <typename Derived>
bool
RecursiveASTVisitor<Derived>::VisitOMPCollapseClause(OMPCollapseClause *C) {
TRY_TO(TraverseStmt(C->getNumForLoops()));
@@ -3165,7 +3259,36 @@ bool RecursiveASTVisitor<Derived>::VisitOMPNogroupClause(OMPNogroupClause *) {
}
template <typename Derived>
-bool RecursiveASTVisitor<Derived>::VisitOMPDestroyClause(OMPDestroyClause *) {
+bool RecursiveASTVisitor<Derived>::VisitOMPInitClause(OMPInitClause *C) {
+ TRY_TO(VisitOMPClauseList(C));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPUseClause(OMPUseClause *C) {
+ TRY_TO(TraverseStmt(C->getInteropVar()));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPDestroyClause(OMPDestroyClause *C) {
+ TRY_TO(TraverseStmt(C->getInteropVar()));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPNovariantsClause(
+ OMPNovariantsClause *C) {
+ TRY_TO(VisitOMPClauseWithPreInit(C));
+ TRY_TO(TraverseStmt(C->getCondition()));
+ return true;
+}
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPNocontextClause(
+ OMPNocontextClause *C) {
+ TRY_TO(VisitOMPClauseWithPreInit(C));
+ TRY_TO(TraverseStmt(C->getCondition()));
return true;
}
@@ -3544,6 +3667,13 @@ bool RecursiveASTVisitor<Derived>::VisitOMPAffinityClause(
return true;
}
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::VisitOMPFilterClause(OMPFilterClause *C) {
+ TRY_TO(VisitOMPClauseWithPreInit(C));
+ TRY_TO(TraverseStmt(C->getThreadID()));
+ return true;
+}
+
// FIXME: look at the following tricky-seeming exprs to see if we
// need to recurse on anything. These are ones that have methods
// returning decls or qualtypes or nestednamespecifier -- though I'm
diff --git a/clang/include/clang/AST/Redeclarable.h b/clang/include/clang/AST/Redeclarable.h
index 87252337a0f4..77b827c52bfb 100644
--- a/clang/include/clang/AST/Redeclarable.h
+++ b/clang/include/clang/AST/Redeclarable.h
@@ -193,6 +193,7 @@ protected:
public:
friend class ASTDeclReader;
friend class ASTDeclWriter;
+ friend class IncrementalParser;
Redeclarable(const ASTContext &Ctx)
: RedeclLink(LatestDeclLink(Ctx)),
diff --git a/clang/include/clang/AST/Stmt.h b/clang/include/clang/AST/Stmt.h
index c2e69a91e55d..8e1d7df97096 100644
--- a/clang/include/clang/AST/Stmt.h
+++ b/clang/include/clang/AST/Stmt.h
@@ -518,7 +518,7 @@ protected:
unsigned : NumExprBits;
- unsigned Kind : 6;
+ unsigned Kind : 7;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// True if the call expression has some floating-point features.
@@ -1798,6 +1798,7 @@ public:
class LabelStmt : public ValueStmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
+ bool SideEntry = false;
public:
/// Build a label statement.
@@ -1833,6 +1834,8 @@ public:
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
+ bool isSideEntry() const { return SideEntry; }
+ void setSideEntry(bool SE) { SideEntry = SE; }
};
/// Represents an attribute applied to a statement.
@@ -2080,6 +2083,7 @@ public:
/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
+ Optional<Stmt *> getNondiscardedCase(const ASTContext &Ctx);
bool isObjCAvailabilityCheck() const;
@@ -2119,7 +2123,7 @@ class SwitchStmt final : public Stmt,
friend TrailingObjects;
/// Points to a linked list of case and default statements.
- SwitchCase *FirstCase;
+ SwitchCase *FirstCase = nullptr;
// SwitchStmt is followed by several trailing objects,
// some of which optional. Note that it would be more convenient to
diff --git a/clang/include/clang/AST/StmtDataCollectors.td b/clang/include/clang/AST/StmtDataCollectors.td
index 7cb9f16fbce2..922dd2a20d59 100644
--- a/clang/include/clang/AST/StmtDataCollectors.td
+++ b/clang/include/clang/AST/StmtDataCollectors.td
@@ -51,7 +51,7 @@ class CallExpr {
// Print all template arguments into ArgString
llvm::raw_string_ostream OS(ArgString);
for (unsigned i = 0; i < Args->size(); ++i) {
- Args->get(i).print(Context.getLangOpts(), OS);
+ Args->get(i).print(Context.getLangOpts(), OS, /*IncludeType*/ true);
// Add a padding character so that 'foo<X, XX>()' != 'foo<XX, X>()'.
OS << '\n';
}
diff --git a/clang/include/clang/AST/StmtIterator.h b/clang/include/clang/AST/StmtIterator.h
index bcdb0df829fb..e98408c51a50 100644
--- a/clang/include/clang/AST/StmtIterator.h
+++ b/clang/include/clang/AST/StmtIterator.h
@@ -74,14 +74,17 @@ protected:
};
template <typename DERIVED, typename REFERENCE>
-class StmtIteratorImpl : public StmtIteratorBase,
- public std::iterator<std::forward_iterator_tag,
- REFERENCE, ptrdiff_t,
- REFERENCE, REFERENCE> {
+class StmtIteratorImpl : public StmtIteratorBase {
protected:
StmtIteratorImpl(const StmtIteratorBase& RHS) : StmtIteratorBase(RHS) {}
public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = REFERENCE;
+ using difference_type = std::ptrdiff_t;
+ using pointer = REFERENCE;
+ using reference = REFERENCE;
+
StmtIteratorImpl() = default;
StmtIteratorImpl(Stmt **s) : StmtIteratorBase(s) {}
StmtIteratorImpl(Decl **dgi, Decl **dge) : StmtIteratorBase(dgi, dge) {}
diff --git a/clang/include/clang/AST/StmtOpenMP.h b/clang/include/clang/AST/StmtOpenMP.h
index b7bbf15949a0..9c85df741f48 100644
--- a/clang/include/clang/AST/StmtOpenMP.h
+++ b/clang/include/clang/AST/StmtOpenMP.h
@@ -28,6 +28,238 @@ namespace clang {
// AST classes for directives.
//===----------------------------------------------------------------------===//
+/// Representation of an OpenMP canonical loop.
+///
+/// OpenMP 1.0 C/C++, section 2.4.1 for Construct; canonical-shape
+/// OpenMP 2.0 C/C++, section 2.4.1 for Construct; canonical-shape
+/// OpenMP 2.5, section 2.5.1 Loop Construct; canonical form
+/// OpenMP 3.1, section 2.5.1 Loop Construct; canonical form
+/// OpenMP 4.0, section 2.6 Canonical Loop Form
+/// OpenMP 4.5, section 2.6 Canonical Loop Form
+/// OpenMP 5.0, section 2.9.1 Canonical Loop Form
+/// OpenMP 5.1, section 2.11.1 Canonical Loop Nest Form
+///
+/// An OpenMP canonical loop is a for-statement or range-based for-statement
+/// with additional requirements that ensure that the number of iterations is
+/// known before entering the loop and allow skipping to an arbitrary iteration.
+/// The OMPCanonicalLoop AST node wraps a ForStmt or CXXForRangeStmt that is
+/// known to fulfill OpenMP's canonical loop requirements because of being
+/// associated to an OMPLoopBasedDirective. That is, the general structure is:
+///
+/// OMPLoopBasedDirective
+/// [`- CapturedStmt ]
+/// [ `- CapturedDecl]
+/// ` OMPCanonicalLoop
+/// `- ForStmt/CXXForRangeStmt
+/// `- Stmt
+///
+/// One or multiple CapturedStmt/CapturedDecl pairs may be inserted by some
+/// directives such as OMPParallelForDirective, but others do not need them
+/// (such as OMPTileDirective). In The OMPCanonicalLoop and
+/// ForStmt/CXXForRangeStmt pair is repeated for loop associated with the
+/// directive. A OMPCanonicalLoop must not appear in the AST unless associated
+/// with a OMPLoopBasedDirective. In an imperfectly nested loop nest, the
+/// OMPCanonicalLoop may also be wrapped in a CompoundStmt:
+///
+/// [...]
+/// ` OMPCanonicalLoop
+/// `- ForStmt/CXXForRangeStmt
+/// `- CompoundStmt
+/// |- Leading in-between code (if any)
+/// |- OMPCanonicalLoop
+/// | `- ForStmt/CXXForRangeStmt
+/// | `- ...
+/// `- Trailing in-between code (if any)
+///
+/// The leading/trailing in-between code must not itself be a OMPCanonicalLoop
+/// to avoid confusion which loop belongs to the nesting.
+///
+/// There are three different kinds of iteration variables for different
+/// purposes:
+/// * Loop user variable: The user-accessible variable with different value for
+/// each iteration.
+/// * Loop iteration variable: The variable used to identify a loop iteration;
+/// for range-based for-statement, this is the hidden iterator '__begin'. For
+/// other loops, it is identical to the loop user variable. Must be a
+/// random-access iterator, pointer or integer type.
+/// * Logical iteration counter: Normalized loop counter starting at 0 and
+/// incrementing by one at each iteration. Allows abstracting over the type
+/// of the loop iteration variable and is always an unsigned integer type
+/// appropriate to represent the range of the loop iteration variable. Its
+/// value corresponds to the logical iteration number in the OpenMP
+/// specification.
+///
+/// This AST node provides two captured statements:
+/// * The distance function which computes the number of iterations.
+/// * The loop user variable function that computes the loop user variable when
+/// given a logical iteration number.
+///
+/// These captured statements provide the link between C/C++ semantics and the
+/// logical iteration counters used by the OpenMPIRBuilder which is
+/// language-agnostic and therefore does not know e.g. how to advance a
+/// random-access iterator. The OpenMPIRBuilder will use this information to
+/// apply simd, workshare-loop, distribute, taskloop and loop directives to the
+/// loop. For compatibility with the non-OpenMPIRBuilder codegen path, an
+/// OMPCanonicalLoop can itself also be wrapped into the CapturedStmts of an
+/// OMPLoopDirective and skipped when searching for the associated syntactical
+/// loop.
+///
+/// Example:
+/// <code>
+/// std::vector<std::string> Container{"1","2","3"};
+/// for (std::string Str : Container)
+/// Body(Str);
+/// </code>
+/// which is syntactic sugar for approximately:
+/// <code>
+/// auto &&__range = Container;
+/// auto __begin = std::begin(__range);
+/// auto __end = std::end(__range);
+/// for (; __begin != __end; ++__begin) {
+/// std::string Str = *__begin;
+/// Body(Str);
+/// }
+/// </code>
+/// In this example, the loop user variable is `Str`, the loop iteration
+/// variable is `__begin` of type `std::vector<std::string>::iterator` and the
+/// logical iteration number type is `size_t` (unsigned version of
+/// `std::vector<std::string>::iterator::difference_type` aka `ptrdiff_t`).
+/// Therefore, the distance function will be
+/// <code>
+/// [&](size_t &Result) { Result = __end - __begin; }
+/// </code>
+/// and the loop variable function is
+/// <code>
+/// [&,__begin](std::vector<std::string>::iterator &Result, size_t Logical) {
+/// Result = __begin + Logical;
+/// }
+/// </code>
+/// The variable `__begin`, aka the loop iteration variable, is captured by
+/// value because it is modified in the loop body, but both functions require
+/// the initial value. The OpenMP specification explicitly leaves unspecified
+/// when the loop expressions are evaluated such that a capture by reference is
+/// sufficient.
+class OMPCanonicalLoop : public Stmt {
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ /// Children of this AST node.
+ enum {
+ LOOP_STMT,
+ DISTANCE_FUNC,
+ LOOPVAR_FUNC,
+ LOOPVAR_REF,
+ LastSubStmt = LOOPVAR_REF
+ };
+
+private:
+ /// This AST node's children.
+ Stmt *SubStmts[LastSubStmt + 1] = {};
+
+ OMPCanonicalLoop() : Stmt(StmtClass::OMPCanonicalLoopClass) {}
+
+public:
+ /// Create a new OMPCanonicalLoop.
+ static OMPCanonicalLoop *create(const ASTContext &Ctx, Stmt *LoopStmt,
+ CapturedStmt *DistanceFunc,
+ CapturedStmt *LoopVarFunc,
+ DeclRefExpr *LoopVarRef) {
+ OMPCanonicalLoop *S = new (Ctx) OMPCanonicalLoop();
+ S->setLoopStmt(LoopStmt);
+ S->setDistanceFunc(DistanceFunc);
+ S->setLoopVarFunc(LoopVarFunc);
+ S->setLoopVarRef(LoopVarRef);
+ return S;
+ }
+
+ /// Create an empty OMPCanonicalLoop for deserialization.
+ static OMPCanonicalLoop *createEmpty(const ASTContext &Ctx) {
+ return new (Ctx) OMPCanonicalLoop();
+ }
+
+ static bool classof(const Stmt *S) {
+ return S->getStmtClass() == StmtClass::OMPCanonicalLoopClass;
+ }
+
+ SourceLocation getBeginLoc() const { return getLoopStmt()->getBeginLoc(); }
+ SourceLocation getEndLoc() const { return getLoopStmt()->getEndLoc(); }
+
+ /// Return this AST node's children.
+ /// @{
+ child_range children() {
+ return child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
+ }
+ const_child_range children() const {
+ return const_child_range(&SubStmts[0], &SubStmts[0] + LastSubStmt + 1);
+ }
+ /// @}
+
+ /// The wrapped syntactic loop statement (ForStmt or CXXForRangeStmt).
+ /// @{
+ Stmt *getLoopStmt() { return SubStmts[LOOP_STMT]; }
+ const Stmt *getLoopStmt() const { return SubStmts[LOOP_STMT]; }
+ void setLoopStmt(Stmt *S) {
+ assert((isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) &&
+ "Canonical loop must be a for loop (range-based or otherwise)");
+ SubStmts[LOOP_STMT] = S;
+ }
+ /// @}
+
+ /// The function that computes the number of loop iterations. Can be evaluated
+ /// before entering the loop but after the syntactical loop's init
+ /// statement(s).
+ ///
+ /// Function signature: void(LogicalTy &Result)
+ /// Any values necessary to compute the distance are captures of the closure.
+ /// @{
+ CapturedStmt *getDistanceFunc() {
+ return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
+ }
+ const CapturedStmt *getDistanceFunc() const {
+ return cast<CapturedStmt>(SubStmts[DISTANCE_FUNC]);
+ }
+ void setDistanceFunc(CapturedStmt *S) {
+ assert(S && "Expected non-null captured statement");
+ SubStmts[DISTANCE_FUNC] = S;
+ }
+ /// @}
+
+ /// The function that computes the loop user variable from a logical iteration
+ /// counter. Can be evaluated as first statement in the loop.
+ ///
+ /// Function signature: void(LoopVarTy &Result, LogicalTy Number)
+ /// Any other values required to compute the loop user variable (such as start
+ /// value, step size) are captured by the closure. In particular, the initial
+ /// value of loop iteration variable is captured by value to be unaffected by
+ /// previous iterations.
+ /// @{
+ CapturedStmt *getLoopVarFunc() {
+ return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
+ }
+ const CapturedStmt *getLoopVarFunc() const {
+ return cast<CapturedStmt>(SubStmts[LOOPVAR_FUNC]);
+ }
+ void setLoopVarFunc(CapturedStmt *S) {
+ assert(S && "Expected non-null captured statement");
+ SubStmts[LOOPVAR_FUNC] = S;
+ }
+ /// @}
+
+ /// Reference to the loop user variable as accessed in the loop body.
+ /// @{
+ DeclRefExpr *getLoopVarRef() {
+ return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
+ }
+ const DeclRefExpr *getLoopVarRef() const {
+ return cast<DeclRefExpr>(SubStmts[LOOPVAR_REF]);
+ }
+ void setLoopVarRef(DeclRefExpr *E) {
+ assert(E && "Expected non-null loop variable");
+ SubStmts[LOOPVAR_REF] = E;
+ }
+ /// @}
+};
+
/// This is a basic class for representing single OpenMP executable
/// directive.
///
@@ -228,17 +460,22 @@ public:
/// directive). Returns nullptr if no clause of this kind is associated with
/// the directive.
template <typename SpecificClause>
- const SpecificClause *getSingleClause() const {
- auto Clauses = getClausesOfKind<SpecificClause>();
+ static const SpecificClause *getSingleClause(ArrayRef<OMPClause *> Clauses) {
+ auto ClausesOfKind = getClausesOfKind<SpecificClause>(Clauses);
- if (Clauses.begin() != Clauses.end()) {
- assert(std::next(Clauses.begin()) == Clauses.end() &&
+ if (ClausesOfKind.begin() != ClausesOfKind.end()) {
+ assert(std::next(ClausesOfKind.begin()) == ClausesOfKind.end() &&
"There are at least 2 clauses of the specified kind");
- return *Clauses.begin();
+ return *ClausesOfKind.begin();
}
return nullptr;
}
+ template <typename SpecificClause>
+ const SpecificClause *getSingleClause() const {
+ return getSingleClause<SpecificClause>(clauses());
+ }
+
/// Returns true if the current directive has one or more clauses of a
/// specific kind.
template <typename SpecificClause>
@@ -440,13 +677,288 @@ public:
}
};
+/// The base class for all loop-based directives, including loop transformation
+/// directives.
+class OMPLoopBasedDirective : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+
+protected:
+ /// Number of collapsed loops as specified by 'collapse' clause.
+ unsigned NumAssociatedLoops = 0;
+
+ /// Build instance of loop directive of class \a Kind.
+ ///
+ /// \param SC Statement class.
+ /// \param Kind Kind of OpenMP directive.
+ /// \param StartLoc Starting location of the directive (directive keyword).
+ /// \param EndLoc Ending location of the directive.
+ /// \param NumAssociatedLoops Number of loops associated with the construct.
+ ///
+ OMPLoopBasedDirective(StmtClass SC, OpenMPDirectiveKind Kind,
+ SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned NumAssociatedLoops)
+ : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
+ NumAssociatedLoops(NumAssociatedLoops) {}
+
+public:
+ /// The expressions built to support OpenMP loops in combined/composite
+ /// pragmas (e.g. pragma omp distribute parallel for)
+ struct DistCombinedHelperExprs {
+ /// DistributeLowerBound - used when composing 'omp distribute' with
+ /// 'omp for' in a same construct.
+ Expr *LB;
+ /// DistributeUpperBound - used when composing 'omp distribute' with
+ /// 'omp for' in a same construct.
+ Expr *UB;
+ /// DistributeEnsureUpperBound - used when composing 'omp distribute'
+ /// with 'omp for' in a same construct, EUB depends on DistUB
+ Expr *EUB;
+ /// Distribute loop iteration variable init used when composing 'omp
+ /// distribute'
+ /// with 'omp for' in a same construct
+ Expr *Init;
+ /// Distribute Loop condition used when composing 'omp distribute'
+ /// with 'omp for' in a same construct
+ Expr *Cond;
+ /// Update of LowerBound for statically scheduled omp loops for
+ /// outer loop in combined constructs (e.g. 'distribute parallel for')
+ Expr *NLB;
+ /// Update of UpperBound for statically scheduled omp loops for
+ /// outer loop in combined constructs (e.g. 'distribute parallel for')
+ Expr *NUB;
+ /// Distribute Loop condition used when composing 'omp distribute'
+ /// with 'omp for' in a same construct when schedule is chunked.
+ Expr *DistCond;
+ /// 'omp parallel for' loop condition used when composed with
+ /// 'omp distribute' in the same construct and when schedule is
+ /// chunked and the chunk size is 1.
+ Expr *ParForInDistCond;
+ };
+
+ /// The expressions built for the OpenMP loop CodeGen for the
+ /// whole collapsed loop nest.
+ struct HelperExprs {
+ /// Loop iteration variable.
+ Expr *IterationVarRef;
+ /// Loop last iteration number.
+ Expr *LastIteration;
+ /// Loop number of iterations.
+ Expr *NumIterations;
+ /// Calculation of last iteration.
+ Expr *CalcLastIteration;
+ /// Loop pre-condition.
+ Expr *PreCond;
+ /// Loop condition.
+ Expr *Cond;
+ /// Loop iteration variable init.
+ Expr *Init;
+ /// Loop increment.
+ Expr *Inc;
+ /// IsLastIteration - local flag variable passed to runtime.
+ Expr *IL;
+ /// LowerBound - local variable passed to runtime.
+ Expr *LB;
+ /// UpperBound - local variable passed to runtime.
+ Expr *UB;
+ /// Stride - local variable passed to runtime.
+ Expr *ST;
+ /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
+ Expr *EUB;
+ /// Update of LowerBound for statically scheduled 'omp for' loops.
+ Expr *NLB;
+ /// Update of UpperBound for statically scheduled 'omp for' loops.
+ Expr *NUB;
+ /// PreviousLowerBound - local variable passed to runtime in the
+ /// enclosing schedule or null if that does not apply.
+ Expr *PrevLB;
+ /// PreviousUpperBound - local variable passed to runtime in the
+ /// enclosing schedule or null if that does not apply.
+ Expr *PrevUB;
+ /// DistInc - increment expression for distribute loop when found
+ /// combined with a further loop level (e.g. in 'distribute parallel for')
+ /// expression IV = IV + ST
+ Expr *DistInc;
+ /// PrevEUB - expression similar to EUB but to be used when loop
+ /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
+ /// when ensuring that the UB is either the calculated UB by the runtime or
+ /// the end of the assigned distribute chunk)
+ /// expression UB = min (UB, PrevUB)
+ Expr *PrevEUB;
+ /// Counters Loop counters.
+ SmallVector<Expr *, 4> Counters;
+ /// PrivateCounters Loop counters.
+ SmallVector<Expr *, 4> PrivateCounters;
+ /// Expressions for loop counters inits for CodeGen.
+ SmallVector<Expr *, 4> Inits;
+ /// Expressions for loop counters update for CodeGen.
+ SmallVector<Expr *, 4> Updates;
+ /// Final loop counter values for CodeGen.
+ SmallVector<Expr *, 4> Finals;
+ /// List of counters required for the generation of the non-rectangular
+ /// loops.
+ SmallVector<Expr *, 4> DependentCounters;
+ /// List of initializers required for the generation of the non-rectangular
+ /// loops.
+ SmallVector<Expr *, 4> DependentInits;
+ /// List of final conditions required for the generation of the
+ /// non-rectangular loops.
+ SmallVector<Expr *, 4> FinalsConditions;
+ /// Init statement for all captured expressions.
+ Stmt *PreInits;
+
+ /// Expressions used when combining OpenMP loop pragmas
+ DistCombinedHelperExprs DistCombinedFields;
+
+ /// Check if all the expressions are built (does not check the
+ /// worksharing ones).
+ bool builtAll() {
+ return IterationVarRef != nullptr && LastIteration != nullptr &&
+ NumIterations != nullptr && PreCond != nullptr &&
+ Cond != nullptr && Init != nullptr && Inc != nullptr;
+ }
+
+ /// Initialize all the fields to null.
+ /// \param Size Number of elements in the
+ /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
+ /// arrays.
+ void clear(unsigned Size) {
+ IterationVarRef = nullptr;
+ LastIteration = nullptr;
+ CalcLastIteration = nullptr;
+ PreCond = nullptr;
+ Cond = nullptr;
+ Init = nullptr;
+ Inc = nullptr;
+ IL = nullptr;
+ LB = nullptr;
+ UB = nullptr;
+ ST = nullptr;
+ EUB = nullptr;
+ NLB = nullptr;
+ NUB = nullptr;
+ NumIterations = nullptr;
+ PrevLB = nullptr;
+ PrevUB = nullptr;
+ DistInc = nullptr;
+ PrevEUB = nullptr;
+ Counters.resize(Size);
+ PrivateCounters.resize(Size);
+ Inits.resize(Size);
+ Updates.resize(Size);
+ Finals.resize(Size);
+ DependentCounters.resize(Size);
+ DependentInits.resize(Size);
+ FinalsConditions.resize(Size);
+ for (unsigned I = 0; I < Size; ++I) {
+ Counters[I] = nullptr;
+ PrivateCounters[I] = nullptr;
+ Inits[I] = nullptr;
+ Updates[I] = nullptr;
+ Finals[I] = nullptr;
+ DependentCounters[I] = nullptr;
+ DependentInits[I] = nullptr;
+ FinalsConditions[I] = nullptr;
+ }
+ PreInits = nullptr;
+ DistCombinedFields.LB = nullptr;
+ DistCombinedFields.UB = nullptr;
+ DistCombinedFields.EUB = nullptr;
+ DistCombinedFields.Init = nullptr;
+ DistCombinedFields.Cond = nullptr;
+ DistCombinedFields.NLB = nullptr;
+ DistCombinedFields.NUB = nullptr;
+ DistCombinedFields.DistCond = nullptr;
+ DistCombinedFields.ParForInDistCond = nullptr;
+ }
+ };
+
+ /// Get number of collapsed loops.
+ unsigned getLoopsNumber() const { return NumAssociatedLoops; }
+
+ /// Try to find the next loop sub-statement in the specified statement \p
+ /// CurStmt.
+ /// \param TryImperfectlyNestedLoops true, if we need to try to look for the
+ /// imperfectly nested loop.
+ static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
+ bool TryImperfectlyNestedLoops);
+ static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
+ bool TryImperfectlyNestedLoops) {
+ return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
+ TryImperfectlyNestedLoops);
+ }
+
+ /// Calls the specified callback function for all the loops in \p CurStmt,
+ /// from the outermost to the innermost.
+ static bool doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
+ unsigned NumLoops,
+ llvm::function_ref<bool(unsigned, Stmt *)> Callback,
+ llvm::function_ref<void(OMPLoopBasedDirective *)>
+ OnTransformationCallback);
+ static bool
+ doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
+ unsigned NumLoops,
+ llvm::function_ref<bool(unsigned, const Stmt *)> Callback,
+ llvm::function_ref<void(const OMPLoopBasedDirective *)>
+ OnTransformationCallback) {
+ auto &&NewCallback = [Callback](unsigned Cnt, Stmt *CurStmt) {
+ return Callback(Cnt, CurStmt);
+ };
+ auto &&NewTransformCb =
+ [OnTransformationCallback](OMPLoopBasedDirective *A) {
+ OnTransformationCallback(A);
+ };
+ return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
+ NumLoops, NewCallback, NewTransformCb);
+ }
+
+ /// Calls the specified callback function for all the loops in \p CurStmt,
+ /// from the outermost to the innermost.
+ static bool
+ doForAllLoops(Stmt *CurStmt, bool TryImperfectlyNestedLoops,
+ unsigned NumLoops,
+ llvm::function_ref<bool(unsigned, Stmt *)> Callback) {
+ auto &&TransformCb = [](OMPLoopBasedDirective *) {};
+ return doForAllLoops(CurStmt, TryImperfectlyNestedLoops, NumLoops, Callback,
+ TransformCb);
+ }
+ static bool
+ doForAllLoops(const Stmt *CurStmt, bool TryImperfectlyNestedLoops,
+ unsigned NumLoops,
+ llvm::function_ref<bool(unsigned, const Stmt *)> Callback) {
+ auto &&NewCallback = [Callback](unsigned Cnt, const Stmt *CurStmt) {
+ return Callback(Cnt, CurStmt);
+ };
+ return doForAllLoops(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
+ NumLoops, NewCallback);
+ }
+
+ /// Calls the specified callback function for all the loop bodies in \p
+ /// CurStmt, from the outermost loop to the innermost.
+ static void doForAllLoopsBodies(
+ Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
+ llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback);
+ static void doForAllLoopsBodies(
+ const Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
+ llvm::function_ref<void(unsigned, const Stmt *, const Stmt *)> Callback) {
+ auto &&NewCallback = [Callback](unsigned Cnt, Stmt *Loop, Stmt *Body) {
+ Callback(Cnt, Loop, Body);
+ };
+ doForAllLoopsBodies(const_cast<Stmt *>(CurStmt), TryImperfectlyNestedLoops,
+ NumLoops, NewCallback);
+ }
+
+ static bool classof(const Stmt *T) {
+ if (auto *D = dyn_cast<OMPExecutableDirective>(T))
+ return isOpenMPLoopDirective(D->getDirectiveKind());
+ return false;
+ }
+};
+
/// This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
-class OMPLoopDirective : public OMPExecutableDirective {
+class OMPLoopDirective : public OMPLoopBasedDirective {
friend class ASTStmtReader;
- /// Number of collapsed loops as specified by 'collapse' clause.
- unsigned CollapsedNum = 0;
/// Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
@@ -454,7 +966,7 @@ class OMPLoopDirective : public OMPExecutableDirective {
/// The first 9 children are necessary for all the loop directives,
/// the next 8 are specific to the worksharing ones, and the next 11 are
/// used for combined constructs containing two pragmas associated to loops.
- /// After the fixed children, three arrays of length CollapsedNum are
+ /// After the fixed children, three arrays of length NumAssociatedLoops are
/// allocated: loop counters, their updates and final values.
/// PrevLowerBound and PrevUpperBound are used to communicate blocking
/// information in composite constructs which require loop blocking
@@ -512,63 +1024,63 @@ class OMPLoopDirective : public OMPExecutableDirective {
MutableArrayRef<Expr *> getCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind())]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
MutableArrayRef<Expr *> getInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- 2 * CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ 2 * getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- 3 * CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ 3 * getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- 4 * CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ 4 * getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent counters storage.
MutableArrayRef<Expr *> getDependentCounters() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- 5 * CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ 5 * getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the dependent inits storage.
MutableArrayRef<Expr *> getDependentInits() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- 6 * CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ 6 * getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
/// Get the finals conditions storage.
MutableArrayRef<Expr *> getFinalsConditions() {
auto **Storage = reinterpret_cast<Expr **>(
&Data->getChildren()[getArraysOffset(getDirectiveKind()) +
- 7 * CollapsedNum]);
- return llvm::makeMutableArrayRef(Storage, CollapsedNum);
+ 7 * getLoopsNumber()]);
+ return llvm::makeMutableArrayRef(Storage, getLoopsNumber());
}
protected:
@@ -583,8 +1095,7 @@ protected:
OMPLoopDirective(StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum)
- : OMPExecutableDirective(SC, Kind, StartLoc, EndLoc),
- CollapsedNum(CollapsedNum) {}
+ : OMPLoopBasedDirective(SC, Kind, StartLoc, EndLoc, CollapsedNum) {}
/// Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
@@ -753,180 +1264,6 @@ protected:
void setFinalsConditions(ArrayRef<Expr *> A);
public:
- /// The expressions built to support OpenMP loops in combined/composite
- /// pragmas (e.g. pragma omp distribute parallel for)
- struct DistCombinedHelperExprs {
- /// DistributeLowerBound - used when composing 'omp distribute' with
- /// 'omp for' in a same construct.
- Expr *LB;
- /// DistributeUpperBound - used when composing 'omp distribute' with
- /// 'omp for' in a same construct.
- Expr *UB;
- /// DistributeEnsureUpperBound - used when composing 'omp distribute'
- /// with 'omp for' in a same construct, EUB depends on DistUB
- Expr *EUB;
- /// Distribute loop iteration variable init used when composing 'omp
- /// distribute'
- /// with 'omp for' in a same construct
- Expr *Init;
- /// Distribute Loop condition used when composing 'omp distribute'
- /// with 'omp for' in a same construct
- Expr *Cond;
- /// Update of LowerBound for statically scheduled omp loops for
- /// outer loop in combined constructs (e.g. 'distribute parallel for')
- Expr *NLB;
- /// Update of UpperBound for statically scheduled omp loops for
- /// outer loop in combined constructs (e.g. 'distribute parallel for')
- Expr *NUB;
- /// Distribute Loop condition used when composing 'omp distribute'
- /// with 'omp for' in a same construct when schedule is chunked.
- Expr *DistCond;
- /// 'omp parallel for' loop condition used when composed with
- /// 'omp distribute' in the same construct and when schedule is
- /// chunked and the chunk size is 1.
- Expr *ParForInDistCond;
- };
-
- /// The expressions built for the OpenMP loop CodeGen for the
- /// whole collapsed loop nest.
- struct HelperExprs {
- /// Loop iteration variable.
- Expr *IterationVarRef;
- /// Loop last iteration number.
- Expr *LastIteration;
- /// Loop number of iterations.
- Expr *NumIterations;
- /// Calculation of last iteration.
- Expr *CalcLastIteration;
- /// Loop pre-condition.
- Expr *PreCond;
- /// Loop condition.
- Expr *Cond;
- /// Loop iteration variable init.
- Expr *Init;
- /// Loop increment.
- Expr *Inc;
- /// IsLastIteration - local flag variable passed to runtime.
- Expr *IL;
- /// LowerBound - local variable passed to runtime.
- Expr *LB;
- /// UpperBound - local variable passed to runtime.
- Expr *UB;
- /// Stride - local variable passed to runtime.
- Expr *ST;
- /// EnsureUpperBound -- expression UB = min(UB, NumIterations).
- Expr *EUB;
- /// Update of LowerBound for statically scheduled 'omp for' loops.
- Expr *NLB;
- /// Update of UpperBound for statically scheduled 'omp for' loops.
- Expr *NUB;
- /// PreviousLowerBound - local variable passed to runtime in the
- /// enclosing schedule or null if that does not apply.
- Expr *PrevLB;
- /// PreviousUpperBound - local variable passed to runtime in the
- /// enclosing schedule or null if that does not apply.
- Expr *PrevUB;
- /// DistInc - increment expression for distribute loop when found
- /// combined with a further loop level (e.g. in 'distribute parallel for')
- /// expression IV = IV + ST
- Expr *DistInc;
- /// PrevEUB - expression similar to EUB but to be used when loop
- /// scheduling uses PrevLB and PrevUB (e.g. in 'distribute parallel for'
- /// when ensuring that the UB is either the calculated UB by the runtime or
- /// the end of the assigned distribute chunk)
- /// expression UB = min (UB, PrevUB)
- Expr *PrevEUB;
- /// Counters Loop counters.
- SmallVector<Expr *, 4> Counters;
- /// PrivateCounters Loop counters.
- SmallVector<Expr *, 4> PrivateCounters;
- /// Expressions for loop counters inits for CodeGen.
- SmallVector<Expr *, 4> Inits;
- /// Expressions for loop counters update for CodeGen.
- SmallVector<Expr *, 4> Updates;
- /// Final loop counter values for GodeGen.
- SmallVector<Expr *, 4> Finals;
- /// List of counters required for the generation of the non-rectangular
- /// loops.
- SmallVector<Expr *, 4> DependentCounters;
- /// List of initializers required for the generation of the non-rectangular
- /// loops.
- SmallVector<Expr *, 4> DependentInits;
- /// List of final conditions required for the generation of the
- /// non-rectangular loops.
- SmallVector<Expr *, 4> FinalsConditions;
- /// Init statement for all captured expressions.
- Stmt *PreInits;
-
- /// Expressions used when combining OpenMP loop pragmas
- DistCombinedHelperExprs DistCombinedFields;
-
- /// Check if all the expressions are built (does not check the
- /// worksharing ones).
- bool builtAll() {
- return IterationVarRef != nullptr && LastIteration != nullptr &&
- NumIterations != nullptr && PreCond != nullptr &&
- Cond != nullptr && Init != nullptr && Inc != nullptr;
- }
-
- /// Initialize all the fields to null.
- /// \param Size Number of elements in the
- /// counters/finals/updates/dependent_counters/dependent_inits/finals_conditions
- /// arrays.
- void clear(unsigned Size) {
- IterationVarRef = nullptr;
- LastIteration = nullptr;
- CalcLastIteration = nullptr;
- PreCond = nullptr;
- Cond = nullptr;
- Init = nullptr;
- Inc = nullptr;
- IL = nullptr;
- LB = nullptr;
- UB = nullptr;
- ST = nullptr;
- EUB = nullptr;
- NLB = nullptr;
- NUB = nullptr;
- NumIterations = nullptr;
- PrevLB = nullptr;
- PrevUB = nullptr;
- DistInc = nullptr;
- PrevEUB = nullptr;
- Counters.resize(Size);
- PrivateCounters.resize(Size);
- Inits.resize(Size);
- Updates.resize(Size);
- Finals.resize(Size);
- DependentCounters.resize(Size);
- DependentInits.resize(Size);
- FinalsConditions.resize(Size);
- for (unsigned i = 0; i < Size; ++i) {
- Counters[i] = nullptr;
- PrivateCounters[i] = nullptr;
- Inits[i] = nullptr;
- Updates[i] = nullptr;
- Finals[i] = nullptr;
- DependentCounters[i] = nullptr;
- DependentInits[i] = nullptr;
- FinalsConditions[i] = nullptr;
- }
- PreInits = nullptr;
- DistCombinedFields.LB = nullptr;
- DistCombinedFields.UB = nullptr;
- DistCombinedFields.EUB = nullptr;
- DistCombinedFields.Init = nullptr;
- DistCombinedFields.Cond = nullptr;
- DistCombinedFields.NLB = nullptr;
- DistCombinedFields.NUB = nullptr;
- DistCombinedFields.DistCond = nullptr;
- DistCombinedFields.ParForInDistCond = nullptr;
- }
- };
-
- /// Get number of collapsed loops.
- unsigned getCollapsedNumber() const { return CollapsedNum; }
-
Expr *getIterationVariable() const {
return cast<Expr>(Data->getChildren()[IterationVariableOffset]);
}
@@ -1067,17 +1404,6 @@ public:
"expected loop bound distribute sharing directive");
return cast<Expr>(Data->getChildren()[CombinedParForInDistConditionOffset]);
}
- /// Try to find the next loop sub-statement in the specified statement \p
- /// CurStmt.
- /// \param TryImperfectlyNestedLoops true, if we need to try to look for the
- /// imperfectly nested loop.
- static Stmt *tryToFindNextInnerLoop(Stmt *CurStmt,
- bool TryImperfectlyNestedLoops);
- static const Stmt *tryToFindNextInnerLoop(const Stmt *CurStmt,
- bool TryImperfectlyNestedLoops) {
- return tryToFindNextInnerLoop(const_cast<Stmt *>(CurStmt),
- TryImperfectlyNestedLoops);
- }
Stmt *getBody();
const Stmt *getBody() const {
return const_cast<OMPLoopDirective *>(this)->getBody();
@@ -1263,7 +1589,7 @@ class OMPForDirective : public OMPLoopDirective {
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
- Data->getChildren()[numLoopChildren(getCollapsedNumber(),
+ Data->getChildren()[numLoopChildren(getLoopsNumber(),
llvm::omp::OMPD_for)] = E;
}
@@ -1303,7 +1629,7 @@ public:
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_for)]);
+ getLoopsNumber(), llvm::omp::OMPD_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPForDirective *>(this)->getTaskReductionRefExpr();
@@ -1728,7 +2054,7 @@ class OMPParallelForDirective : public OMPLoopDirective {
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
- Data->getChildren()[numLoopChildren(getCollapsedNumber(),
+ Data->getChildren()[numLoopChildren(getLoopsNumber(),
llvm::omp::OMPD_parallel_for)] = E;
}
@@ -1770,7 +2096,7 @@ public:
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_parallel_for)]);
+ getLoopsNumber(), llvm::omp::OMPD_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPParallelForDirective *>(this)
@@ -2884,7 +3210,7 @@ class OMPTargetParallelForDirective : public OMPLoopDirective {
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
+ getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)] = E;
}
/// Set cancel state.
@@ -2925,7 +3251,7 @@ public:
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_target_parallel_for)]);
+ getLoopsNumber(), llvm::omp::OMPD_target_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTargetParallelForDirective *>(this)
@@ -3696,7 +4022,7 @@ class OMPDistributeParallelForDirective : public OMPLoopDirective {
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
+ getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)] = E;
}
/// Set cancel state.
@@ -3737,7 +4063,7 @@ public:
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
+ getLoopsNumber(), llvm::omp::OMPD_distribute_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPDistributeParallelForDirective *>(this)
@@ -4255,8 +4581,7 @@ class OMPTeamsDistributeParallelForDirective final : public OMPLoopDirective {
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] =
- E;
+ getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)] = E;
}
/// Set cancel state.
@@ -4295,7 +4620,7 @@ public:
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
- getCollapsedNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
+ getLoopsNumber(), llvm::omp::OMPD_teams_distribute_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
return const_cast<OMPTeamsDistributeParallelForDirective *>(this)
@@ -4472,7 +4797,7 @@ class OMPTargetTeamsDistributeParallelForDirective final
/// Sets special task reduction descriptor.
void setTaskReductionRefExpr(Expr *E) {
Data->getChildren()[numLoopChildren(
- getCollapsedNumber(),
+ getLoopsNumber(),
llvm::omp::OMPD_target_teams_distribute_parallel_for)] = E;
}
@@ -4512,7 +4837,7 @@ public:
/// Returns special task reduction reference expression.
Expr *getTaskReductionRefExpr() {
return cast_or_null<Expr>(Data->getChildren()[numLoopChildren(
- getCollapsedNumber(),
+ getLoopsNumber(),
llvm::omp::OMPD_target_teams_distribute_parallel_for)]);
}
const Expr *getTaskReductionRefExpr() const {
@@ -4666,6 +4991,154 @@ public:
}
};
+/// This represents the '#pragma omp tile' loop transformation directive.
+class OMPTileDirective final : public OMPLoopBasedDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// Default list of offsets.
+ enum {
+ PreInitsOffset = 0,
+ TransformedStmtOffset,
+ };
+
+ explicit OMPTileDirective(SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned NumLoops)
+ : OMPLoopBasedDirective(OMPTileDirectiveClass, llvm::omp::OMPD_tile,
+ StartLoc, EndLoc, NumLoops) {}
+
+ void setPreInits(Stmt *PreInits) {
+ Data->getChildren()[PreInitsOffset] = PreInits;
+ }
+
+ void setTransformedStmt(Stmt *S) {
+ Data->getChildren()[TransformedStmtOffset] = S;
+ }
+
+public:
+ /// Create a new AST node representation for '#pragma omp tile'.
+ ///
+ /// \param C Context of the AST.
+ /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
+ /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
+ /// \param Clauses The directive's clauses.
+ /// \param NumLoops Number of associated loops (number of items in the
+ /// 'sizes' clause).
+ /// \param AssociatedStmt The outermost associated loop.
+ /// \param TransformedStmt The loop nest after tiling, or nullptr in
+ /// dependent contexts.
+ /// \param PreInits Helper preinits statements for the loop nest.
+ static OMPTileDirective *Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ unsigned NumLoops, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits);
+
+ /// Build an empty '#pragma omp tile' AST node for deserialization.
+ ///
+ /// \param C Context of the AST.
+ /// \param NumClauses Number of clauses to allocate.
+ /// \param NumLoops Number of associated loops to allocate.
+ static OMPTileDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
+ unsigned NumLoops);
+
+ unsigned getNumAssociatedLoops() const { return getLoopsNumber(); }
+
+ /// Gets/sets the associated loops after tiling.
+ ///
+ /// This is in de-sugared format stored as a CompoundStmt.
+ ///
+ /// \code
+ /// for (...)
+ /// ...
+ /// \endcode
+ ///
+ /// Note that if the generated loops become associated loops of another
+ /// directive, they may need to be hoisted before them.
+ Stmt *getTransformedStmt() const {
+ return Data->getChildren()[TransformedStmtOffset];
+ }
+
+ /// Return preinits statement.
+ Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPTileDirectiveClass;
+ }
+};
+
+/// This represents the '#pragma omp unroll' loop transformation directive.
+///
+/// \code
+/// #pragma omp unroll
+/// for (int i = 0; i < 64; ++i)
+/// \endcode
+class OMPUnrollDirective final : public OMPLoopBasedDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// Default list of offsets.
+ enum {
+ PreInitsOffset = 0,
+ TransformedStmtOffset,
+ };
+
+ explicit OMPUnrollDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPLoopBasedDirective(OMPUnrollDirectiveClass, llvm::omp::OMPD_unroll,
+ StartLoc, EndLoc, 1) {}
+
+ /// Set the pre-init statements.
+ void setPreInits(Stmt *PreInits) {
+ Data->getChildren()[PreInitsOffset] = PreInits;
+ }
+
+ /// Set the de-sugared statement.
+ void setTransformedStmt(Stmt *S) {
+ Data->getChildren()[TransformedStmtOffset] = S;
+ }
+
+public:
+ /// Create a new AST node representation for '#pragma omp unroll'.
+ ///
+ /// \param C Context of the AST.
+ /// \param StartLoc Location of the introducer (e.g. the 'omp' token).
+ /// \param EndLoc Location of the directive's end (e.g. the tok::eod).
+ /// \param Clauses The directive's clauses.
+ /// \param AssociatedStmt The outermost associated loop.
+ /// \param TransformedStmt The loop nest after unrolling, or nullptr in
+ /// dependent contexts.
+ /// \param PreInits Helper preinits statements for the loop nest.
+ static OMPUnrollDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits);
+
+ /// Build an empty '#pragma omp unroll' AST node for deserialization.
+ ///
+ /// \param C Context of the AST.
+ /// \param NumClauses Number of clauses to allocate.
+ static OMPUnrollDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses);
+
+ /// Get the de-sugared associated loops after unrolling.
+ ///
+ /// This is only used if the unrolled loop becomes an associated loop of
+ /// another directive, otherwise the loop is emitted directly using loop
+ /// transformation metadata. When the unrolled loop cannot be used by another
+ /// directive (e.g. because of the full clause), the transformed stmt can also
+ /// be nullptr.
+ Stmt *getTransformedStmt() const {
+ return Data->getChildren()[TransformedStmtOffset];
+ }
+
+ /// Return the pre-init statements.
+ Stmt *getPreInits() const { return Data->getChildren()[PreInitsOffset]; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPUnrollDirectiveClass;
+ }
+};
+
/// This represents '#pragma omp scan' directive.
///
/// \code
@@ -4718,6 +5191,175 @@ public:
}
};
+/// This represents '#pragma omp interop' directive.
+///
+/// \code
+/// #pragma omp interop init(target:obj) device(x) depend(inout:y) nowait
+/// \endcode
+/// In this example directive '#pragma omp interop' has
+/// clauses 'init', 'device', 'depend' and 'nowait'.
+///
+class OMPInteropDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive.
+ /// \param EndLoc Ending location of the directive.
+ ///
+ OMPInteropDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPInteropDirectiveClass,
+ llvm::omp::OMPD_interop, StartLoc, EndLoc) {}
+
+ /// Build an empty directive.
+ ///
+ explicit OMPInteropDirective()
+ : OMPExecutableDirective(OMPInteropDirectiveClass,
+ llvm::omp::OMPD_interop, SourceLocation(),
+ SourceLocation()) {}
+
+public:
+ /// Creates directive.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param Clauses The directive's clauses.
+ ///
+ static OMPInteropDirective *Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses);
+
+ /// Creates an empty directive.
+ ///
+ /// \param C AST context.
+ ///
+ static OMPInteropDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPInteropDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp dispatch' directive.
+///
+/// \code
+/// #pragma omp dispatch device(dnum)
+/// \endcode
+/// This example shows a directive '#pragma omp dispatch' with a
+/// device clause with variable 'dnum'.
+///
+class OMPDispatchDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// The location of the target-call.
+ SourceLocation TargetCallLoc;
+
+ /// Set the location of the target-call.
+ void setTargetCallLoc(SourceLocation Loc) { TargetCallLoc = Loc; }
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ ///
+ OMPDispatchDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPDispatchDirectiveClass,
+ llvm::omp::OMPD_dispatch, StartLoc, EndLoc) {}
+
+ /// Build an empty directive.
+ ///
+ explicit OMPDispatchDirective()
+ : OMPExecutableDirective(OMPDispatchDirectiveClass,
+ llvm::omp::OMPD_dispatch, SourceLocation(),
+ SourceLocation()) {}
+
+public:
+ /// Creates directive with a list of \a Clauses.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param Clauses List of clauses.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ /// \param TargetCallLoc Location of the target-call.
+ ///
+ static OMPDispatchDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ SourceLocation TargetCallLoc);
+
+ /// Creates an empty directive with the place for \a NumClauses
+ /// clauses.
+ ///
+ /// \param C AST context.
+ /// \param NumClauses Number of clauses.
+ ///
+ static OMPDispatchDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
+
+ /// Return location of target-call.
+ SourceLocation getTargetCallLoc() const { return TargetCallLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPDispatchDirectiveClass;
+ }
+};
+
+/// This represents '#pragma omp masked' directive.
+/// \code
+/// #pragma omp masked filter(tid)
+/// \endcode
+/// This example shows a directive '#pragma omp masked' with a filter clause
+/// with variable 'tid'.
+///
+class OMPMaskedDirective final : public OMPExecutableDirective {
+ friend class ASTStmtReader;
+ friend class OMPExecutableDirective;
+
+ /// Build directive with the given start and end location.
+ ///
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending location of the directive.
+ ///
+ OMPMaskedDirective(SourceLocation StartLoc, SourceLocation EndLoc)
+ : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
+ StartLoc, EndLoc) {}
+
+ /// Build an empty directive.
+ ///
+ explicit OMPMaskedDirective()
+ : OMPExecutableDirective(OMPMaskedDirectiveClass, llvm::omp::OMPD_masked,
+ SourceLocation(), SourceLocation()) {}
+
+public:
+ /// Creates directive.
+ ///
+ /// \param C AST context.
+ /// \param StartLoc Starting location of the directive kind.
+ /// \param EndLoc Ending Location of the directive.
+ /// \param AssociatedStmt Statement, associated with the directive.
+ ///
+ static OMPMaskedDirective *
+ Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
+
+ /// Creates an empty directive.
+ ///
+ /// \param C AST context.
+ ///
+ static OMPMaskedDirective *CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OMPMaskedDirectiveClass;
+ }
+};
+
} // end namespace clang
#endif
diff --git a/clang/include/clang/AST/TemplateBase.h b/clang/include/clang/AST/TemplateBase.h
index 1671637521e2..fa27a12cfbb9 100644
--- a/clang/include/clang/AST/TemplateBase.h
+++ b/clang/include/clang/AST/TemplateBase.h
@@ -389,7 +389,8 @@ public:
TemplateArgument getPackExpansionPattern() const;
/// Print this template argument to the given output stream.
- void print(const PrintingPolicy &Policy, raw_ostream &Out) const;
+ void print(const PrintingPolicy &Policy, raw_ostream &Out,
+ bool IncludeType) const;
/// Debugging aid that dumps the template argument.
void dump(raw_ostream &Out) const;
@@ -512,7 +513,8 @@ public:
}
TypeSourceInfo *getTypeSourceInfo() const {
- assert(Argument.getKind() == TemplateArgument::Type);
+ if (Argument.getKind() != TemplateArgument::Type)
+ return nullptr;
return LocInfo.getAsTypeSourceInfo();
}
diff --git a/clang/include/clang/AST/TextNodeDumper.h b/clang/include/clang/AST/TextNodeDumper.h
index 15ca348f4766..0eb0031de11f 100644
--- a/clang/include/clang/AST/TextNodeDumper.h
+++ b/clang/include/clang/AST/TextNodeDumper.h
@@ -19,6 +19,7 @@
#include "clang/AST/CommentCommandTraits.h"
#include "clang/AST/CommentVisitor.h"
#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/TemplateArgumentVisitor.h"
@@ -69,10 +70,8 @@ public:
return;
}
- // We need to capture an owning-string in the lambda because the lambda
- // is invoked in a deferred manner.
- std::string LabelStr(Label);
- auto DumpWithIndent = [this, DoAddChild, LabelStr](bool IsLastChild) {
+ auto DumpWithIndent = [this, DoAddChild,
+ Label(Label.str())](bool IsLastChild) {
// Print out the appropriate tree structure and work out the prefix for
// children of this node. For instance:
//
@@ -89,8 +88,8 @@ public:
OS << '\n';
ColorScope Color(OS, ShowColors, IndentColor);
OS << Prefix << (IsLastChild ? '`' : '|') << '-';
- if (!LabelStr.empty())
- OS << LabelStr << ": ";
+ if (!Label.empty())
+ OS << Label << ": ";
this->Prefix.push_back(IsLastChild ? ' ' : '|');
this->Prefix.push_back(' ');
@@ -190,6 +189,8 @@ public:
void Visit(const GenericSelectionExpr::ConstAssociation &A);
+ void Visit(const concepts::Requirement *R);
+
void Visit(const APValue &Value, QualType Ty);
void dumpPointer(const void *Ptr);
@@ -251,6 +252,7 @@ public:
void VisitCastExpr(const CastExpr *Node);
void VisitImplicitCastExpr(const ImplicitCastExpr *Node);
void VisitDeclRefExpr(const DeclRefExpr *Node);
+ void VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *Node);
void VisitPredefinedExpr(const PredefinedExpr *Node);
void VisitCharacterLiteral(const CharacterLiteral *Node);
void VisitIntegerLiteral(const IntegerLiteral *Node);
@@ -297,6 +299,7 @@ public:
void VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *Node);
void VisitOMPIteratorExpr(const OMPIteratorExpr *Node);
void VisitConceptSpecializationExpr(const ConceptSpecializationExpr *Node);
+ void VisitRequiresExpr(const RequiresExpr *Node);
void VisitRValueReferenceType(const ReferenceType *T);
void VisitArrayType(const ArrayType *T);
@@ -352,6 +355,7 @@ public:
void VisitUsingDecl(const UsingDecl *D);
void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
+ void VisitUsingEnumDecl(const UsingEnumDecl *D);
void VisitUsingShadowDecl(const UsingShadowDecl *D);
void VisitConstructorUsingShadowDecl(const ConstructorUsingShadowDecl *D);
void VisitLinkageSpecDecl(const LinkageSpecDecl *D);
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index 319d3850346b..9f46d5337897 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -486,9 +486,16 @@ public:
// allocated on device, which are a subset of __global.
(A == LangAS::opencl_global && (B == LangAS::opencl_global_device ||
B == LangAS::opencl_global_host)) ||
+ (A == LangAS::sycl_global && (B == LangAS::sycl_global_device ||
+ B == LangAS::sycl_global_host)) ||
// Consider pointer size address spaces to be equivalent to default.
((isPtrSizeAddressSpace(A) || A == LangAS::Default) &&
- (isPtrSizeAddressSpace(B) || B == LangAS::Default));
+ (isPtrSizeAddressSpace(B) || B == LangAS::Default)) ||
+ // Default is a superset of SYCL address spaces.
+ (A == LangAS::Default &&
+ (B == LangAS::sycl_private || B == LangAS::sycl_local ||
+ B == LangAS::sycl_global || B == LangAS::sycl_global_device ||
+ B == LangAS::sycl_global_host));
}
/// Returns true if the address space in these qualifiers is equal to or
@@ -2492,6 +2499,9 @@ public:
// PPC MMA Types
#define PPC_VECTOR_TYPE(Name, Id, Size) Id,
#include "clang/Basic/PPCTypes.def"
+// RVV Types
+#define RVV_TYPE(Name, Id, SingletonId) Id,
+#include "clang/Basic/RISCVVTypes.def"
// All other builtin types
#define BUILTIN_TYPE(Id, SingletonId) Id,
#define LAST_BUILTIN_TYPE(Id) LastKind = Id
@@ -5412,8 +5422,13 @@ class ElaboratedType final
ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
QualType NamedType, QualType CanonType, TagDecl *OwnedTagDecl)
: TypeWithKeyword(Keyword, Elaborated, CanonType,
+ // Any semantic dependence on the qualifier will have
+ // been incorporated into NamedType. We still need to
+ // track syntactic (instantiation / error / pack)
+ // dependence on the qualifier.
NamedType->getDependence() |
- (NNS ? toTypeDependence(NNS->getDependence())
+ (NNS ? toSyntacticDependence(
+ toTypeDependence(NNS->getDependence()))
: TypeDependence::None)),
NNS(NNS), NamedType(NamedType) {
ElaboratedTypeBits.HasOwnedTagDecl = false;
diff --git a/clang/include/clang/AST/TypeProperties.td b/clang/include/clang/AST/TypeProperties.td
index ffcc8290938f..438d5af5a2e2 100644
--- a/clang/include/clang/AST/TypeProperties.td
+++ b/clang/include/clang/AST/TypeProperties.td
@@ -769,6 +769,10 @@ let Class = BuiltinType in {
case BuiltinType::ID: return ctx.ID##Ty;
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(NAME, ID, SINGLETON_ID) \
+ case BuiltinType::ID: return ctx.SINGLETON_ID;
+#include "clang/Basic/RISCVVTypes.def"
+
#define BUILTIN_TYPE(ID, SINGLETON_ID) \
case BuiltinType::ID: return ctx.SINGLETON_ID;
#include "clang/AST/BuiltinTypes.def"
diff --git a/clang/include/clang/AST/VTableBuilder.h b/clang/include/clang/AST/VTableBuilder.h
index 241dd13f903e..e451f3f861b7 100644
--- a/clang/include/clang/AST/VTableBuilder.h
+++ b/clang/include/clang/AST/VTableBuilder.h
@@ -18,6 +18,7 @@
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/ABI.h"
+#include "clang/Basic/Thunk.h"
#include "llvm/ADT/DenseMap.h"
#include <memory>
#include <utility>
diff --git a/clang/include/clang/ASTMatchers/ASTMatchFinder.h b/clang/include/clang/ASTMatchers/ASTMatchFinder.h
index 81125ad8d96d..91024f9425e0 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchFinder.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchFinder.h
@@ -110,6 +110,12 @@ public:
/// This id is used, for example, for the profiling output.
/// It defaults to "<unknown>".
virtual StringRef getID() const;
+
+ /// TraversalKind to use while matching and processing
+ /// the result nodes. This API is temporary to facilitate
+ /// third parties porting existing code to the default
+ /// behavior of clang-tidy.
+ virtual llvm::Optional<TraversalKind> getCheckTraversalKind() const;
};
/// Called when parsing is finished. Intended for testing only.
@@ -280,6 +286,11 @@ public:
void run(const MatchFinder::MatchResult &Result) override {
Nodes.push_back(Result.Nodes);
}
+
+ llvm::Optional<TraversalKind> getCheckTraversalKind() const override {
+ return llvm::None;
+ }
+
SmallVector<BoundNodes, 1> Nodes;
};
}
diff --git a/clang/include/clang/ASTMatchers/ASTMatchers.h b/clang/include/clang/ASTMatchers/ASTMatchers.h
index 6f6dfab59a39..8e3ee6cb9e7e 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchers.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchers.h
@@ -144,6 +144,7 @@ using TypeMatcher = internal::Matcher<QualType>;
using TypeLocMatcher = internal::Matcher<TypeLoc>;
using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>;
using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>;
+using CXXBaseSpecifierMatcher = internal::Matcher<CXXBaseSpecifier>;
using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>;
using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>;
using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>;
@@ -344,9 +345,19 @@ extern const internal::VariadicAllOfMatcher<Decl> decl;
/// int number = 42;
/// auto [foo, bar] = std::make_pair{42, 42};
/// \endcode
-extern const internal::VariadicAllOfMatcher<DecompositionDecl>
+extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl>
decompositionDecl;
+/// Matches binding declarations
+/// Example matches \c foo and \c bar
+/// (matcher = bindingDecl())
+///
+/// \code
+/// auto [foo, bar] = std::make_pair{42, 42};
+/// \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl>
+ bindingDecl;
+
/// Matches a declaration of a linkage specification.
///
/// Given
@@ -506,6 +517,15 @@ extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl>
extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
+/// Matches class bases.
+///
+/// Examples matches \c public virtual B.
+/// \code
+/// class B {};
+/// class C : public virtual B {};
+/// \endcode
+extern const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier;
+
/// Matches constructor initializers.
///
/// Examples matches \c i(42).
@@ -825,26 +845,16 @@ traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor<
ToTypes>>(TK, InnerMatcher);
}
-template <template <typename T, typename P1> class MatcherT, typename P1,
+template <template <typename T, typename... P> class MatcherT, typename... P,
typename ReturnTypesF>
internal::TraversalWrapper<
- internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>
-traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1<
- MatcherT, P1, ReturnTypesF> &InnerMatcher) {
- return internal::TraversalWrapper<
- internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>(
- TK, InnerMatcher);
-}
-
-template <template <typename T, typename P1, typename P2> class MatcherT,
- typename P1, typename P2, typename ReturnTypesF>
-internal::TraversalWrapper<
- internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>
-traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2<
- MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) {
+ internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>
+traverse(TraversalKind TK,
+ const internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>
+ &InnerMatcher) {
return internal::TraversalWrapper<
- internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>(
- TK, InnerMatcher);
+ internal::PolymorphicMatcher<MatcherT, ReturnTypesF, P...>>(TK,
+ InnerMatcher);
}
template <typename... T>
@@ -903,7 +913,7 @@ AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>,
/// varDecl(hasInitializer(integerLiteral()))
/// varDecl(hasInitializer(declRefExpr()))
/// \endcode
-/// only match the declarations for b, c, and d.
+/// only match the declarations for a.
AST_MATCHER_P(Expr, ignoringImpCasts,
internal::Matcher<Expr>, InnerMatcher) {
return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder);
@@ -1203,7 +1213,7 @@ AST_MATCHER_P(TemplateArgument, equalsIntegralValue,
std::string, Value) {
if (Node.getKind() != TemplateArgument::Integral)
return false;
- return Node.getAsIntegral().toString(10) == Value;
+ return toString(Node.getAsIntegral(), 10) == Value;
}
/// Matches an Objective-C autorelease pool statement.
@@ -1742,6 +1752,18 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt,
/// matches \code using X::x \endcode
extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
+/// Matches using-enum declarations.
+///
+/// Given
+/// \code
+/// namespace X { enum x {...}; }
+/// using enum X::x;
+/// \endcode
+/// usingEnumDecl()
+/// matches \code using enum X::x \endcode
+extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl>
+ usingEnumDecl;
+
/// Matches using namespace declarations.
///
/// Given
@@ -2162,6 +2184,17 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
continueStmt;
+/// Matches co_return statements.
+///
+/// Given
+/// \code
+/// while (true) { co_return; }
+/// \endcode
+/// coreturnStmt()
+/// matches 'co_return'
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt>
+ coreturnStmt;
+
/// Matches return statements.
///
/// Given
@@ -2379,6 +2412,30 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral>
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
compoundLiteralExpr;
+/// Matches co_await expressions.
+///
+/// Given
+/// \code
+/// co_await 1;
+/// \endcode
+/// coawaitExpr()
+/// matches 'co_await 1'
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr>
+ coawaitExpr;
+/// Matches co_await expressions where the type of the promise is dependent
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
+ dependentCoawaitExpr;
+/// Matches co_yield expressions.
+///
+/// Given
+/// \code
+/// co_yield 1;
+/// \endcode
+/// coyieldExpr()
+/// matches 'co_yield 1'
+extern const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
+ coyieldExpr;
+
/// Matches nullptr literal.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
@@ -2829,6 +2886,42 @@ extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator>
binaryOperation;
+/// Matches function calls and constructor calls
+///
+/// Because CallExpr and CXXConstructExpr do not share a common
+/// base class with API accessing arguments etc, AST Matchers for code
+/// which should match both are typically duplicated. This matcher
+/// removes the need for duplication.
+///
+/// Given code
+/// \code
+/// struct ConstructorTakesInt
+/// {
+/// ConstructorTakesInt(int i) {}
+/// };
+///
+/// void callTakesInt(int i)
+/// {
+/// }
+///
+/// void doCall()
+/// {
+/// callTakesInt(42);
+/// }
+///
+/// void doConstruct()
+/// {
+/// ConstructorTakesInt cti(42);
+/// }
+/// \endcode
+///
+/// The matcher
+/// \code
+/// invocation(hasArgument(0, integerLiteral(equals(42))))
+/// \endcode
+/// matches the expression in both doCall and doConstruct
+extern const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation;
+
/// Matches unary expressions that have a specific type of argument.
///
/// Given
@@ -2950,14 +3043,15 @@ AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
-inline internal::PolymorphicMatcherWithParam1<
- internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
- AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
+inline internal::PolymorphicMatcher<
+ internal::HasOverloadedOperatorNameMatcher,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
+ std::vector<std::string>>
hasOverloadedOperatorName(StringRef Name) {
- return internal::PolymorphicMatcherWithParam1<
- internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
- AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
- {std::string(Name)});
+ return internal::PolymorphicMatcher<
+ internal::HasOverloadedOperatorNameMatcher,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl),
+ std::vector<std::string>>({std::string(Name)});
}
/// Matches overloaded operator names.
@@ -2969,9 +3063,10 @@ hasOverloadedOperatorName(StringRef Name) {
/// Is equivalent to
/// anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
- internal::PolymorphicMatcherWithParam1<
- internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
- AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
+ internal::PolymorphicMatcher<internal::HasOverloadedOperatorNameMatcher,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(
+ CXXOperatorCallExpr, FunctionDecl),
+ std::vector<std::string>>,
StringRef, internal::hasAnyOverloadedOperatorNameFunc>
hasAnyOverloadedOperatorName;
@@ -3460,13 +3555,14 @@ extern const internal::VariadicOperatorMatcherFunc<1, 1> unless;
/// Matcher<TagType>, Matcher<TemplateSpecializationType>,
/// Matcher<TemplateTypeParmType>, Matcher<TypedefType>,
/// Matcher<UnresolvedUsingType>
-inline internal::PolymorphicMatcherWithParam1<
- internal::HasDeclarationMatcher, internal::Matcher<Decl>,
- void(internal::HasDeclarationSupportedTypes)>
+inline internal::PolymorphicMatcher<
+ internal::HasDeclarationMatcher,
+ void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>
hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) {
- return internal::PolymorphicMatcherWithParam1<
- internal::HasDeclarationMatcher, internal::Matcher<Decl>,
- void(internal::HasDeclarationSupportedTypes)>(InnerMatcher);
+ return internal::PolymorphicMatcher<
+ internal::HasDeclarationMatcher,
+ void(internal::HasDeclarationSupportedTypes), internal::Matcher<Decl>>(
+ InnerMatcher);
}
/// Matches a \c NamedDecl whose underlying declaration matches the given
@@ -3751,16 +3847,19 @@ AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and U (matcher = typedefDecl(hasType(asString("int")))
/// and friend class X (matcher = friendDecl(hasType("X"))
+/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
+/// asString("class X"))))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// typedef int U;
/// class Y { friend class X; };
+/// class Z : public virtual X {};
/// \endcode
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
hasType,
AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl,
- ValueDecl),
+ ValueDecl, CXXBaseSpecifier),
internal::Matcher<QualType>, InnerMatcher, 0) {
QualType QT = internal::getUnderlyingType(Node);
if (!QT.isNull())
@@ -3780,10 +3879,13 @@ AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
/// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X")))))
/// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X")))))
/// and friend class X (matcher = friendDecl(hasType("X"))
+/// and public virtual X (matcher = cxxBaseSpecifier(hasType(
+/// cxxRecordDecl(hasName("X")))))
/// \code
/// class X {};
/// void y(X &x) { x; X z; }
/// class Y { friend class X; };
+/// class Z : public virtual X {};
/// \endcode
///
/// Example matches class Derived
@@ -3806,20 +3908,51 @@ AST_POLYMORPHIC_MATCHER_P_OVERLOAD(
return false;
}
-/// Matches if the type location of the declarator decl's type matches
-/// the inner matcher.
+/// Matches if the type location of a node matches the inner matcher.
///
-/// Given
+/// Examples:
/// \code
/// int x;
/// \endcode
/// declaratorDecl(hasTypeLoc(loc(asString("int"))))
/// matches int x
-AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) {
- if (!Node.getTypeSourceInfo())
+///
+/// \code
+/// auto x = int(3);
+/// \endcode
+/// cxxTemporaryObjectExpr(hasTypeLoc(loc(asString("int"))))
+/// matches int(3)
+///
+/// \code
+/// struct Foo { Foo(int, int); };
+/// auto x = Foo(1, 2);
+/// \endcode
+/// cxxFunctionalCastExpr(hasTypeLoc(loc(asString("struct Foo"))))
+/// matches Foo(1, 2)
+///
+/// Usable as: Matcher<BlockDecl>, Matcher<CXXBaseSpecifier>,
+/// Matcher<CXXCtorInitializer>, Matcher<CXXFunctionalCastExpr>,
+/// Matcher<CXXNewExpr>, Matcher<CXXTemporaryObjectExpr>,
+/// Matcher<CXXUnresolvedConstructExpr>,
+/// Matcher<ClassTemplateSpecializationDecl>, Matcher<CompoundLiteralExpr>,
+/// Matcher<DeclaratorDecl>, Matcher<ExplicitCastExpr>,
+/// Matcher<ObjCPropertyDecl>, Matcher<TemplateArgumentLoc>,
+/// Matcher<TypedefNameDecl>
+AST_POLYMORPHIC_MATCHER_P(
+ hasTypeLoc,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(
+ BlockDecl, CXXBaseSpecifier, CXXCtorInitializer, CXXFunctionalCastExpr,
+ CXXNewExpr, CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr,
+ ClassTemplateSpecializationDecl, CompoundLiteralExpr, DeclaratorDecl,
+ ExplicitCastExpr, ObjCPropertyDecl, TemplateArgumentLoc,
+ TypedefNameDecl),
+ internal::Matcher<TypeLoc>, Inner) {
+ TypeSourceInfo *source = internal::GetTypeSourceInfo(Node);
+ if (source == nullptr) {
// This happens for example for implicit destructors.
return false;
- return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder);
+ }
+ return Inner.matches(source->getTypeLoc(), Finder, Builder);
}
/// Matches if the matched type is represented by the given string.
@@ -4667,8 +4800,11 @@ AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType,
int ParamIndex = 0;
bool Matched = false;
+ unsigned NumArgs = Node.getNumArgs();
+ if (FProto && FProto->isVariadic())
+ NumArgs = std::min(NumArgs, FProto->getNumParams());
- for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) {
+ for (; ArgIndex < NumArgs; ++ArgIndex, ++ParamIndex) {
BoundNodesTreeBuilder ArgMatches(*Builder);
if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder,
&ArgMatches)) {
@@ -5178,7 +5314,7 @@ AST_POLYMORPHIC_MATCHER_P(hasBody,
/// void f() {}
/// void g();
/// \endcode
-/// hasAnyBody(functionDecl())
+/// functionDecl(hasAnyBody(compoundStmt()))
/// matches both 'void f();'
/// and 'void f() {}'
/// with compoundStmt()
@@ -5253,11 +5389,12 @@ AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) {
/// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>,
/// Matcher<FloatingLiteral>, Matcher<IntegerLiteral>
template <typename ValueT>
-internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT>
+internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
+ void(internal::AllNodeBaseTypes), ValueT>
equals(const ValueT &Value) {
- return internal::PolymorphicMatcherWithParam1<
- internal::ValueEqualsMatcher,
- ValueT>(Value);
+ return internal::PolymorphicMatcher<internal::ValueEqualsMatcher,
+ void(internal::AllNodeBaseTypes), ValueT>(
+ Value);
}
AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals,
@@ -5312,11 +5449,11 @@ AST_POLYMORPHIC_MATCHER_P(
/// Is equivalent to
/// anyOf(hasOperatorName("+"), hasOperatorName("-"))
extern const internal::VariadicFunction<
- internal::PolymorphicMatcherWithParam1<
- internal::HasAnyOperatorNameMatcher, std::vector<std::string>,
- AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator,
- UnaryOperator)>,
+ internal::PolymorphicMatcher<internal::HasAnyOperatorNameMatcher,
+ AST_POLYMORPHIC_SUPPORTED_TYPES(
+ BinaryOperator, CXXOperatorCallExpr,
+ CXXRewrittenBinaryOperator, UnaryOperator),
+ std::vector<std::string>>,
StringRef, internal::hasAnyOperatorNameFunc>
hasAnyOperatorName;
@@ -6072,7 +6209,7 @@ AST_POLYMORPHIC_MATCHER_P(
/// \endcode
/// usingDecl(hasAnyUsingShadowDecl(hasName("b"))))
/// matches \code using X::b \endcode
-AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl,
+AST_MATCHER_P(BaseUsingDecl, hasAnyUsingShadowDecl,
internal::Matcher<UsingShadowDecl>, InnerMatcher) {
return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(),
Node.shadow_end(), Finder,
@@ -7338,12 +7475,92 @@ extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr>
/// expr(nullPointerConstant())
/// matches the initializer for v1, v2, v3, cp, and ip. Does not match the
/// initializer for i.
-AST_MATCHER(Expr, nullPointerConstant) {
- return Node.isNullPointerConstant(Finder->getASTContext(),
- Expr::NPC_ValueDependentIsNull);
+AST_MATCHER_FUNCTION(internal::Matcher<Expr>, nullPointerConstant) {
+ return anyOf(
+ gnuNullExpr(), cxxNullPtrLiteralExpr(),
+ integerLiteral(equals(0), hasParent(expr(hasType(pointerType())))));
+}
+
+/// Matches the DecompositionDecl the binding belongs to.
+///
+/// For example, in:
+/// \code
+/// void foo()
+/// {
+/// int arr[3];
+/// auto &[f, s, t] = arr;
+///
+/// f = 42;
+/// }
+/// \endcode
+/// The matcher:
+/// \code
+/// bindingDecl(hasName("f"),
+/// forDecomposition(decompositionDecl()))
+/// \endcode
+/// matches 'f' in 'auto &[f, s, t]'.
+AST_MATCHER_P(BindingDecl, forDecomposition, internal::Matcher<ValueDecl>,
+ InnerMatcher) {
+ if (const ValueDecl *VD = Node.getDecomposedDecl())
+ return InnerMatcher.matches(*VD, Finder, Builder);
+ return false;
}
-/// Matches declaration of the function the statement belongs to
+/// Matches the Nth binding of a DecompositionDecl.
+///
+/// For example, in:
+/// \code
+/// void foo()
+/// {
+/// int arr[3];
+/// auto &[f, s, t] = arr;
+///
+/// f = 42;
+/// }
+/// \endcode
+/// The matcher:
+/// \code
+/// decompositionDecl(hasBinding(0,
+/// bindingDecl(hasName("f").bind("fBinding"))))
+/// \endcode
+/// matches the decomposition decl with 'f' bound to "fBinding".
+AST_MATCHER_P2(DecompositionDecl, hasBinding, unsigned, N,
+ internal::Matcher<BindingDecl>, InnerMatcher) {
+ if (Node.bindings().size() <= N)
+ return false;
+ return InnerMatcher.matches(*Node.bindings()[N], Finder, Builder);
+}
+
+/// Matches any binding of a DecompositionDecl.
+///
+/// For example, in:
+/// \code
+/// void foo()
+/// {
+/// int arr[3];
+/// auto &[f, s, t] = arr;
+///
+/// f = 42;
+/// }
+/// \endcode
+/// The matcher:
+/// \code
+/// decompositionDecl(hasAnyBinding(bindingDecl(hasName("f").bind("fBinding"))))
+/// \endcode
+/// matches the decomposition decl with 'f' bound to "fBinding".
+AST_MATCHER_P(DecompositionDecl, hasAnyBinding, internal::Matcher<BindingDecl>,
+ InnerMatcher) {
+ return llvm::any_of(Node.bindings(), [&](const auto *Binding) {
+ return InnerMatcher.matches(*Binding, Finder, Builder);
+ });
+}
+
+/// Matches declaration of the function the statement belongs to.
+///
+/// Deprecated. Use forCallable() to correctly handle the situation when
+/// the declaration is not a function (but a block or an Objective-C method).
+/// forFunction() not only fails to take non-functions into account but also
+/// may match the wrong declaration in their presence.
///
/// Given:
/// \code
@@ -7360,20 +7577,79 @@ AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>,
const auto &Parents = Finder->getASTContext().getParents(Node);
llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
- while(!Stack.empty()) {
+ while (!Stack.empty()) {
const auto &CurNode = Stack.back();
Stack.pop_back();
- if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
- if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
+ if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
+ if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
+ return true;
+ }
+ } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
+ if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
+ Builder)) {
+ return true;
+ }
+ } else {
+ for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
+ Stack.push_back(Parent);
+ }
+ }
+ return false;
+}
+
+/// Matches declaration of the function, method, or block the statement
+/// belongs to.
+///
+/// Given:
+/// \code
+/// F& operator=(const F& o) {
+/// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; });
+/// return *this;
+/// }
+/// \endcode
+/// returnStmt(forCallable(functionDecl(hasName("operator="))))
+/// matches 'return *this'
+/// but does not match 'return v > 0'
+///
+/// Given:
+/// \code
+/// -(void) foo {
+/// int x = 1;
+/// dispatch_sync(queue, ^{ int y = 2; });
+/// }
+/// \endcode
+/// declStmt(forCallable(objcMethodDecl()))
+/// matches 'int x = 1'
+/// but does not match 'int y = 2'.
+/// whereas declStmt(forCallable(blockDecl()))
+/// matches 'int y = 2'
+/// but does not match 'int x = 1'.
+AST_MATCHER_P(Stmt, forCallable, internal::Matcher<Decl>, InnerMatcher) {
+ const auto &Parents = Finder->getASTContext().getParents(Node);
+
+ llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end());
+ while (!Stack.empty()) {
+ const auto &CurNode = Stack.back();
+ Stack.pop_back();
+ if (const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) {
+ if (InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) {
+ return true;
+ }
+ } else if (const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
+ if (InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder,
+ Builder)) {
+ return true;
+ }
+ } else if (const auto *ObjCMethodDeclNode = CurNode.get<ObjCMethodDecl>()) {
+ if (InnerMatcher.matches(*ObjCMethodDeclNode, Finder, Builder)) {
return true;
}
- } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) {
- if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(),
- Finder, Builder)) {
+ } else if (const auto *BlockDeclNode = CurNode.get<BlockDecl>()) {
+ if (InnerMatcher.matches(*BlockDeclNode, Finder, Builder)) {
return true;
}
} else {
- for(const auto &Parent: Finder->getASTContext().getParents(CurNode))
+ for (const auto &Parent : Finder->getASTContext().getParents(CurNode))
Stack.push_back(Parent);
}
}
diff --git a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
index 2af4e6e88109..71f4f2d17ae3 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchersInternal.h
@@ -83,6 +83,37 @@ class BoundNodes;
namespace internal {
+/// A type-list implementation.
+///
+/// A "linked list" of types, accessible by using the ::head and ::tail
+/// typedefs.
+template <typename... Ts> struct TypeList {}; // Empty sentinel type list.
+
+template <typename T1, typename... Ts> struct TypeList<T1, Ts...> {
+ /// The first type on the list.
+ using head = T1;
+
+  /// A sublist with the tail, i.e., everything but the head.
+ ///
+ /// This type is used to do recursion. TypeList<>/EmptyTypeList indicates the
+ /// end of the list.
+ using tail = TypeList<Ts...>;
+};
+
+/// The empty type list.
+using EmptyTypeList = TypeList<>;
+
+/// Helper meta-function to determine if some type \c T is present or
+/// a parent type in the list.
+template <typename AnyTypeList, typename T> struct TypeListContainsSuperOf {
+ static const bool value =
+ std::is_base_of<typename AnyTypeList::head, T>::value ||
+ TypeListContainsSuperOf<typename AnyTypeList::tail, T>::value;
+};
+template <typename T> struct TypeListContainsSuperOf<EmptyTypeList, T> {
+ static const bool value = false;
+};
+
/// Variadic function object.
///
/// Most of the functions below that use VariadicFunction could be implemented
@@ -135,6 +166,35 @@ inline QualType getUnderlyingType(const CXXBaseSpecifier &Node) {
return Node.getType();
}
+/// Unifies obtaining a `TypeSourceInfo` from different node types.
+template <typename T,
+ std::enable_if_t<TypeListContainsSuperOf<
+ TypeList<CXXBaseSpecifier, CXXCtorInitializer,
+ CXXTemporaryObjectExpr, CXXUnresolvedConstructExpr,
+ CompoundLiteralExpr, DeclaratorDecl, ObjCPropertyDecl,
+ TemplateArgumentLoc, TypedefNameDecl>,
+ T>::value> * = nullptr>
+inline TypeSourceInfo *GetTypeSourceInfo(const T &Node) {
+ return Node.getTypeSourceInfo();
+}
+template <typename T,
+ std::enable_if_t<TypeListContainsSuperOf<
+ TypeList<CXXFunctionalCastExpr, ExplicitCastExpr>, T>::value> * =
+ nullptr>
+inline TypeSourceInfo *GetTypeSourceInfo(const T &Node) {
+ return Node.getTypeInfoAsWritten();
+}
+inline TypeSourceInfo *GetTypeSourceInfo(const BlockDecl &Node) {
+ return Node.getSignatureAsWritten();
+}
+inline TypeSourceInfo *GetTypeSourceInfo(const CXXNewExpr &Node) {
+ return Node.getAllocatedTypeSourceInfo();
+}
+inline TypeSourceInfo *
+GetTypeSourceInfo(const ClassTemplateSpecializationDecl &Node) {
+ return Node.getTypeAsWritten();
+}
+
/// Unifies obtaining the FunctionProtoType pointer from both
/// FunctionProtoType and FunctionDecl nodes..
inline const FunctionProtoType *
@@ -541,12 +601,18 @@ public:
/// Convert \c this into a \c Matcher<T> by applying dyn_cast<> to the
/// argument.
/// \c To must be a base class of \c T.
- template <typename To>
- Matcher<To> dynCastTo() const {
+ template <typename To> Matcher<To> dynCastTo() const LLVM_LVALUE_FUNCTION {
static_assert(std::is_base_of<To, T>::value, "Invalid dynCast call.");
return Matcher<To>(Implementation);
}
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ template <typename To> Matcher<To> dynCastTo() && {
+ static_assert(std::is_base_of<To, T>::value, "Invalid dynCast call.");
+ return Matcher<To>(std::move(Implementation));
+ }
+#endif
+
/// Forwards the call to the underlying MatcherInterface<T> pointer.
bool matches(const T &Node,
ASTMatchFinder *Finder,
@@ -563,7 +629,13 @@ public:
///
/// The returned matcher keeps the same restrictions as \c this and remembers
/// that it is meant to support nodes of type \c T.
- operator DynTypedMatcher() const { return Implementation; }
+ operator DynTypedMatcher() const LLVM_LVALUE_FUNCTION {
+ return Implementation;
+ }
+
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ operator DynTypedMatcher() && { return std::move(Implementation); }
+#endif
/// Allows the conversion of a \c Matcher<Type> to a \c
/// Matcher<QualType>.
@@ -836,7 +908,7 @@ public:
/// Matches overloaded operators with a specific name.
///
/// The type argument ArgT is not used by this matcher but is used by
-/// PolymorphicMatcherWithParam1 and should be StringRef.
+/// PolymorphicMatcher and should be StringRef.
template <typename T, typename ArgT>
class HasOverloadedOperatorNameMatcher : public SingleNodeMatcherInterface<T> {
static_assert(std::is_same<T, CXXOperatorCallExpr>::value ||
@@ -870,7 +942,7 @@ private:
Names, getOperatorSpelling(Node.getOverloadedOperator()));
}
- const std::vector<std::string> Names;
+ std::vector<std::string> Names;
};
/// Matches named declarations with a specific name.
@@ -904,8 +976,8 @@ class HasNameMatcher : public SingleNodeMatcherInterface<NamedDecl> {
/// It is slower but simple and works on all cases.
bool matchesNodeFullSlow(const NamedDecl &Node) const;
- const bool UseUnqualifiedMatch;
- const std::vector<std::string> Names;
+ bool UseUnqualifiedMatch;
+ std::vector<std::string> Names;
};
/// Trampoline function to use VariadicFunction<> to construct a
@@ -919,14 +991,14 @@ Matcher<ObjCMessageExpr> hasAnySelectorFunc(
/// Matches declarations for QualType and CallExpr.
///
-/// Type argument DeclMatcherT is required by PolymorphicMatcherWithParam1 but
+/// Type argument DeclMatcherT is required by PolymorphicMatcher but
/// not actually used.
template <typename T, typename DeclMatcherT>
class HasDeclarationMatcher : public MatcherInterface<T> {
static_assert(std::is_same<DeclMatcherT, Matcher<Decl>>::value,
"instantiated with wrong types");
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit HasDeclarationMatcher(const Matcher<Decl> &InnerMatcher)
@@ -1108,39 +1180,6 @@ struct IsBaseType {
template <typename T>
const bool IsBaseType<T>::value;
-/// A type-list implementation.
-///
-/// A "linked list" of types, accessible by using the ::head and ::tail
-/// typedefs.
-template <typename... Ts> struct TypeList {}; // Empty sentinel type list.
-
-template <typename T1, typename... Ts> struct TypeList<T1, Ts...> {
- /// The first type on the list.
- using head = T1;
-
- /// A sublist with the tail. ie everything but the head.
- ///
- /// This type is used to do recursion. TypeList<>/EmptyTypeList indicates the
- /// end of the list.
- using tail = TypeList<Ts...>;
-};
-
-/// The empty type list.
-using EmptyTypeList = TypeList<>;
-
-/// Helper meta-function to determine if some type \c T is present or
-/// a parent type in the list.
-template <typename AnyTypeList, typename T>
-struct TypeListContainsSuperOf {
- static const bool value =
- std::is_base_of<typename AnyTypeList::head, T>::value ||
- TypeListContainsSuperOf<typename AnyTypeList::tail, T>::value;
-};
-template <typename T>
-struct TypeListContainsSuperOf<EmptyTypeList, T> {
- static const bool value = false;
-};
-
/// A "type list" that contains all types.
///
/// Useful for matchers like \c anything and \c unless.
@@ -1157,6 +1196,18 @@ template <class T> struct ExtractFunctionArgMeta<void(T)> {
using type = T;
};
+template <class T, class Tuple, std::size_t... I>
+constexpr T *new_from_tuple_impl(Tuple &&t, std::index_sequence<I...>) {
+ return new T(std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class Tuple> constexpr T *new_from_tuple(Tuple &&t) {
+ return new_from_tuple_impl<T>(
+ std::forward<Tuple>(t),
+ std::make_index_sequence<
+ std::tuple_size<std::remove_reference_t<Tuple>>::value>{});
+}
+
/// Default type lists for ArgumentAdaptingMatcher matchers.
using AdaptativeDefaultFromTypes = AllNodeBaseTypes;
using AdaptativeDefaultToTypes =
@@ -1303,20 +1354,36 @@ public:
VariadicOperatorMatcher(DynTypedMatcher::VariadicOperator Op, Ps &&... Params)
: Op(Op), Params(std::forward<Ps>(Params)...) {}
- template <typename T> operator Matcher<T>() const {
+ template <typename T> operator Matcher<T>() const LLVM_LVALUE_FUNCTION {
return DynTypedMatcher::constructVariadic(
Op, ASTNodeKind::getFromNodeKind<T>(),
getMatchers<T>(std::index_sequence_for<Ps...>()))
.template unconditionalConvertTo<T>();
}
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ template <typename T> operator Matcher<T>() && {
+ return DynTypedMatcher::constructVariadic(
+ Op, ASTNodeKind::getFromNodeKind<T>(),
+ getMatchers<T>(std::index_sequence_for<Ps...>()))
+ .template unconditionalConvertTo<T>();
+ }
+#endif
private:
// Helper method to unpack the tuple into a vector.
template <typename T, std::size_t... Is>
- std::vector<DynTypedMatcher> getMatchers(std::index_sequence<Is...>) const {
+ std::vector<DynTypedMatcher>
+ getMatchers(std::index_sequence<Is...>) const LLVM_LVALUE_FUNCTION {
return {Matcher<T>(std::get<Is>(Params))...};
}
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ template <typename T, std::size_t... Is>
+ std::vector<DynTypedMatcher> getMatchers(std::index_sequence<Is...>) && {
+ return {Matcher<T>(std::get<Is>(std::move(Params)))...};
+ }
+#endif
+
const DynTypedMatcher::VariadicOperator Op;
std::tuple<Ps...> Params;
};
@@ -1374,8 +1441,7 @@ struct MapAnyOfMatcherImpl {
internal::DynTypedMatcher::VO_AnyOf},
applyMatcher(
[&](auto... Matcher) {
- return std::make_tuple(Matcher(
- std::forward<decltype(InnerMatcher)>(InnerMatcher)...)...);
+ return std::make_tuple(Matcher(InnerMatcher...)...);
},
std::tuple<
VariadicDynCastAllOfMatcher<CladeType, MatcherTypes>...>())));
@@ -1406,12 +1472,18 @@ public:
using ReturnTypes = ToTypes;
- template <typename To> operator Matcher<To>() const {
+ template <typename To> operator Matcher<To>() const LLVM_LVALUE_FUNCTION {
return Matcher<To>(new ArgumentAdapterT<To, T>(InnerMatcher));
}
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ template <typename To> operator Matcher<To>() && {
+ return Matcher<To>(new ArgumentAdapterT<To, T>(std::move(InnerMatcher)));
+ }
+#endif
+
private:
- const Matcher<T> InnerMatcher;
+ Matcher<T> InnerMatcher;
};
/// Converts a \c Matcher<T> to a matcher of desired type \c To by
@@ -1426,7 +1498,7 @@ private:
/// \c HasMatcher<To, T>(InnerMatcher).
///
/// If a matcher does not need knowledge about the inner type, prefer to use
-/// PolymorphicMatcherWithParam1.
+/// PolymorphicMatcher.
template <template <typename ToArg, typename FromArg> class ArgumentAdapterT,
typename FromTypes = AdaptativeDefaultFromTypes,
typename ToTypes = AdaptativeDefaultToTypes>
@@ -1453,7 +1525,7 @@ struct ArgumentAdaptingMatcherFunc {
};
template <typename T> class TraversalMatcher : public MatcherInterface<T> {
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
clang::TraversalKind Traversal;
public:
@@ -1479,85 +1551,64 @@ public:
TraversalWrapper(TraversalKind TK, const MatcherType &InnerMatcher)
: TK(TK), InnerMatcher(InnerMatcher) {}
- template <typename T> operator Matcher<T>() const {
+ template <typename T> operator Matcher<T>() const LLVM_LVALUE_FUNCTION {
return internal::DynTypedMatcher::constructRestrictedWrapper(
new internal::TraversalMatcher<T>(TK, InnerMatcher),
ASTNodeKind::getFromNodeKind<T>())
.template unconditionalConvertTo<T>();
}
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ template <typename T> operator Matcher<T>() && {
+ return internal::DynTypedMatcher::constructRestrictedWrapper(
+ new internal::TraversalMatcher<T>(TK, std::move(InnerMatcher)),
+ ASTNodeKind::getFromNodeKind<T>())
+ .template unconditionalConvertTo<T>();
+ }
+#endif
+
private:
TraversalKind TK;
MatcherType InnerMatcher;
};
-/// A PolymorphicMatcherWithParamN<MatcherT, P1, ..., PN> object can be
+/// A PolymorphicMatcher<MatcherT, P1, ..., PN> object can be
/// created from N parameters p1, ..., pN (of type P1, ..., PN) and
/// used as a Matcher<T> where a MatcherT<T, P1, ..., PN>(p1, ..., pN)
/// can be constructed.
///
/// For example:
-/// - PolymorphicMatcherWithParam0<IsDefinitionMatcher>()
+/// - PolymorphicMatcher<IsDefinitionMatcher>()
/// creates an object that can be used as a Matcher<T> for any type T
/// where an IsDefinitionMatcher<T>() can be constructed.
-/// - PolymorphicMatcherWithParam1<ValueEqualsMatcher, int>(42)
+/// - PolymorphicMatcher<ValueEqualsMatcher, int>(42)
/// creates an object that can be used as a Matcher<T> for any type T
/// where a ValueEqualsMatcher<T, int>(42) can be constructed.
-template <template <typename T> class MatcherT,
- typename ReturnTypesF = void(AllNodeBaseTypes)>
-class PolymorphicMatcherWithParam0 {
+template <template <typename T, typename... Params> class MatcherT,
+ typename ReturnTypesF, typename... ParamTypes>
+class PolymorphicMatcher {
public:
- using ReturnTypes = typename ExtractFunctionArgMeta<ReturnTypesF>::type;
-
- template <typename T>
- operator Matcher<T>() const {
- static_assert(TypeListContainsSuperOf<ReturnTypes, T>::value,
- "right polymorphic conversion");
- return Matcher<T>(new MatcherT<T>());
- }
-};
-
-template <template <typename T, typename P1> class MatcherT,
- typename P1,
- typename ReturnTypesF = void(AllNodeBaseTypes)>
-class PolymorphicMatcherWithParam1 {
-public:
- explicit PolymorphicMatcherWithParam1(const P1 &Param1)
- : Param1(Param1) {}
+ PolymorphicMatcher(const ParamTypes &... Params) : Params(Params...) {}
using ReturnTypes = typename ExtractFunctionArgMeta<ReturnTypesF>::type;
- template <typename T>
- operator Matcher<T>() const {
+ template <typename T> operator Matcher<T>() const LLVM_LVALUE_FUNCTION {
static_assert(TypeListContainsSuperOf<ReturnTypes, T>::value,
"right polymorphic conversion");
- return Matcher<T>(new MatcherT<T, P1>(Param1));
+ return Matcher<T>(new_from_tuple<MatcherT<T, ParamTypes...>>(Params));
}
-private:
- const P1 Param1;
-};
-
-template <template <typename T, typename P1, typename P2> class MatcherT,
- typename P1, typename P2,
- typename ReturnTypesF = void(AllNodeBaseTypes)>
-class PolymorphicMatcherWithParam2 {
-public:
- PolymorphicMatcherWithParam2(const P1 &Param1, const P2 &Param2)
- : Param1(Param1), Param2(Param2) {}
-
- using ReturnTypes = typename ExtractFunctionArgMeta<ReturnTypesF>::type;
-
- template <typename T>
- operator Matcher<T>() const {
+#if LLVM_HAS_RVALUE_REFERENCE_THIS
+ template <typename T> operator Matcher<T>() && {
static_assert(TypeListContainsSuperOf<ReturnTypes, T>::value,
"right polymorphic conversion");
- return Matcher<T>(new MatcherT<T, P1, P2>(Param1, Param2));
+ return Matcher<T>(
+ new_from_tuple<MatcherT<T, ParamTypes...>>(std::move(Params)));
}
+#endif
private:
- const P1 Param1;
- const P2 Param2;
+ std::tuple<ParamTypes...> Params;
};
/// Matches nodes of type T that have child nodes of type ChildT for
@@ -1566,7 +1617,7 @@ private:
/// ChildT must be an AST base type.
template <typename T, typename ChildT>
class HasMatcher : public MatcherInterface<T> {
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit HasMatcher(const Matcher<ChildT> &InnerMatcher)
@@ -1589,7 +1640,7 @@ class ForEachMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<ChildT>::value,
"for each only accepts base type matcher");
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit ForEachMatcher(const Matcher<ChildT> &InnerMatcher)
@@ -1619,7 +1670,7 @@ class HasDescendantMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<DescendantT>::value,
"has descendant only accepts base type matcher");
- const DynTypedMatcher DescendantMatcher;
+ DynTypedMatcher DescendantMatcher;
public:
explicit HasDescendantMatcher(const Matcher<DescendantT> &DescendantMatcher)
@@ -1641,7 +1692,7 @@ class HasParentMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<ParentT>::value,
"has parent only accepts base type matcher");
- const DynTypedMatcher ParentMatcher;
+ DynTypedMatcher ParentMatcher;
public:
explicit HasParentMatcher(const Matcher<ParentT> &ParentMatcher)
@@ -1663,7 +1714,7 @@ class HasAncestorMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<AncestorT>::value,
"has ancestor only accepts base type matcher");
- const DynTypedMatcher AncestorMatcher;
+ DynTypedMatcher AncestorMatcher;
public:
explicit HasAncestorMatcher(const Matcher<AncestorT> &AncestorMatcher)
@@ -1687,7 +1738,7 @@ class ForEachDescendantMatcher : public MatcherInterface<T> {
static_assert(IsBaseType<DescendantT>::value,
"for each descendant only accepts base type matcher");
- const DynTypedMatcher DescendantMatcher;
+ DynTypedMatcher DescendantMatcher;
public:
explicit ForEachDescendantMatcher(
@@ -1720,7 +1771,7 @@ public:
}
private:
- const ValueT ExpectedValue;
+ ValueT ExpectedValue;
};
/// Template specializations to easily write matchers for floating point
@@ -1753,7 +1804,7 @@ inline bool ValueEqualsMatcher<FloatingLiteral, llvm::APFloat>::matchesNode(
/// \c Matcher<T> matches.
template <typename TLoc, typename T>
class LocMatcher : public MatcherInterface<TLoc> {
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit LocMatcher(const Matcher<T> &InnerMatcher)
@@ -1777,7 +1828,7 @@ private:
///
/// Used to implement the \c loc() matcher.
class TypeLocTypeMatcher : public MatcherInterface<TypeLoc> {
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit TypeLocTypeMatcher(const Matcher<QualType> &InnerMatcher)
@@ -1796,7 +1847,7 @@ public:
/// another node of type \c T that can be reached using a given traverse
/// function.
template <typename T> class TypeTraverseMatcher : public MatcherInterface<T> {
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit TypeTraverseMatcher(const Matcher<QualType> &InnerMatcher,
@@ -1821,7 +1872,7 @@ private:
/// given traverse function.
template <typename T>
class TypeLocTraverseMatcher : public MatcherInterface<T> {
- const DynTypedMatcher InnerMatcher;
+ DynTypedMatcher InnerMatcher;
public:
explicit TypeLocTraverseMatcher(const Matcher<TypeLoc> &InnerMatcher,
@@ -1876,7 +1927,7 @@ public:
};
private:
- const Matcher<InnerTBase> InnerMatcher;
+ Matcher<InnerTBase> InnerMatcher;
};
/// A simple memoizer of T(*)() functions.
@@ -2039,7 +2090,8 @@ equivalentUnaryOperator(const NodeType &Node) {
template <>
inline Optional<UnaryOperatorKind>
equivalentUnaryOperator<CXXOperatorCallExpr>(const CXXOperatorCallExpr &Node) {
- if (Node.getNumArgs() != 1)
+ if (Node.getNumArgs() != 1 && Node.getOperator() != OO_PlusPlus &&
+ Node.getOperator() != OO_MinusMinus)
return None;
switch (Node.getOperator()) {
default:
@@ -2050,6 +2102,8 @@ equivalentUnaryOperator<CXXOperatorCallExpr>(const CXXOperatorCallExpr &Node) {
return UO_Minus;
case OO_Amp:
return UO_AddrOf;
+ case OO_Star:
+ return UO_Deref;
case OO_Tilde:
return UO_Not;
case OO_Exclaim:
@@ -2173,7 +2227,7 @@ inline Optional<StringRef> getOpName(const CXXOperatorCallExpr &Node) {
/// Matches overloaded operators with a specific name.
///
/// The type argument ArgT is not used by this matcher but is used by
-/// PolymorphicMatcherWithParam1 and should be std::vector<std::string>>.
+/// PolymorphicMatcher and should be std::vector<std::string>.
template <typename T, typename ArgT = std::vector<std::string>>
class HasAnyOperatorNameMatcher : public SingleNodeMatcherInterface<T> {
static_assert(std::is_same<T, BinaryOperator>::value ||
@@ -2219,19 +2273,22 @@ private:
return BinaryOperator::getOpcodeStr(*optBinaryOpcode);
}
- const std::vector<std::string> Names;
+ std::vector<std::string> Names;
};
-using HasOpNameMatcher = PolymorphicMatcherWithParam1<
- HasAnyOperatorNameMatcher, std::vector<std::string>,
- void(TypeList<BinaryOperator, CXXOperatorCallExpr,
- CXXRewrittenBinaryOperator, UnaryOperator>)>;
+using HasOpNameMatcher =
+ PolymorphicMatcher<HasAnyOperatorNameMatcher,
+ void(
+ TypeList<BinaryOperator, CXXOperatorCallExpr,
+ CXXRewrittenBinaryOperator, UnaryOperator>),
+ std::vector<std::string>>;
HasOpNameMatcher hasAnyOperatorNameFunc(ArrayRef<const StringRef *> NameRefs);
-using HasOverloadOpNameMatcher = PolymorphicMatcherWithParam1<
- HasOverloadedOperatorNameMatcher, std::vector<std::string>,
- void(TypeList<CXXOperatorCallExpr, FunctionDecl>)>;
+using HasOverloadOpNameMatcher =
+ PolymorphicMatcher<HasOverloadedOperatorNameMatcher,
+ void(TypeList<CXXOperatorCallExpr, FunctionDecl>),
+ std::vector<std::string>>;
HasOverloadOpNameMatcher
hasAnyOverloadedOperatorNameFunc(ArrayRef<const StringRef *> NameRefs);
diff --git a/clang/include/clang/ASTMatchers/ASTMatchersMacros.h b/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
index 7e01ab0c6ee9..592a3898a295 100644
--- a/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
+++ b/clang/include/clang/ASTMatchers/ASTMatchersMacros.h
@@ -143,7 +143,7 @@
*Builder) const override; \
\
private: \
- ParamType const Param; \
+ ParamType Param; \
}; \
} \
inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
@@ -151,7 +151,7 @@
return ::clang::ast_matchers::internal::makeMatcher( \
new internal::matcher_##DefineMatcher##OverloadId##Matcher(Param)); \
} \
- typedef ::clang::ast_matchers::internal::Matcher<Type>( \
+ typedef ::clang::ast_matchers::internal::Matcher<Type> ( \
&DefineMatcher##_Type##OverloadId)(ParamType const &Param); \
inline bool internal::matcher_##DefineMatcher##OverloadId##Matcher::matches( \
const Type &Node, \
@@ -192,8 +192,8 @@
*Builder) const override; \
\
private: \
- ParamType1 const Param1; \
- ParamType2 const Param2; \
+ ParamType1 Param1; \
+ ParamType2 Param2; \
}; \
} \
inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
@@ -202,7 +202,7 @@
new internal::matcher_##DefineMatcher##OverloadId##Matcher(Param1, \
Param2)); \
} \
- typedef ::clang::ast_matchers::internal::Matcher<Type>( \
+ typedef ::clang::ast_matchers::internal::Matcher<Type> ( \
&DefineMatcher##_Type##OverloadId)(ParamType1 const &Param1, \
ParamType2 const &Param2); \
inline bool internal::matcher_##DefineMatcher##OverloadId##Matcher::matches( \
@@ -239,10 +239,10 @@
*Builder) const override; \
}; \
} \
- inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam0< \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcher< \
internal::matcher_##DefineMatcher##Matcher, ReturnTypesF> \
DefineMatcher() { \
- return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam0< \
+ return ::clang::ast_matchers::internal::PolymorphicMatcher< \
internal::matcher_##DefineMatcher##Matcher, ReturnTypesF>(); \
} \
template <typename NodeType> \
@@ -281,21 +281,20 @@
*Builder) const override; \
\
private: \
- ParamType const Param; \
+ ParamType Param; \
}; \
} \
- inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType, \
- ReturnTypesF> \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ ParamType> \
DefineMatcher(ParamType const &Param) { \
- return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType, \
- ReturnTypesF>(Param); \
+ return ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ ParamType>(Param); \
} \
- typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType, \
- ReturnTypesF>(&DefineMatcher##_Type##OverloadId)( \
- ParamType const &Param); \
+ typedef ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ ParamType> (&DefineMatcher##_Type##OverloadId)(ParamType const &Param); \
template <typename NodeType, typename ParamT> \
bool internal:: \
matcher_##DefineMatcher##OverloadId##Matcher<NodeType, ParamT>::matches( \
@@ -334,21 +333,21 @@
*Builder) const override; \
\
private: \
- ParamType1 const Param1; \
- ParamType2 const Param2; \
+ ParamType1 Param1; \
+ ParamType2 Param2; \
}; \
} \
- inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam2< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType1, \
- ParamType2, ReturnTypesF> \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ ParamType1, ParamType2> \
DefineMatcher(ParamType1 const &Param1, ParamType2 const &Param2) { \
- return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam2< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType1, \
- ParamType2, ReturnTypesF>(Param1, Param2); \
+ return ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ ParamType1, ParamType2>(Param1, Param2); \
} \
- typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam2< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, ParamType1, \
- ParamType2, ReturnTypesF>(&DefineMatcher##_Type##OverloadId)( \
+ typedef ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ ParamType1, ParamType2> (&DefineMatcher##_Type##OverloadId)( \
ParamType1 const &Param1, ParamType2 const &Param2); \
template <typename NodeType, typename ParamT1, typename ParamT2> \
bool internal::matcher_##DefineMatcher##OverloadId##Matcher< \
@@ -470,7 +469,7 @@
*Builder) const override; \
\
private: \
- std::shared_ptr<llvm::Regex> const Param; \
+ std::shared_ptr<llvm::Regex> Param; \
}; \
} \
inline ::clang::ast_matchers::internal::Matcher<Type> DefineMatcher( \
@@ -522,34 +521,34 @@
*Builder) const override; \
\
private: \
- std::shared_ptr<llvm::Regex> const Param; \
+ std::shared_ptr<llvm::Regex> Param; \
}; \
} \
- inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, \
- std::shared_ptr<llvm::Regex>, ReturnTypesF> \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ std::shared_ptr<llvm::Regex>> \
DefineMatcher(llvm::StringRef Param, llvm::Regex::RegexFlags RegexFlags) { \
- return ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, \
- std::shared_ptr<llvm::Regex>, ReturnTypesF>( \
+ return ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ std::shared_ptr<llvm::Regex>>( \
::clang::ast_matchers::internal::createAndVerifyRegex( \
Param, RegexFlags, #DefineMatcher)); \
} \
- inline ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, \
- std::shared_ptr<llvm::Regex>, ReturnTypesF> \
+ inline ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ std::shared_ptr<llvm::Regex>> \
DefineMatcher(llvm::StringRef Param) { \
return DefineMatcher(Param, llvm::Regex::NoFlags); \
} \
- typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, \
- std::shared_ptr<llvm::Regex>, ReturnTypesF> ( \
+ typedef ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ std::shared_ptr<llvm::Regex>> ( \
&DefineMatcher##_Type##OverloadId##Flags)( \
llvm::StringRef Param, llvm::Regex::RegexFlags RegexFlags); \
- typedef ::clang::ast_matchers::internal::PolymorphicMatcherWithParam1< \
- internal::matcher_##DefineMatcher##OverloadId##Matcher, \
- std::shared_ptr<llvm::Regex>, ReturnTypesF> ( \
- &DefineMatcher##_Type##OverloadId)(llvm::StringRef Param); \
+ typedef ::clang::ast_matchers::internal::PolymorphicMatcher< \
+ internal::matcher_##DefineMatcher##OverloadId##Matcher, ReturnTypesF, \
+ std::shared_ptr<llvm::Regex>> (&DefineMatcher##_Type##OverloadId)( \
+ llvm::StringRef Param); \
template <typename NodeType, typename ParamT> \
bool internal:: \
matcher_##DefineMatcher##OverloadId##Matcher<NodeType, ParamT>::matches( \
diff --git a/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h b/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
index f095dcdd60b0..10625311c1a5 100644
--- a/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
+++ b/clang/include/clang/ASTMatchers/Dynamic/Diagnostics.h
@@ -66,6 +66,8 @@ public:
ET_RegistryAmbiguousOverload = 5,
ET_RegistryValueNotFound = 6,
ET_RegistryUnknownEnumWithReplace = 7,
+ ET_RegistryNonNodeMatcher = 8,
+ ET_RegistryMatcherNoWithSupport = 9,
ET_ParserStringError = 100,
ET_ParserNoOpenParen = 101,
@@ -77,7 +79,9 @@ public:
ET_ParserMalformedBindExpr = 107,
ET_ParserTrailingCode = 108,
ET_ParserNumberError = 109,
- ET_ParserOverloadedType = 110
+ ET_ParserOverloadedType = 110,
+ ET_ParserMalformedChainedExpr = 111,
+ ET_ParserFailedToBuildMatcher = 112
};
/// Helper stream class.
diff --git a/clang/include/clang/ASTMatchers/Dynamic/Parser.h b/clang/include/clang/ASTMatchers/Dynamic/Parser.h
index 70bbe816accd..af370d83782a 100644
--- a/clang/include/clang/ASTMatchers/Dynamic/Parser.h
+++ b/clang/include/clang/ASTMatchers/Dynamic/Parser.h
@@ -100,6 +100,14 @@ public:
virtual llvm::Optional<MatcherCtor>
lookupMatcherCtor(StringRef MatcherName) = 0;
+ virtual bool isBuilderMatcher(MatcherCtor) const = 0;
+
+ virtual ASTNodeKind nodeMatcherType(MatcherCtor) const = 0;
+
+ virtual internal::MatcherDescriptorPtr
+ buildMatcherCtor(MatcherCtor, SourceRange NameRange,
+ ArrayRef<ParserValue> Args, Diagnostics *Error) const = 0;
+
/// Compute the list of completion types for \p Context.
///
/// Each element of \p Context represents a matcher invocation, going from
@@ -142,6 +150,15 @@ public:
std::vector<ArgKind> getAcceptedCompletionTypes(
llvm::ArrayRef<std::pair<MatcherCtor, unsigned>> Context) override;
+ bool isBuilderMatcher(MatcherCtor Ctor) const override;
+
+ ASTNodeKind nodeMatcherType(MatcherCtor) const override;
+
+ internal::MatcherDescriptorPtr
+ buildMatcherCtor(MatcherCtor, SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const override;
+
std::vector<MatcherCompletion>
getMatcherCompletions(llvm::ArrayRef<ArgKind> AcceptedTypes) override;
};
@@ -233,7 +250,11 @@ private:
bool parseBindID(std::string &BindID);
bool parseExpressionImpl(VariantValue *Value);
+ bool parseMatcherBuilder(MatcherCtor Ctor, const TokenInfo &NameToken,
+ const TokenInfo &OpenToken, VariantValue *Value);
bool parseMatcherExpressionImpl(const TokenInfo &NameToken,
+ const TokenInfo &OpenToken,
+ llvm::Optional<MatcherCtor> Ctor,
VariantValue *Value);
bool parseIdentifierPrefixImpl(VariantValue *Value);
diff --git a/clang/include/clang/ASTMatchers/Dynamic/Registry.h b/clang/include/clang/ASTMatchers/Dynamic/Registry.h
index 215206b2f50c..f91f5fe01c4e 100644
--- a/clang/include/clang/ASTMatchers/Dynamic/Registry.h
+++ b/clang/include/clang/ASTMatchers/Dynamic/Registry.h
@@ -33,6 +33,23 @@ namespace internal {
class MatcherDescriptor;
+/// A smart (owning) pointer for MatcherDescriptor. We can't use unique_ptr
+/// because MatcherDescriptor is forward declared.
+class MatcherDescriptorPtr {
+public:
+ explicit MatcherDescriptorPtr(MatcherDescriptor *);
+ ~MatcherDescriptorPtr();
+ MatcherDescriptorPtr(MatcherDescriptorPtr &&) = default;
+ MatcherDescriptorPtr &operator=(MatcherDescriptorPtr &&) = default;
+ MatcherDescriptorPtr(const MatcherDescriptorPtr &) = delete;
+ MatcherDescriptorPtr &operator=(const MatcherDescriptorPtr &) = delete;
+
+ MatcherDescriptor *get() { return Ptr; }
+
+private:
+ MatcherDescriptor *Ptr;
+};
+
} // namespace internal
using MatcherCtor = const internal::MatcherDescriptor *;
@@ -66,6 +83,14 @@ class Registry {
public:
Registry() = delete;
+ static ASTNodeKind nodeMatcherType(MatcherCtor);
+
+ static bool isBuilderMatcher(MatcherCtor Ctor);
+
+ static internal::MatcherDescriptorPtr
+ buildMatcherCtor(MatcherCtor, SourceRange NameRange,
+ ArrayRef<ParserValue> Args, Diagnostics *Error);
+
/// Look up a matcher in the registry by name,
///
/// \return An opaque value which may be used to refer to the matcher
diff --git a/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h b/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
index fa033f49bc90..5b3f8a7ca5eb 100644
--- a/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
+++ b/clang/include/clang/ASTMatchers/Dynamic/VariantValue.h
@@ -35,6 +35,7 @@ class ArgKind {
public:
enum Kind {
AK_Matcher,
+ AK_Node,
AK_Boolean,
AK_Double,
AK_Unsigned,
@@ -44,12 +45,22 @@ class ArgKind {
ArgKind(Kind K) : K(K) { assert(K != AK_Matcher); }
/// Constructor for matcher types.
- ArgKind(ASTNodeKind MatcherKind) : K(AK_Matcher), MatcherKind(MatcherKind) {}
+ static ArgKind MakeMatcherArg(ASTNodeKind MatcherKind) {
+ return ArgKind{AK_Matcher, MatcherKind};
+ }
+
+ static ArgKind MakeNodeArg(ASTNodeKind MatcherKind) {
+ return ArgKind{AK_Node, MatcherKind};
+ }
Kind getArgKind() const { return K; }
ASTNodeKind getMatcherKind() const {
assert(K == AK_Matcher);
- return MatcherKind;
+ return NodeKind;
+ }
+ ASTNodeKind getNodeKind() const {
+ assert(K == AK_Node);
+ return NodeKind;
}
/// Determines if this type can be converted to \p To.
@@ -61,8 +72,9 @@ class ArgKind {
bool isConvertibleTo(ArgKind To, unsigned *Specificity) const;
bool operator<(const ArgKind &Other) const {
- if (K == AK_Matcher && Other.K == AK_Matcher)
- return MatcherKind < Other.MatcherKind;
+ if ((K == AK_Matcher && Other.K == AK_Matcher) ||
+ (K == AK_Node && Other.K == AK_Node))
+ return NodeKind < Other.NodeKind;
return K < Other.K;
}
@@ -70,8 +82,9 @@ class ArgKind {
std::string asString() const;
private:
+ ArgKind(Kind K, ASTNodeKind NK) : K(K), NodeKind(NK) {}
Kind K;
- ASTNodeKind MatcherKind;
+ ASTNodeKind NodeKind;
};
using ast_matchers::internal::DynTypedMatcher;
diff --git a/clang/include/clang/ASTMatchers/GtestMatchers.h b/clang/include/clang/ASTMatchers/GtestMatchers.h
index 4f8addcf744a..e19d91a674f2 100644
--- a/clang/include/clang/ASTMatchers/GtestMatchers.h
+++ b/clang/include/clang/ASTMatchers/GtestMatchers.h
@@ -16,6 +16,7 @@
#include "clang/AST/Stmt.h"
#include "clang/ASTMatchers/ASTMatchers.h"
+#include "llvm/ADT/StringRef.h"
namespace clang {
namespace ast_matchers {
@@ -30,14 +31,55 @@ enum class GtestCmp {
Lt,
};
-/// Matcher for gtest's ASSERT_... macros.
+/// This enum indicates whether the mock method in the matched ON_CALL or
+/// EXPECT_CALL macro has arguments. For example, `None` can be used to match
+/// `ON_CALL(mock, TwoParamMethod)` whereas `Some` can be used to match
+/// `ON_CALL(mock, TwoParamMethod(m1, m2))`.
+enum class MockArgs {
+ None,
+ Some,
+};
+
+/// Matcher for gtest's ASSERT comparison macros including ASSERT_EQ, ASSERT_NE,
+/// ASSERT_GE, ASSERT_GT, ASSERT_LE and ASSERT_LT.
internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
StatementMatcher Right);
-/// Matcher for gtest's EXPECT_... macros.
+/// Matcher for gtest's ASSERT_THAT macro.
+internal::BindableMatcher<Stmt> gtestAssertThat(StatementMatcher Actual,
+ StatementMatcher Matcher);
+
+/// Matcher for gtest's EXPECT comparison macros including EXPECT_EQ, EXPECT_NE,
+/// EXPECT_GE, EXPECT_GT, EXPECT_LE and EXPECT_LT.
internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
StatementMatcher Right);
+/// Matcher for gtest's EXPECT_THAT macro.
+internal::BindableMatcher<Stmt> gtestExpectThat(StatementMatcher Actual,
+ StatementMatcher Matcher);
+
+/// Matcher for gtest's EXPECT_CALL macro. `MockObject` matches the mock
+/// object and `MockMethodName` is the name of the method invoked on the mock
+/// object.
+internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockObject,
+ llvm::StringRef MockMethodName,
+ MockArgs Args);
+
+/// Matcher for gtest's EXPECT_CALL macro. `MockCall` matches the whole mock
+/// member method call. This API is more flexible but requires more knowledge of
+/// the AST structure of EXPECT_CALL macros.
+internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockCall,
+ MockArgs Args);
+
+/// Like the first `gtestExpectCall` overload but for `ON_CALL`.
+internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockObject,
+ llvm::StringRef MockMethodName,
+ MockArgs Args);
+
+/// Like the second `gtestExpectCall` overload but for `ON_CALL`.
+internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockCall,
+ MockArgs Args);
+
} // namespace ast_matchers
} // namespace clang
diff --git a/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h b/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
index fc574c680a44..a0c767bf92d2 100644
--- a/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
+++ b/clang/include/clang/Analysis/Analyses/CalledOnceCheck.h
@@ -17,6 +17,7 @@
namespace clang {
class AnalysisDeclContext;
+class BlockDecl;
class CFG;
class Decl;
class DeclContext;
@@ -79,6 +80,7 @@ public:
/// the path containing the call and not containing the call. This helps us
/// to pinpoint a bad path for the user.
/// \param Parameter -- parameter that should be called once.
+ /// \param Function -- function declaration where the problem occurred.
/// \param Where -- the least common ancestor statement.
/// \param Reason -- a reason describing the path without a call.
/// \param IsCalledDirectly -- true, if parameter actually gets called on
@@ -86,9 +88,22 @@ public:
/// collection, passed as a parameter, etc.).
/// \param IsCompletionHandler -- true, if parameter is a completion handler.
virtual void handleNeverCalled(const ParmVarDecl *Parameter,
- const Stmt *Where, NeverCalledReason Reason,
+ const Decl *Function, const Stmt *Where,
+ NeverCalledReason Reason,
bool IsCalledDirectly,
bool IsCompletionHandler) {}
+
+ /// Called when the block is guaranteed to be called exactly once.
+ /// It means that we can be stricter with what we report on that block.
+ /// \param Block -- block declaration that is known to be called exactly once.
+ virtual void
+ handleBlockThatIsGuaranteedToBeCalledOnce(const BlockDecl *Block) {}
+
+ /// Called when the block has no guarantees about how many times it can get
+ /// called.
+ /// It means that we should be more lenient with reporting warnings in it.
+ /// \param Block -- block declaration in question.
+ virtual void handleBlockWithNoGuarantees(const BlockDecl *Block) {}
};
/// Check given CFG for 'called once' parameter violations.
diff --git a/clang/include/clang/Analysis/AnalysisDeclContext.h b/clang/include/clang/Analysis/AnalysisDeclContext.h
index d12582f4f329..102970a1d55e 100644
--- a/clang/include/clang/Analysis/AnalysisDeclContext.h
+++ b/clang/include/clang/Analysis/AnalysisDeclContext.h
@@ -200,6 +200,8 @@ public:
/// \returns Whether the root namespace of \p D is the \c std C++ namespace.
static bool isInStdNamespace(const Decl *D);
+ static std::string getFunctionName(const Decl *D);
+
private:
std::unique_ptr<ManagedAnalysis> &getAnalysisImpl(const void *tag);
diff --git a/clang/include/clang/Analysis/AnyCall.h b/clang/include/clang/Analysis/AnyCall.h
index 16371eb1da18..846ff7719ce1 100644
--- a/clang/include/clang/Analysis/AnyCall.h
+++ b/clang/include/clang/Analysis/AnyCall.h
@@ -107,8 +107,8 @@ public:
}
- /// If {@code E} is a generic call (to ObjC method /function/block/etc),
- /// return a constructed {@code AnyCall} object. Return None otherwise.
+ /// If @c E is a generic call (to ObjC method /function/block/etc),
+ /// return a constructed @c AnyCall object. Return None otherwise.
static Optional<AnyCall> forExpr(const Expr *E) {
if (const auto *ME = dyn_cast<ObjCMessageExpr>(E)) {
return AnyCall(ME);
@@ -127,8 +127,8 @@ public:
}
}
- /// If {@code D} is a callable (Objective-C method or a function), return
- /// a constructed {@code AnyCall} object. Return None otherwise.
+ /// If @c D is a callable (Objective-C method or a function), return
+ /// a constructed @c AnyCall object. Return None otherwise.
// FIXME: block support.
static Optional<AnyCall> forDecl(const Decl *D) {
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
@@ -186,7 +186,7 @@ public:
}
/// \returns Function identifier if it is a named declaration,
- /// {@code nullptr} otherwise.
+ /// @c nullptr otherwise.
const IdentifierInfo *getIdentifier() const {
if (const auto *ND = dyn_cast_or_null<NamedDecl>(D))
return ND->getIdentifier();
diff --git a/clang/include/clang/Analysis/CFG.h b/clang/include/clang/Analysis/CFG.h
index 43fb523c863a..9e32eb8e066a 100644
--- a/clang/include/clang/Analysis/CFG.h
+++ b/clang/include/clang/Analysis/CFG.h
@@ -1307,6 +1307,12 @@ public:
iterator nodes_begin() { return iterator(Blocks.begin()); }
iterator nodes_end() { return iterator(Blocks.end()); }
+
+ llvm::iterator_range<iterator> nodes() { return {begin(), end()}; }
+ llvm::iterator_range<const_iterator> const_nodes() const {
+ return {begin(), end()};
+ }
+
const_iterator nodes_begin() const { return const_iterator(Blocks.begin()); }
const_iterator nodes_end() const { return const_iterator(Blocks.end()); }
@@ -1315,6 +1321,13 @@ public:
const_reverse_iterator rbegin() const { return Blocks.rbegin(); }
const_reverse_iterator rend() const { return Blocks.rend(); }
+ llvm::iterator_range<reverse_iterator> reverse_nodes() {
+ return {rbegin(), rend()};
+ }
+ llvm::iterator_range<const_reverse_iterator> const_reverse_nodes() const {
+ return {rbegin(), rend()};
+ }
+
CFGBlock & getEntry() { return *Entry; }
const CFGBlock & getEntry() const { return *Entry; }
CFGBlock & getExit() { return *Exit; }
@@ -1376,13 +1389,12 @@ public:
// Member templates useful for various batch operations over CFGs.
//===--------------------------------------------------------------------===//
- template <typename CALLBACK>
- void VisitBlockStmts(CALLBACK& O) const {
+ template <typename Callback> void VisitBlockStmts(Callback &O) const {
for (const_iterator I = begin(), E = end(); I != E; ++I)
for (CFGBlock::const_iterator BI = (*I)->begin(), BE = (*I)->end();
BI != BE; ++BI) {
if (Optional<CFGStmt> stmt = BI->getAs<CFGStmt>())
- O(const_cast<Stmt*>(stmt->getStmt()));
+ O(const_cast<Stmt *>(stmt->getStmt()));
}
}
diff --git a/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h b/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
index 68d935c6a400..ab96cd5169a2 100644
--- a/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
+++ b/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
@@ -19,13 +19,14 @@
#include "clang/Analysis/ProgramPoint.h"
#include "llvm/ADT/DenseMap.h"
+namespace clang {
+
//===----------------------------------------------------------------------===//
/// Dataflow Directional Tag Classes. These are used for tag dispatching
/// within the dataflow solver/transfer functions to determine what direction
/// a dataflow analysis flows.
//===----------------------------------------------------------------------===//
-namespace clang {
namespace dataflow {
struct forward_analysis_tag {};
struct backward_analysis_tag {};
diff --git a/clang/include/clang/Analysis/MacroExpansionContext.h b/clang/include/clang/Analysis/MacroExpansionContext.h
new file mode 100644
index 000000000000..57934bfc09d9
--- /dev/null
+++ b/clang/include/clang/Analysis/MacroExpansionContext.h
@@ -0,0 +1,127 @@
+//===- MacroExpansionContext.h - Macro expansion information ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_MACROEXPANSIONCONTEXT_H
+#define LLVM_CLANG_ANALYSIS_MACROEXPANSIONCONTEXT_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+namespace detail {
+class MacroExpansionRangeRecorder;
+} // namespace detail
+
+/// MacroExpansionContext tracks the macro expansions processed by the
+/// Preprocessor. It means that it can track source locations from a single
+/// translation unit. For every macro expansion it can tell you what text will
+/// be substituted.
+///
+/// It was designed to deal with:
+/// - regular macros
+/// - macro functions
+/// - variadic macros
+/// - transitive macro expansions
+/// - macro redefinition
+/// - unbalanced parenthesis
+///
+/// \code{.c}
+/// void bar();
+/// #define retArg(x) x
+/// #define retArgUnclosed retArg(bar()
+/// #define BB CC
+/// #define applyInt BB(int)
+/// #define CC(x) retArgUnclosed
+///
+/// void unbalancedMacros() {
+/// applyInt );
+/// //^~~~~~~~~~^ is the substituted range
+/// // Substituted text is "applyInt )"
+/// // Expanded text is "bar()"
+/// }
+///
+/// #define expandArgUnclosedCommaExpr(x) (x, bar(), 1
+/// #define f expandArgUnclosedCommaExpr
+///
+/// void unbalancedMacros2() {
+/// int x = f(f(1)) )); // Look at the parenthesis!
+/// // ^~~~~~^ is the substituted range
+/// // Substituted text is "f(f(1))"
+/// // Expanded text is "((1,bar(),1,bar(),1"
+/// }
+/// \endcode
+/// \remark Currently we don't respect the whitespace between expanded tokens,
+/// so the output for this example might differ from the -E compiler
+/// invocation.
+/// \remark All whitespace is consumed while constructing the expansion.
+///         After every identifier a single space is inserted to produce valid
+///         C code even if one identifier immediately follows another, such as
+///         in variable declarations.
+/// \remark MacroExpansionContext object must outlive the Preprocessor
+/// parameter.
+class MacroExpansionContext {
+public:
+ /// Creates a MacroExpansionContext.
+ /// \remark You must call registerForPreprocessor to set the required
+ /// onTokenLexed callback and the PPCallbacks.
+ explicit MacroExpansionContext(const LangOptions &LangOpts);
+
+ /// Register the necessary callbacks to the Preprocessor to record the
+ /// expansion events and the generated tokens. Must ensure that this object
+ /// outlives the given Preprocessor.
+ void registerForPreprocessor(Preprocessor &PP);
+
+ /// \param MacroExpansionLoc Must be the expansion location of a macro.
+ /// \return The textual representation of the token sequence which was
+ /// substituted in place of the macro after the preprocessing.
+ /// If no macro was expanded at that location, returns llvm::None.
+ Optional<StringRef> getExpandedText(SourceLocation MacroExpansionLoc) const;
+
+ /// \param MacroExpansionLoc Must be the expansion location of a macro.
+ /// \return The text from the original source code that was substituted by
+ /// the macro expansion chain from the given location.
+ /// If no macro was expanded at that location, returns llvm::None.
+ Optional<StringRef> getOriginalText(SourceLocation MacroExpansionLoc) const;
+
+ LLVM_DUMP_METHOD void dumpExpansionRangesToStream(raw_ostream &OS) const;
+ LLVM_DUMP_METHOD void dumpExpandedTextsToStream(raw_ostream &OS) const;
+ LLVM_DUMP_METHOD void dumpExpansionRanges() const;
+ LLVM_DUMP_METHOD void dumpExpandedTexts() const;
+
+private:
+ friend class detail::MacroExpansionRangeRecorder;
+ using MacroExpansionText = SmallString<40>;
+ using ExpansionMap = llvm::DenseMap<SourceLocation, MacroExpansionText>;
+ using ExpansionRangeMap = llvm::DenseMap<SourceLocation, SourceLocation>;
+
+ /// Associates the textual representation of the expanded tokens at the given
+ /// macro expansion location.
+ ExpansionMap ExpandedTokens;
+
+ /// Tracks which source location was the last affected by any macro
+ /// substitution starting from a given macro expansion location.
+ ExpansionRangeMap ExpansionRanges;
+
+ Preprocessor *PP = nullptr;
+ SourceManager *SM = nullptr;
+ const LangOptions &LangOpts;
+
+ /// This callback is called by the preprocessor.
+ /// It stores the textual representation of the expanded token sequence for a
+ /// macro expansion location.
+ void onTokenLexed(const Token &Tok);
+};
+} // end namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_MACROEXPANSIONCONTEXT_H
diff --git a/clang/include/clang/Analysis/RetainSummaryManager.h b/clang/include/clang/Analysis/RetainSummaryManager.h
index 6acefb563d8c..b7ccb0317830 100644
--- a/clang/include/clang/Analysis/RetainSummaryManager.h
+++ b/clang/include/clang/Analysis/RetainSummaryManager.h
@@ -613,8 +613,8 @@ class RetainSummaryManager {
const FunctionType *FT,
bool &AllowAnnotations);
- /// Apply the annotation of {@code pd} in function {@code FD}
- /// to the resulting summary stored in out-parameter {@code Template}.
+ /// Apply the annotation of @c pd in function @c FD
+ /// to the resulting summary stored in out-parameter @c Template.
/// \return whether an annotation was applied.
bool applyParamAnnotationEffect(const ParmVarDecl *pd, unsigned parm_idx,
const NamedDecl *FD,
@@ -715,8 +715,8 @@ private:
/// Set argument types for arguments which are not doing anything.
void updateSummaryForArgumentTypes(const AnyCall &C, const RetainSummary *&RS);
- /// Determine whether a declaration {@code D} of correspondent type (return
- /// type for functions/methods) {@code QT} has any of the given attributes,
+ /// Determine whether a declaration @c D of correspondent type (return
+ /// type for functions/methods) @c QT has any of the given attributes,
/// provided they pass necessary validation checks AND tracking the given
/// attribute is enabled.
/// Returns the object kind corresponding to the present attribute, or None,
diff --git a/clang/include/clang/Basic/ABI.h b/clang/include/clang/Basic/ABI.h
index 2401ffa20494..231bad799a42 100644
--- a/clang/include/clang/Basic/ABI.h
+++ b/clang/include/clang/Basic/ABI.h
@@ -37,174 +37,6 @@ enum CXXDtorType {
Dtor_Comdat ///< The COMDAT used for dtors
};
-/// A return adjustment.
-struct ReturnAdjustment {
- /// The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// Holds the ABI-specific information about the virtual return
- /// adjustment, if needed.
- union VirtualAdjustment {
- // Itanium ABI
- struct {
- /// The offset (in bytes), relative to the address point
- /// of the virtual base class offset.
- int64_t VBaseOffsetOffset;
- } Itanium;
-
- // Microsoft ABI
- struct {
- /// The offset (in bytes) of the vbptr, relative to the beginning
- /// of the derived class.
- uint32_t VBPtrOffset;
-
- /// Index of the virtual base in the vbtable.
- uint32_t VBIndex;
- } Microsoft;
-
- VirtualAdjustment() {
- memset(this, 0, sizeof(*this));
- }
-
- bool Equals(const VirtualAdjustment &Other) const {
- return memcmp(this, &Other, sizeof(Other)) == 0;
- }
-
- bool isEmpty() const {
- VirtualAdjustment Zero;
- return Equals(Zero);
- }
-
- bool Less(const VirtualAdjustment &RHS) const {
- return memcmp(this, &RHS, sizeof(RHS)) < 0;
- }
- } Virtual;
-
- ReturnAdjustment() : NonVirtual(0) {}
-
- bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
-
- friend bool operator==(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Equals(RHS.Virtual);
- }
-
- friend bool operator!=(const ReturnAdjustment &LHS, const ReturnAdjustment &RHS) {
- return !(LHS == RHS);
- }
-
- friend bool operator<(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- if (LHS.NonVirtual < RHS.NonVirtual)
- return true;
-
- return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Less(RHS.Virtual);
- }
-};
-
-/// A \c this pointer adjustment.
-struct ThisAdjustment {
- /// The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// Holds the ABI-specific information about the virtual this
- /// adjustment, if needed.
- union VirtualAdjustment {
- // Itanium ABI
- struct {
- /// The offset (in bytes), relative to the address point,
- /// of the virtual call offset.
- int64_t VCallOffsetOffset;
- } Itanium;
-
- struct {
- /// The offset of the vtordisp (in bytes), relative to the ECX.
- int32_t VtordispOffset;
-
- /// The offset of the vbptr of the derived class (in bytes),
- /// relative to the ECX after vtordisp adjustment.
- int32_t VBPtrOffset;
-
- /// The offset (in bytes) of the vbase offset in the vbtable.
- int32_t VBOffsetOffset;
- } Microsoft;
-
- VirtualAdjustment() {
- memset(this, 0, sizeof(*this));
- }
-
- bool Equals(const VirtualAdjustment &Other) const {
- return memcmp(this, &Other, sizeof(Other)) == 0;
- }
-
- bool isEmpty() const {
- VirtualAdjustment Zero;
- return Equals(Zero);
- }
-
- bool Less(const VirtualAdjustment &RHS) const {
- return memcmp(this, &RHS, sizeof(RHS)) < 0;
- }
- } Virtual;
-
- ThisAdjustment() : NonVirtual(0) { }
-
- bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
-
- friend bool operator==(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Equals(RHS.Virtual);
- }
-
- friend bool operator!=(const ThisAdjustment &LHS, const ThisAdjustment &RHS) {
- return !(LHS == RHS);
- }
-
- friend bool operator<(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- if (LHS.NonVirtual < RHS.NonVirtual)
- return true;
-
- return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Less(RHS.Virtual);
- }
-};
-
-class CXXMethodDecl;
-
-/// The \c this pointer adjustment as well as an optional return
-/// adjustment for a thunk.
-struct ThunkInfo {
- /// The \c this pointer adjustment.
- ThisAdjustment This;
-
- /// The return adjustment.
- ReturnAdjustment Return;
-
- /// Holds a pointer to the overridden method this thunk is for,
- /// if needed by the ABI to distinguish different thunks with equal
- /// adjustments. Otherwise, null.
- /// CAUTION: In the unlikely event you need to sort ThunkInfos, consider using
- /// an ABI-specific comparator.
- const CXXMethodDecl *Method;
-
- ThunkInfo() : Method(nullptr) { }
-
- ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return,
- const CXXMethodDecl *Method = nullptr)
- : This(This), Return(Return), Method(Method) {}
-
- friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
- return LHS.This == RHS.This && LHS.Return == RHS.Return &&
- LHS.Method == RHS.Method;
- }
-
- bool isEmpty() const {
- return This.isEmpty() && Return.isEmpty() && Method == nullptr;
- }
-};
-
} // end namespace clang
#endif
diff --git a/clang/include/clang/Basic/AddressSpaces.h b/clang/include/clang/Basic/AddressSpaces.h
index a9db52dfcc9c..99bb67fd26d1 100644
--- a/clang/include/clang/Basic/AddressSpaces.h
+++ b/clang/include/clang/Basic/AddressSpaces.h
@@ -44,6 +44,13 @@ enum class LangAS : unsigned {
cuda_constant,
cuda_shared,
+ // SYCL specific address spaces.
+ sycl_global,
+ sycl_global_device,
+ sycl_global_host,
+ sycl_local,
+ sycl_private,
+
// Pointer size and extension address spaces.
ptr32_sptr,
ptr32_uptr,
diff --git a/clang/include/clang/Basic/Attr.td b/clang/include/clang/Basic/Attr.td
index bfd50f6a6779..12d09181a2ea 100644
--- a/clang/include/clang/Basic/Attr.td
+++ b/clang/include/clang/Basic/Attr.td
@@ -368,6 +368,7 @@ def TargetBPF : TargetArch<["bpfel", "bpfeb"]>;
def TargetMips32 : TargetArch<["mips", "mipsel"]>;
def TargetAnyMips : TargetArch<["mips", "mipsel", "mips64", "mips64el"]>;
def TargetMSP430 : TargetArch<["msp430"]>;
+def TargetM68k : TargetArch<["m68k"]>;
def TargetRISCV : TargetArch<["riscv32", "riscv64"]>;
def TargetX86 : TargetArch<["x86"]>;
def TargetAnyX86 : TargetArch<["x86", "x86_64"]>;
@@ -553,6 +554,11 @@ class Attr {
list<Documentation> Documentation;
}
+/// Used to define a set of mutually exclusive attributes.
+class MutualExclusions<list<Attr> Ex> {
+ list<Attr> Exclusions = Ex;
+}
+
/// A type attribute is not processed on a declaration or a statement.
class TypeAttr : Attr;
@@ -636,6 +642,15 @@ def Alias : Attr {
let Documentation = [Undocumented];
}
+def BuiltinAlias : Attr {
+ let Spellings = [CXX11<"clang", "builtin_alias">,
+ C2x<"clang", "builtin_alias">,
+ GNU<"clang_builtin_alias">];
+ let Args = [IdentifierArgument<"BuiltinName">];
+ let Subjects = SubjectList<[Function], ErrorDiag>;
+ let Documentation = [BuiltinAliasDocs];
+}
+
def ArmBuiltinAlias : InheritableAttr, TargetSpecificAttr<TargetAnyArm> {
let Spellings = [Clang<"__clang_arm_builtin_alias">];
let Args = [IdentifierArgument<"BuiltinName">];
@@ -729,7 +744,8 @@ def XRayLogArgs : InheritableAttr {
def PatchableFunctionEntry
: InheritableAttr,
- TargetSpecificAttr<TargetArch<["aarch64", "aarch64_be", "x86", "x86_64"]>> {
+ TargetSpecificAttr<TargetArch<
+ ["aarch64", "aarch64_be", "riscv32", "riscv64", "x86", "x86_64"]>> {
let Spellings = [GCC<"patchable_function_entry">];
let Subjects = SubjectList<[Function, ObjCMethod]>;
let Args = [UnsignedArgument<"Count">, DefaultIntArgument<"Offset", 0>];
@@ -772,8 +788,9 @@ def Annotate : InheritableParamAttr {
}
def ARMInterrupt : InheritableAttr, TargetSpecificAttr<TargetARM> {
- // NOTE: If you add any additional spellings, MSP430Interrupt's,
- // MipsInterrupt's and AnyX86Interrupt's spellings must match.
+ // NOTE: If you add any additional spellings, M68kInterrupt's,
+ // MSP430Interrupt's, MipsInterrupt's and AnyX86Interrupt's spellings
+ // must match.
let Spellings = [GCC<"interrupt">];
let Args = [EnumArgument<"Interrupt", "InterruptType",
["IRQ", "FIQ", "SWI", "ABORT", "UNDEF", ""],
@@ -839,6 +856,8 @@ def Availability : InheritableAttr {
.Case("macos_app_extension", "macOS (App Extension)")
.Case("tvos_app_extension", "tvOS (App Extension)")
.Case("watchos_app_extension", "watchOS (App Extension)")
+ .Case("maccatalyst", "macCatalyst")
+ .Case("maccatalyst_app_extension", "macCatalyst (App Extension)")
.Case("swift", "Swift")
.Default(llvm::StringRef());
}
@@ -852,6 +871,8 @@ static llvm::StringRef getPlatformNameSourceSpelling(llvm::StringRef Platform) {
.Case("macos_app_extension", "macOSApplicationExtension")
.Case("tvos_app_extension", "tvOSApplicationExtension")
.Case("watchos_app_extension", "watchOSApplicationExtension")
+ .Case("maccatalyst", "macCatalyst")
+ .Case("maccatalyst_app_extension", "macCatalystApplicationExtension")
.Case("zos", "z/OS")
.Default(Platform);
}
@@ -865,6 +886,8 @@ static llvm::StringRef canonicalizePlatformName(llvm::StringRef Platform) {
.Case("macOSApplicationExtension", "macos_app_extension")
.Case("tvOSApplicationExtension", "tvos_app_extension")
.Case("watchOSApplicationExtension", "watchos_app_extension")
+ .Case("macCatalyst", "maccatalyst")
+ .Case("macCatalystApplicationExtension", "maccatalyst_app_extension")
.Default(Platform);
} }];
let HasCustomParsing = 1;
@@ -915,6 +938,7 @@ def CFAuditedTransfer : InheritableAttr {
let Spellings = [Clang<"cf_audited_transfer">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
// cf_unknown_transfer is an explicit opt-out of cf_audited_transfer.
@@ -924,7 +948,9 @@ def CFUnknownTransfer : InheritableAttr {
let Spellings = [Clang<"cf_unknown_transfer">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[CFAuditedTransfer, CFUnknownTransfer]>;
def CFReturnsRetained : InheritableAttr {
let Spellings = [Clang<"cf_returns_retained">];
@@ -1006,6 +1032,7 @@ def Cold : InheritableAttr {
let Spellings = [GCC<"cold">];
let Subjects = SubjectList<[Function]>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def Common : InheritableAttr {
@@ -1091,6 +1118,7 @@ def CUDADeviceBuiltinSurfaceType : InheritableAttr {
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [CUDADeviceBuiltinSurfaceTypeDocs];
let MeaningfulToClassTemplateDefinition = 1;
+ let SimpleHandler = 1;
}
def CUDADeviceBuiltinTextureType : InheritableAttr {
@@ -1100,7 +1128,10 @@ def CUDADeviceBuiltinTextureType : InheritableAttr {
let Subjects = SubjectList<[CXXRecord]>;
let Documentation = [CUDADeviceBuiltinTextureTypeDocs];
let MeaningfulToClassTemplateDefinition = 1;
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[CUDADeviceBuiltinSurfaceType,
+ CUDADeviceBuiltinTextureType]>;
def CUDAGlobal : InheritableAttr {
let Spellings = [GNU<"global">, Declspec<"__global__">];
@@ -1108,13 +1139,16 @@ def CUDAGlobal : InheritableAttr {
let LangOpts = [CUDA];
let Documentation = [Undocumented];
}
+def : MutualExclusions<[CUDADevice, CUDAGlobal]>;
def CUDAHost : InheritableAttr {
let Spellings = [GNU<"host">, Declspec<"__host__">];
let Subjects = SubjectList<[Function]>;
let LangOpts = [CUDA];
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[CUDAGlobal, CUDAHost]>;
def HIPManaged : InheritableAttr {
let Spellings = [GNU<"managed">, Declspec<"__managed__">];
@@ -1147,6 +1181,7 @@ def CUDAShared : InheritableAttr {
let LangOpts = [CUDA];
let Documentation = [Undocumented];
}
+def : MutualExclusions<[CUDAConstant, CUDAShared, HIPManaged]>;
def SYCLKernel : InheritableAttr {
let Spellings = [Clang<"sycl_kernel">];
@@ -1178,9 +1213,11 @@ def OpenCLKernel : InheritableAttr {
let SimpleHandler = 1;
}
-def OpenCLUnrollHint : InheritableAttr {
+def OpenCLUnrollHint : StmtAttr {
let Spellings = [GNU<"opencl_unroll_hint">];
- let Args = [UnsignedArgument<"UnrollHint">];
+ let Subjects = SubjectList<[ForStmt, CXXForRangeStmt, WhileStmt, DoStmt],
+ ErrorDiag, "'for', 'while', and 'do' statements">;
+ let Args = [UnsignedArgument<"UnrollHint", /*opt*/1>];
let Documentation = [OpenCLUnrollHintDocs];
}
@@ -1286,7 +1323,7 @@ def EmptyBases : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
def AllocSize : InheritableAttr {
let Spellings = [GCC<"alloc_size">];
- let Subjects = SubjectList<[Function]>;
+ let Subjects = SubjectList<[HasFunctionProto]>;
let Args = [ParamIdxArgument<"ElemSizeParam">,
ParamIdxArgument<"NumElemsParam", /*opt*/ 1>];
let TemplateDependent = 1;
@@ -1321,7 +1358,10 @@ def FallThrough : StmtAttr {
let Spellings = [CXX11<"", "fallthrough", 201603>,
C2x<"", "fallthrough", 201904>,
CXX11<"clang", "fallthrough">, GCC<"fallthrough">];
-// let Subjects = [NullStmt];
+ // The attribute only applies to a NullStmt, but we have special fix-it
+ // behavior if applied to a case label.
+ let Subjects = SubjectList<[NullStmt, SwitchCase], ErrorDiag,
+ "empty statements">;
let Documentation = [FallthroughDocs];
}
@@ -1334,15 +1374,23 @@ def Unlikely : StmtAttr {
let Spellings = [CXX11<"", "unlikely", 201803>, C2x<"clang", "unlikely">];
let Documentation = [LikelihoodDocs];
}
+def : MutualExclusions<[Likely, Unlikely]>;
def NoMerge : DeclOrStmtAttr {
let Spellings = [Clang<"nomerge">];
let Documentation = [NoMergeDocs];
let InheritEvenIfAlreadyPresent = 1;
- let Subjects = SubjectList<[Function], ErrorDiag, "functions and statements">;
+ let Subjects = SubjectList<[Function, Stmt], ErrorDiag,
+ "functions and statements">;
let SimpleHandler = 1;
}
+def MustTail : StmtAttr {
+ let Spellings = [Clang<"musttail">];
+ let Documentation = [MustTailDocs];
+ let Subjects = SubjectList<[ReturnStmt], ErrorDiag, "return statements">;
+}
+
def FastCall : DeclOrTypeAttr {
let Spellings = [GCC<"fastcall">, Keyword<"__fastcall">,
Keyword<"_fastcall">];
@@ -1424,7 +1472,9 @@ def Hot : InheritableAttr {
// An AST node is created for this attribute, but not actually used beyond
// semantic checking for mutual exclusion with the Cold attribute.
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[Hot, Cold]>;
def IBAction : InheritableAttr {
let Spellings = [Clang<"ibaction">];
@@ -1459,7 +1509,7 @@ def IFunc : Attr, TargetSpecificAttr<TargetELF> {
def Restrict : InheritableAttr {
let Spellings = [Declspec<"restrict">, GCC<"malloc">];
let Subjects = SubjectList<[Function]>;
- let Documentation = [Undocumented];
+ let Documentation = [RestrictDocs];
}
def LayoutVersion : InheritableAttr, TargetSpecificAttr<TargetMicrosoftCXXABI> {
@@ -1522,8 +1572,8 @@ def MSABI : DeclOrTypeAttr {
}
def MSP430Interrupt : InheritableAttr, TargetSpecificAttr<TargetMSP430> {
- // NOTE: If you add any additional spellings, ARMInterrupt's, MipsInterrupt's
- // and AnyX86Interrupt's spellings must match.
+ // NOTE: If you add any additional spellings, ARMInterrupt's, M68kInterrupt's,
+ // MipsInterrupt's and AnyX86Interrupt's spellings must match.
let Spellings = [GCC<"interrupt">];
let Args = [UnsignedArgument<"Number">];
let ParseKind = "Interrupt";
@@ -1535,11 +1585,13 @@ def Mips16 : InheritableAttr, TargetSpecificAttr<TargetMips32> {
let Spellings = [GCC<"mips16">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [Undocumented];
+ let SimpleHandler = 1;
}
def MipsInterrupt : InheritableAttr, TargetSpecificAttr<TargetMips32> {
// NOTE: If you add any additional spellings, ARMInterrupt's,
- // MSP430Interrupt's and AnyX86Interrupt's spellings must match.
+ // M68kInterrupt's, MSP430Interrupt's and AnyX86Interrupt's spellings
+ // must match.
let Spellings = [GCC<"interrupt">];
let Subjects = SubjectList<[Function]>;
let Args = [EnumArgument<"Interrupt", "InterruptType",
@@ -1552,23 +1604,39 @@ def MipsInterrupt : InheritableAttr, TargetSpecificAttr<TargetMips32> {
let ParseKind = "Interrupt";
let Documentation = [MipsInterruptDocs];
}
+def : MutualExclusions<[Mips16, MipsInterrupt]>;
def MicroMips : InheritableAttr, TargetSpecificAttr<TargetMips32> {
let Spellings = [GCC<"micromips">];
let Subjects = SubjectList<[Function], ErrorDiag>;
let Documentation = [MicroMipsDocs];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[Mips16, MicroMips]>;
def MipsLongCall : InheritableAttr, TargetSpecificAttr<TargetAnyMips> {
let Spellings = [GCC<"long_call">, GCC<"far">];
let Subjects = SubjectList<[Function]>;
let Documentation = [MipsLongCallStyleDocs];
+ let SimpleHandler = 1;
}
def MipsShortCall : InheritableAttr, TargetSpecificAttr<TargetAnyMips> {
let Spellings = [GCC<"short_call">, GCC<"near">];
let Subjects = SubjectList<[Function]>;
let Documentation = [MipsShortCallStyleDocs];
+ let SimpleHandler = 1;
+}
+def : MutualExclusions<[MipsLongCall, MipsShortCall]>;
+
+def M68kInterrupt : InheritableAttr, TargetSpecificAttr<TargetM68k> {
+  // NOTE: If you add any additional spellings, ARMInterrupt's, MipsInterrupt's,
+ // MSP430Interrupt's and AnyX86Interrupt's spellings must match.
+ let Spellings = [GNU<"interrupt">];
+ let Args = [UnsignedArgument<"Number">];
+ let ParseKind = "Interrupt";
+ let HasCustomParsing = 1;
+ let Documentation = [Undocumented];
}
def Mode : Attr {
@@ -1636,7 +1704,9 @@ def DisableTailCalls : InheritableAttr {
let Spellings = [Clang<"disable_tail_calls">];
let Subjects = SubjectList<[Function, ObjCMethod]>;
let Documentation = [DisableTailCallsDocs];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[Naked, DisableTailCalls]>;
def NoAlias : InheritableAttr {
let Spellings = [Declspec<"noalias">];
@@ -1658,6 +1728,14 @@ def NoDebug : InheritableAttr {
let Documentation = [NoDebugDocs];
}
+def StandaloneDebug : InheritableAttr {
+ let Spellings = [Clang<"standalone_debug", /*allowInC =*/0>];
+ let Subjects = SubjectList<[CXXRecord]>;
+ let Documentation = [StandaloneDebugDocs];
+ let SimpleHandler = 1;
+ let LangOpts = [CPlusPlus];
+}
+
def NoDuplicate : InheritableAttr {
let Spellings = [Clang<"noduplicate">];
let Subjects = SubjectList<[Function]>;
@@ -1898,11 +1976,20 @@ def NoInstrumentFunction : InheritableAttr {
let SimpleHandler = 1;
}
+def NoProfileFunction : InheritableAttr {
+ let Spellings = [GCC<"no_profile_instrument_function">];
+ let Subjects = SubjectList<[Function]>;
+ let Documentation = [NoProfileInstrumentFunctionDocs];
+ let SimpleHandler = 1;
+}
+
def NotTailCalled : InheritableAttr {
let Spellings = [Clang<"not_tail_called">];
let Subjects = SubjectList<[Function]>;
let Documentation = [NotTailCalledDocs];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[AlwaysInline, NotTailCalled]>;
def NoStackProtector : InheritableAttr {
let Spellings = [Clang<"no_stack_protector">];
@@ -2085,6 +2172,7 @@ def ObjCNonRuntimeProtocol : Attr {
let Subjects = SubjectList<[ObjCProtocol], ErrorDiag>;
let LangOpts = [ObjC];
let Documentation = [ObjCNonRuntimeProtocolDocs];
+ let SimpleHandler = 1;
}
def ObjCRuntimeName : Attr {
@@ -2219,12 +2307,14 @@ def SwiftBridgedTypedef : InheritableAttr {
let Spellings = [GNU<"swift_bridged_typedef">];
let Subjects = SubjectList<[TypedefName], ErrorDiag>;
let Documentation = [SwiftBridgedTypedefDocs];
+ let SimpleHandler = 1;
}
def SwiftObjCMembers : Attr {
let Spellings = [GNU<"swift_objc_members">];
let Subjects = SubjectList<[ObjCInterface], ErrorDiag>;
let Documentation = [SwiftObjCMembersDocs];
+ let SimpleHandler = 1;
}
def SwiftError : InheritableAttr {
@@ -2256,6 +2346,7 @@ def SwiftNewType : InheritableAttr {
def SwiftPrivate : InheritableAttr {
let Spellings = [GNU<"swift_private">];
let Documentation = [SwiftPrivateDocs];
+ let SimpleHandler = 1;
}
def NoDeref : TypeAttr {
@@ -2374,11 +2465,21 @@ def SwiftCall : DeclOrTypeAttr {
let Documentation = [SwiftCallDocs];
}
+def SwiftAsyncCall : DeclOrTypeAttr {
+ let Spellings = [Clang<"swiftasynccall">];
+ let Documentation = [SwiftAsyncCallDocs];
+}
+
def SwiftContext : ParameterABIAttr {
let Spellings = [Clang<"swift_context">];
let Documentation = [SwiftContextDocs];
}
+def SwiftAsyncContext : ParameterABIAttr {
+ let Spellings = [Clang<"swift_async_context">];
+ let Documentation = [SwiftAsyncContextDocs];
+}
+
def SwiftErrorResult : ParameterABIAttr {
let Spellings = [Clang<"swift_error_result">];
let Documentation = [SwiftErrorResultDocs];
@@ -2399,6 +2500,16 @@ def SwiftAsync : InheritableAttr {
let Documentation = [SwiftAsyncDocs];
}
+def SwiftAsyncError : InheritableAttr {
+ let Spellings = [Clang<"swift_async_error">];
+ let Subjects = SubjectList<[Function, ObjCMethod]>;
+ let Args = [EnumArgument<"Convention", "ConventionKind",
+ ["none", "nonnull_error", "zero_argument", "nonzero_argument"],
+ ["None", "NonNullError", "ZeroArgument", "NonZeroArgument"]>,
+ UnsignedArgument<"HandlerParamIdx", /*opt=*/1>];
+ let Documentation = [SwiftAsyncErrorDocs];
+}
+
def Suppress : StmtAttr {
let Spellings = [CXX11<"gsl", "suppress">];
let Args = [VariadicStringArgument<"DiagnosticIdentifiers">];
@@ -2636,7 +2747,14 @@ def Unused : InheritableAttr {
def Used : InheritableAttr {
let Spellings = [GCC<"used">];
let Subjects = SubjectList<[NonLocalVar, Function, ObjCMethod]>;
- let Documentation = [Undocumented];
+ let Documentation = [UsedDocs];
+ let SimpleHandler = 1;
+}
+
+def Retain : InheritableAttr {
+ let Spellings = [GCC<"retain">];
+ let Subjects = SubjectList<[NonLocalVar, Function, ObjCMethod]>;
+ let Documentation = [RetainDocs];
let SimpleHandler = 1;
}
@@ -2758,7 +2876,7 @@ def LTOVisibilityPublic : InheritableAttr {
def AnyX86Interrupt : InheritableAttr, TargetSpecificAttr<TargetAnyX86> {
// NOTE: If you add any additional spellings, ARMInterrupt's,
- // MSP430Interrupt's and MipsInterrupt's spellings must match.
+ // M68kInterrupt's, MSP430Interrupt's and MipsInterrupt's spellings must match.
let Spellings = [GCC<"interrupt">];
let Subjects = SubjectList<[HasFunctionProto]>;
let ParseKind = "Interrupt";
@@ -2802,6 +2920,10 @@ def NoSanitize : InheritableAttr {
}
return Mask;
}
+
+ bool hasCoverage() const {
+ return llvm::is_contained(sanitizers(), "coverage");
+ }
}];
}
@@ -3199,6 +3321,7 @@ def Pointer : InheritableAttr {
let Args = [TypeArgument<"DerefType", /*opt=*/1>];
let Documentation = [LifetimePointerDocs];
}
+def : MutualExclusions<[Owner, Pointer]>;
// Microsoft-related attributes
@@ -3426,6 +3549,7 @@ def LoopHint : Attr {
}];
let Documentation = [LoopHintDocs, UnrollHintDocs];
+ let HasCustomParsing = 1;
}
def CapturedRecord : InheritableAttr {
@@ -3509,6 +3633,7 @@ def OMPDeclareTargetDecl : InheritableAttr {
void printPrettyPragma(raw_ostream &OS, const PrintingPolicy &Policy) const;
static llvm::Optional<MapTypeTy>
isDeclareTargetDeclaration(const ValueDecl *VD);
+ static llvm::Optional<OMPDeclareTargetDeclAttr*> getActiveAttr(const ValueDecl *VD);
static llvm::Optional<DevTypeTy> getDeviceType(const ValueDecl *VD);
static llvm::Optional<SourceLocation> getLocation(const ValueDecl *VD);
}];
@@ -3570,6 +3695,7 @@ def InternalLinkage : InheritableAttr {
let Subjects = SubjectList<[Var, Function, CXXRecord]>;
let Documentation = [InternalLinkageDocs];
}
+def : MutualExclusions<[Common, InternalLinkage]>;
def ExcludeFromExplicitInstantiation : InheritableAttr {
let Spellings = [Clang<"exclude_from_explicit_instantiation">];
@@ -3597,18 +3723,22 @@ def AlwaysDestroy : InheritableAttr {
let Subjects = SubjectList<[Var]>;
let Documentation = [AlwaysDestroyDocs];
}
+def : MutualExclusions<[NoDestroy, AlwaysDestroy]>;
def SpeculativeLoadHardening : InheritableAttr {
let Spellings = [Clang<"speculative_load_hardening">];
let Subjects = SubjectList<[Function, ObjCMethod], ErrorDiag>;
let Documentation = [SpeculativeLoadHardeningDocs];
+ let SimpleHandler = 1;
}
def NoSpeculativeLoadHardening : InheritableAttr {
let Spellings = [Clang<"no_speculative_load_hardening">];
let Subjects = SubjectList<[Function, ObjCMethod], ErrorDiag>;
let Documentation = [NoSpeculativeLoadHardeningDocs];
+ let SimpleHandler = 1;
}
+def : MutualExclusions<[SpeculativeLoadHardening, NoSpeculativeLoadHardening]>;
def Uninitialized : InheritableAttr {
let Spellings = [Clang<"uninitialized", 0>];
@@ -3621,6 +3751,7 @@ def LoaderUninitialized : Attr {
let Spellings = [Clang<"loader_uninitialized">];
let Subjects = SubjectList<[GlobalVar]>;
let Documentation = [LoaderUninitializedDocs];
+ let SimpleHandler = 1;
}
def ObjCExternallyRetained : InheritableAttr {
@@ -3637,6 +3768,14 @@ def NoBuiltin : Attr {
let Documentation = [NoBuiltinDocs];
}
+def UsingIfExists : InheritableAttr {
+ let Spellings = [Clang<"using_if_exists", 0>];
+ let Subjects = SubjectList<[Using,
+ UnresolvedUsingTypename,
+ UnresolvedUsingValue], ErrorDiag>;
+ let Documentation = [UsingIfExistsDocs];
+}
+
// FIXME: This attribute is not inheritable, it will not be propagated to
// redecls. [[clang::lifetimebound]] has the same problems. This should be
// fixed in TableGen (by probably adding a new inheritable flag).
diff --git a/clang/include/clang/Basic/AttrDocs.td b/clang/include/clang/Basic/AttrDocs.td
index 170a0fe3d4c4..c265a877e3b1 100644
--- a/clang/include/clang/Basic/AttrDocs.td
+++ b/clang/include/clang/Basic/AttrDocs.td
@@ -57,6 +57,55 @@ global variable or function should be in after translation.
let Heading = "section, __declspec(allocate)";
}
+def UsedDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+This attribute, when attached to a function or variable definition, indicates
+that there may be references to the entity which are not apparent in the source
+code. For example, it may be referenced from inline ``asm``, or it may be
+found through a dynamic symbol or section lookup.
+
+The compiler must emit the definition even if it appears to be unused, and it
+must not apply optimizations which depend on fully understanding how the entity
+is used.
+
+Whether this attribute has any effect on the linker depends on the target and
+the linker. Most linkers support the feature of section garbage collection
+(``--gc-sections``), also known as "dead stripping" (``ld64 -dead_strip``) or
+discarding unreferenced sections (``link.exe /OPT:REF``). On COFF and Mach-O
+targets (Windows and Apple platforms), the ``used`` attribute prevents symbols
+from being removed by linker section GC. On ELF targets, it has no effect on its
+own, and the linker may remove the definition if it is not otherwise referenced.
+This linker GC can be avoided by also adding the ``retain`` attribute. Note
+that ``retain`` requires special support from the linker; see that attribute's
+documentation for further information.
+ }];
+}
+
+def RetainDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+This attribute, when attached to a function or variable definition, prevents
+section garbage collection in the linker. It does not prevent other discard
+mechanisms, such as archive member selection, and COMDAT group resolution.
+
+If the compiler does not emit the definition, e.g. because it was not used in
+the translation unit or the compiler was able to eliminate all of the uses,
+this attribute has no effect. This attribute is typically combined with the
+``used`` attribute to force the definition to be emitted and preserved into the
+final linked image.
+
+This attribute is only necessary on ELF targets; other targets prevent section
+garbage collection by the linker when using the ``used`` attribute alone.
+Using the attributes together should result in consistent behavior across
+targets.
+
+This attribute requires the linker to support the ``SHF_GNU_RETAIN`` extension.
+This support is available in GNU ``ld`` and ``gold`` as of binutils 2.36, as
+well as in ``ld.lld`` 13.
+ }];
+}
+
def InitPriorityDocs : Documentation {
let Category = DocCatVariable;
let Content = [{
@@ -394,6 +443,32 @@ calls.
}];
}
+def MustTailDocs : Documentation {
+ let Category = DocCatStmt;
+ let Content = [{
+If a ``return`` statement is marked ``musttail``, this indicates that the
+compiler must generate a tail call for the program to be correct, even when
+optimizations are disabled. This guarantees that the call will not cause
+unbounded stack growth if it is part of a recursive cycle in the call graph.
+
+If the callee is a virtual function that is implemented by a thunk, there is
+no guarantee in general that the thunk tail-calls the implementation of the
+virtual function, so such a call in a recursive cycle can still result in
+unbounded stack growth.
+
+``clang::musttail`` can only be applied to a ``return`` statement whose value
+is the result of a function call (even functions returning void must use
+``return``, although no value is returned). The target function must have the
+same number of arguments as the caller. The types of the return value and all
+arguments must be similar according to C++ rules (differing only in cv
+qualifiers or array size), including the implicit "this" argument, if any.
+Any variables in scope, including all arguments to the function and the
+return value must be trivially destructible. The calling convention of the
+caller and callee must match, and they must not be variadic functions or have
+old style K&R C function declarations.
+ }];
+}
+
def AssertCapabilityDocs : Documentation {
let Category = DocCatFunction;
let Heading = "assert_capability, assert_shared_capability";
@@ -1048,8 +1123,18 @@ def NoDebugDocs : Documentation {
let Category = DocCatVariable;
let Content = [{
The ``nodebug`` attribute allows you to suppress debugging information for a
-function or method, or for a variable that is not a parameter or a non-static
-data member.
+function or method, for a variable that is not a parameter or a non-static
+data member, or for a typedef or using declaration.
+ }];
+}
+
+def StandaloneDebugDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+The ``standalone_debug`` attribute causes debug info to be emitted for a record
+type regardless of the debug info optimizations that are enabled with
+-fno-standalone-debug. This attribute only has an effect when debug info
+optimizations are enabled (e.g. with -fno-standalone-debug), and is C++-only.
}];
}
@@ -2474,17 +2559,33 @@ This attribute accepts a single parameter that must be one of the following:
}];
}
+def NoProfileInstrumentFunctionDocs : Documentation {
+ let Category = DocCatFunction;
+ let Content = [{
+Use the ``no_profile_instrument_function`` attribute on a function declaration
+to denote that the compiler should not instrument the function with
+profile-related instrumentation, such as via the
+``-fprofile-generate`` / ``-fprofile-instr-generate`` /
+``-fcs-profile-generate`` / ``-fprofile-arcs`` flags.
+}];
+}
+
def NoSanitizeDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
Use the ``no_sanitize`` attribute on a function or a global variable
declaration to specify that a particular instrumentation or set of
-instrumentations should not be applied. The attribute takes a list of
-string literals, which have the same meaning as values accepted by the
-``-fno-sanitize=`` flag. For example,
-``__attribute__((no_sanitize("address", "thread")))`` specifies that
-AddressSanitizer and ThreadSanitizer should not be applied to the
-function or variable.
+instrumentations should not be applied.
+
+The attribute takes a list of string literals with the following accepted
+values:
+* all values accepted by ``-fno-sanitize=``;
+* ``coverage``, to disable SanitizerCoverage instrumentation.
+
+For example, ``__attribute__((no_sanitize("address", "thread")))`` specifies
+that AddressSanitizer and ThreadSanitizer should not be applied to the function
+or variable. Using ``__attribute__((no_sanitize("coverage")))`` specifies that
+SanitizerCoverage should not be applied to the function.
See :ref:`Controlling Code Generation <controlling-code-generation>` for a
full list of supported sanitizer flags.
@@ -2967,16 +3068,55 @@ It is only supported when using the Microsoft C++ ABI.
def LifetimeBoundDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
-The ``lifetimebound`` attribute indicates that a resource owned by
-a function parameter or implicit object parameter
-is retained by the return value of the annotated function
-(or, for a parameter of a constructor, in the value of the constructed object).
-It is only supported in C++.
+The ``lifetimebound`` attribute on a function parameter or implicit object
+parameter indicates that objects that are referred to by that parameter may
+also be referred to by the return value of the annotated function (or, for a
+parameter of a constructor, by the value of the constructed object). It is only
+supported in C++.
+
+By default, a reference is considered to refer to its referenced object, a
+pointer is considered to refer to its pointee, a ``std::initializer_list<T>``
+is considered to refer to its underlying array, and aggregates (arrays and
+simple ``struct``\s) are considered to refer to all objects that their
+transitive subobjects refer to.
+
+Clang warns if it is able to detect that an object or reference refers to
+another object with a shorter lifetime. For example, Clang will warn if a
+function returns a reference to a local variable, or if a reference is bound to
+a temporary object whose lifetime is not extended. By using the
+``lifetimebound`` attribute, this determination can be extended to look through
+user-declared functions. For example:
+
+.. code-block:: c++
+
+ // Returns m[key] if key is present, or default_value if not.
+ template<typename T, typename U>
+ const U &get_or_default(const std::map<T, U> &m [[clang::lifetimebound]],
+ const T &key, /* note, not lifetimebound */
+ const U &default_value [[clang::lifetimebound]]);
+
+ std::map<std::string, std::string> m;
+ // warning: temporary "bar"s that might be bound to local reference 'val'
+ // will be destroyed at the end of the full-expression
+ const std::string &val = get_or_default(m, "foo"s, "bar"s);
+
+ // No warning in this case.
+ std::string def_val = "bar"s;
+ const std::string &val = get_or_default(m, "foo"s, def_val);
+
+The attribute can be applied to the implicit ``this`` parameter of a member
+function by writing the attribute after the function type:
+
+.. code-block:: c++
-This attribute provides an experimental implementation of the facility
-described in the C++ committee paper `P0936R0 <http://wg21.link/p0936r0>`_,
-and is subject to change as the design of the corresponding functionality
-changes.
+ struct string {
+ // The returned pointer should not outlive ``*this``.
+ const char *data() const [[clang::lifetimebound]];
+ };
+
+This attribute is inspired by the C++ committee paper `P0936R0
+<http://wg21.link/p0936r0>`_, but does not affect whether temporary objects
+have their lifetimes extended.
}];
}
@@ -3119,7 +3259,9 @@ def UnrollHintDocs : Documentation {
let Content = [{
Loop unrolling optimization hints can be specified with ``#pragma unroll`` and
``#pragma nounroll``. The pragma is placed immediately before a for, while,
-do-while, or c++11 range-based for loop.
+do-while, or c++11 range-based for loop. GCC's loop unrolling hints
+``#pragma GCC unroll`` and ``#pragma GCC nounroll`` are also supported and have
+identical semantics to ``#pragma unroll`` and ``#pragma nounroll``.
Specifying ``#pragma unroll`` without a parameter directs the loop unroller to
attempt to fully unroll the loop if the trip count is known at compile time and
@@ -3586,6 +3728,16 @@ system) and does not imply undefined behavior, making it more widely applicable.
}];
}
+def RestrictDocs : Documentation {
+ let Category = DocCatFunction;
+ let Heading = "malloc";
+ let Content = [{
+The ``malloc`` attribute indicates that the function acts like a system memory
+allocation function, returning a pointer to allocated storage disjoint from the
+storage for any other object accessible to the caller.
+ }];
+}
+
def ReturnsNonNullDocs : Documentation {
let Category = NullabilityDocs;
let Content = [{
@@ -4057,9 +4209,8 @@ def NotTailCalledDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
The ``not_tail_called`` attribute prevents tail-call optimization on statically
-bound calls. It has no effect on indirect calls. Virtual functions, objective-c
-methods, and functions marked as ``always_inline`` cannot be marked as
-``not_tail_called``.
+bound calls. Objective-C methods and functions marked as ``always_inline``
+cannot be marked as ``not_tail_called``.
For example, it prevents tail-call optimization in the following case:
@@ -4085,28 +4236,25 @@ However, it doesn't prevent tail-call optimization in this case:
return (*fn)(a);
}
-Marking virtual functions as ``not_tail_called`` is an error:
+Generally, marking an overriding virtual function as ``not_tail_called`` is
+not useful, because this attribute is a property of the static type. Calls
+made through a pointer or reference to the base class type will respect
+the ``not_tail_called`` attribute of the base class's member function,
+regardless of the runtime destination of the call:
.. code-block:: c++
- class Base {
- public:
- // not_tail_called on a virtual function is an error.
- [[clang::not_tail_called]] virtual int foo1();
-
- virtual int foo2();
-
- // Non-virtual functions can be marked ``not_tail_called``.
- [[clang::not_tail_called]] int foo3();
- };
-
- class Derived1 : public Base {
- public:
- int foo1() override;
-
- // not_tail_called on a virtual function is an error.
- [[clang::not_tail_called]] int foo2() override;
+ struct Foo { virtual void f(); };
+ struct Bar : Foo {
+ [[clang::not_tail_called]] void f() override;
};
+ void callera(Bar& bar) {
+ Foo& foo = bar;
+    // not_tail_called has no effect here, even though the
+ // underlying method is f from Bar.
+ foo.f();
+    bar.f(); // No tail-call optimization here.
+ }
}];
}
@@ -4379,7 +4527,8 @@ def SwiftContextDocs : Documentation {
let Category = DocCatVariable;
let Content = [{
The ``swift_context`` attribute marks a parameter of a ``swiftcall``
-function as having the special context-parameter ABI treatment.
+or ``swiftasynccall`` function as having the special context-parameter
+ABI treatment.
This treatment generally passes the context value in a special register
which is normally callee-preserved.
@@ -4392,6 +4541,47 @@ A context parameter must have pointer or reference type.
}];
}
+def SwiftAsyncCallDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+The ``swiftasynccall`` attribute indicates that a function is
+compatible with the low-level conventions of Swift async functions,
+provided it declares the right formal arguments.
+
+In most respects, this is similar to the ``swiftcall`` attribute, except for
+the following:
+
+- A parameter may be marked ``swift_async_context``, ``swift_context``
+ or ``swift_indirect_result`` (with the same restrictions on parameter
+ ordering as ``swiftcall``) but the parameter attribute
+ ``swift_error_result`` is not permitted.
+
+- A ``swiftasynccall`` function must have return type ``void``.
+
+- Within a ``swiftasynccall`` function, a call to a ``swiftasynccall``
+ function that is the immediate operand of a ``return`` statement is
+ guaranteed to be performed as a tail call. This syntax is allowed even
+ in C as an extension (a call to a void-returning function cannot be a
+ return operand in standard C). If something in the calling function would
+ semantically be performed after a guaranteed tail call, such as the
+ non-trivial destruction of a local variable or temporary,
+ then the program is ill-formed.
+ }];
+}
+
+def SwiftAsyncContextDocs : Documentation {
+ let Category = DocCatVariable;
+ let Content = [{
+The ``swift_async_context`` attribute marks a parameter of a ``swiftasynccall``
+function as having the special asynchronous context-parameter ABI treatment.
+
+If the function is not ``swiftasynccall``, this attribute only generates
+extended frame information.
+
+A context parameter must have pointer or reference type.
+ }];
+}
+
def SwiftErrorResultDocs : Documentation {
let Category = DocCatVariable;
let Content = [{
@@ -4431,7 +4621,8 @@ def SwiftIndirectResultDocs : Documentation {
let Category = DocCatVariable;
let Content = [{
The ``swift_indirect_result`` attribute marks a parameter of a ``swiftcall``
-function as having the special indirect-result ABI treatment.
+or ``swiftasynccall`` function as having the special indirect-result ABI
+treatment.
This treatment gives the parameter the target's normal indirect-result
ABI treatment, which may involve passing it differently from an ordinary
@@ -4486,6 +4677,47 @@ argument is the index of the completion handler parameter.
}];
}
+def SwiftAsyncErrorDocs : Documentation {
+ let Category = SwiftDocs;
+ let Heading = "swift_async_error";
+ let Content = [{
+The ``swift_async_error`` attribute specifies how an error state will be
+represented in a swift async method. It's a bit analogous to the ``swift_error``
+attribute for the generated async method. The ``swift_async_error`` attribute
+can indicate a variety of different ways of representing an error.
+
+- ``__attribute__((swift_async_error(zero_argument, N)))``, specifies that the
+ async method is considered to have failed if the Nth argument to the
+ completion handler is zero.
+
+- ``__attribute__((swift_async_error(nonzero_argument, N)))``, specifies that
+ the async method is considered to have failed if the Nth argument to the
+ completion handler is non-zero.
+
+- ``__attribute__((swift_async_error(nonnull_error)))``, specifies that the
+ async method is considered to have failed if the ``NSError *`` argument to the
+ completion handler is non-null.
+
+- ``__attribute__((swift_async_error(none)))``, specifies that the async method
+ cannot fail.
+
+
+For instance:
+
+.. code-block:: objc
+
+ @interface MyClass : NSObject
+ -(void)asyncMethod:(void (^)(char, int, float))handler
+ __attribute__((swift_async(swift_private, 1)))
+ __attribute__((swift_async_error(zero_argument, 2)));
+ @end
+
+Here, the ``swift_async`` attribute specifies that ``handler`` is the completion
+handler for this method, and the ``swift_async_error`` attribute specifies that
+the ``int`` parameter is the one that represents the error.
+}];
+}
+
def SuppressDocs : Documentation {
let Category = DocCatStmt;
let Content = [{
@@ -4526,6 +4758,26 @@ the old mangled name and the new code will use the new mangled name with tags.
}];
}
+def BuiltinAliasDocs : Documentation {
+ let Category = DocCatFunction;
+ let Heading = "clang::builtin_alias, clang_builtin_alias";
+ let Content = [{
+This attribute is used in the implementation of the C intrinsics.
+It allows the C intrinsic functions to be declared using the names defined
+in target builtins, and still be recognized as clang builtins equivalent to the
+underlying name. For example, ``riscv_vector.h`` declares the function ``vadd``
+with ``__attribute__((clang_builtin_alias(__builtin_rvv_vadd_vv_i8m1)))``.
+This ensures that both functions are recognized as that clang builtin,
+and in the latter case, the choice of which builtin to identify the
+function as can be deferred until after overload resolution.
+
+This attribute can only be used to set up the aliases for certain ARM/RISC-V
+C intrinsic functions; it is intended for use only inside ``arm_*.h`` and
+``riscv_*.h`` and is not a general mechanism for declaring arbitrary aliases
+for clang builtin functions.
+ }];
+}
+
def PreferredNameDocs : Documentation {
let Category = DocCatDecl;
let Content = [{
@@ -4714,6 +4966,9 @@ def PatchableFunctionEntryDocs : Documentation {
before the function entry and N-M NOPs after the function entry. This attribute
takes precedence over the command line option ``-fpatchable-function-entry=N,M``.
``M`` defaults to 0 if omitted.
+
+This attribute is only supported on
+aarch64/aarch64-be/riscv32/riscv64/i386/x86-64 targets.
}];
}
@@ -5517,6 +5772,10 @@ This attribute can only be used to set up the aliases for certain Arm
intrinsic functions; it is intended for use only inside ``arm_*.h``
and is not a general mechanism for declaring arbitrary aliases for
clang builtin functions.
+
+In order to avoid duplicating the attribute definitions for a similar
+purpose for other architectures, there is a general form of the
+attribute `clang_builtin_alias`.
}];
}
@@ -5557,6 +5816,31 @@ once.
}];
}
+def UsingIfExistsDocs : Documentation {
+ let Category = DocCatDecl;
+ let Content = [{
+The ``using_if_exists`` attribute applies to a using-declaration. It allows
+programmers to import a declaration that potentially does not exist, instead
+deferring any errors to the point of use. For instance:
+
+.. code-block:: c++
+
+ namespace empty_namespace {};
+ __attribute__((using_if_exists))
+ using empty_namespace::does_not_exist; // no error!
+
+ does_not_exist x; // error: use of unresolved 'using_if_exists'
+
+The C++ spelling of the attribute (`[[clang::using_if_exists]]`) is also
+supported as a clang extension, since ISO C++ doesn't support attributes in this
+position. If the entity referred to by the using-declaration is found by name
+lookup, the attribute has no effect. This attribute is useful for libraries
+(primarily, libc++) that wish to redeclare a set of declarations in another
+namespace, when the availability of those declarations is difficult or
+impossible to detect at compile time with the preprocessor.
+ }];
+}
+
def HandleDocs : DocumentationCategory<"Handle Attributes"> {
let Content = [{
Handles are a way to identify resources like files, sockets, and processes.
diff --git a/clang/include/clang/Basic/AttributeCommonInfo.h b/clang/include/clang/Basic/AttributeCommonInfo.h
index f4a5db84aa9f..4be598e109fd 100644
--- a/clang/include/clang/Basic/AttributeCommonInfo.h
+++ b/clang/include/clang/Basic/AttributeCommonInfo.h
@@ -155,6 +155,12 @@ public:
bool isC2xAttribute() const { return SyntaxUsed == AS_C2x; }
+ /// The attribute is spelled [[]] in either C or C++ mode, including standard
+ /// attributes spelled with a keyword, like alignas.
+ bool isStandardAttributeSyntax() const {
+ return isCXX11Attribute() || isC2xAttribute();
+ }
+
bool isKeywordAttribute() const {
return SyntaxUsed == AS_Keyword || SyntaxUsed == AS_ContextSensitiveKeyword;
}
diff --git a/clang/include/clang/Basic/Builtins.def b/clang/include/clang/Basic/Builtins.def
index ab1b5866c8a7..0e3898537bcf 100644
--- a/clang/include/clang/Basic/Builtins.def
+++ b/clang/include/clang/Basic/Builtins.def
@@ -24,7 +24,8 @@
// c -> char
// s -> short
// i -> int
-// h -> half
+// h -> half (__fp16, OpenCL)
+// x -> half (_Float16)
// f -> float
// d -> double
// z -> size_t
@@ -529,6 +530,7 @@ BUILTIN(__builtin_rotateright32, "UZiUZiUZi", "nc")
BUILTIN(__builtin_rotateright64, "UWiUWiUWi", "nc")
// Random GCC builtins
+BUILTIN(__builtin_calloc, "v*zz", "nF")
BUILTIN(__builtin_constant_p, "i.", "nctu")
BUILTIN(__builtin_classify_type, "i.", "nctu")
BUILTIN(__builtin___CFStringMakeConstantString, "FC*cC*", "nc")
@@ -542,6 +544,8 @@ BUILTIN(__builtin_bcmp, "ivC*vC*z", "Fn")
BUILTIN(__builtin_bcopy, "vv*v*z", "n")
BUILTIN(__builtin_bzero, "vv*z", "nF")
BUILTIN(__builtin_fprintf, "iP*cC*.", "Fp:1:")
+BUILTIN(__builtin_free, "vv*", "nF")
+BUILTIN(__builtin_malloc, "v*z", "nF")
BUILTIN(__builtin_memchr, "v*vC*iz", "nF")
BUILTIN(__builtin_memcmp, "ivC*vC*z", "nF")
BUILTIN(__builtin_memcpy, "v*v*vC*z", "nF")
@@ -577,6 +581,7 @@ BUILTIN(__builtin_wmemchr, "w*wC*wz", "nF")
BUILTIN(__builtin_wmemcmp, "iwC*wC*z", "nF")
BUILTIN(__builtin_wmemcpy, "w*w*wC*z", "nF")
BUILTIN(__builtin_wmemmove, "w*w*wC*z", "nF")
+BUILTIN(__builtin_realloc, "v*v*z", "nF")
BUILTIN(__builtin_return_address, "v*IUi", "n")
BUILTIN(__builtin_extract_return_addr, "v*v*", "n")
BUILTIN(__builtin_frame_address, "v*IUi", "n")
@@ -586,8 +591,9 @@ BUILTIN(__builtin_longjmp, "vv**i", "r")
BUILTIN(__builtin_unwind_init, "v", "")
BUILTIN(__builtin_eh_return_data_regno, "iIi", "nc")
BUILTIN(__builtin_snprintf, "ic*zcC*.", "nFp:2:")
-BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
+BUILTIN(__builtin_sprintf, "ic*cC*.", "nFP:1:")
BUILTIN(__builtin_vsnprintf, "ic*zcC*a", "nFP:2:")
+BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
BUILTIN(__builtin_thread_pointer, "v*", "nc")
BUILTIN(__builtin_launder, "v*v*", "nt")
LANGBUILTIN(__builtin_is_constant_evaluated, "b", "n", CXX_LANG)
@@ -975,6 +981,8 @@ LIBBUILTIN(strtol, "LicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
LIBBUILTIN(strtoll, "LLicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
LIBBUILTIN(strtoul, "ULicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
LIBBUILTIN(strtoull, "ULLicC*c**i", "f", "stdlib.h", ALL_LANGUAGES)
+// C11 stdlib.h
+LIBBUILTIN(aligned_alloc, "v*zz", "f", "stdlib.h", ALL_LANGUAGES)
// C99 string.h
LIBBUILTIN(memcpy, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
LIBBUILTIN(memcmp, "ivC*vC*z", "f", "string.h", ALL_LANGUAGES)
@@ -1056,6 +1064,8 @@ LIBBUILTIN(longjmp, "vJi", "frT", "setjmp.h", ALL_LANGUAGES)
// all languages, because losing this attribute would result in miscompilation
// when these functions are used in non-GNU mode. PR16138.
LIBBUILTIN(alloca, "v*z", "f", "stdlib.h", ALL_GNU_LANGUAGES)
+// POSIX malloc.h
+LIBBUILTIN(memalign, "v*zz", "f", "malloc.h", ALL_GNU_LANGUAGES)
// POSIX string.h
LIBBUILTIN(memccpy, "v*v*vC*iz", "f", "string.h", ALL_GNU_LANGUAGES)
LIBBUILTIN(mempcpy, "v*v*vC*z", "f", "string.h", ALL_GNU_LANGUAGES)
@@ -1568,22 +1578,22 @@ BUILTIN(__builtin_nontemporal_store, "v.", "t")
BUILTIN(__builtin_nontemporal_load, "v.", "t")
// Coroutine intrinsics.
-BUILTIN(__builtin_coro_resume, "vv*", "")
-BUILTIN(__builtin_coro_destroy, "vv*", "")
-BUILTIN(__builtin_coro_done, "bv*", "n")
-BUILTIN(__builtin_coro_promise, "v*v*IiIb", "n")
-
-BUILTIN(__builtin_coro_size, "z", "n")
-BUILTIN(__builtin_coro_frame, "v*", "n")
-BUILTIN(__builtin_coro_noop, "v*", "n")
-BUILTIN(__builtin_coro_free, "v*v*", "n")
-
-BUILTIN(__builtin_coro_id, "v*Iiv*v*v*", "n")
-BUILTIN(__builtin_coro_alloc, "b", "n")
-BUILTIN(__builtin_coro_begin, "v*v*", "n")
-BUILTIN(__builtin_coro_end, "bv*Ib", "n")
-BUILTIN(__builtin_coro_suspend, "cIb", "n")
-BUILTIN(__builtin_coro_param, "bv*v*", "n")
+LANGBUILTIN(__builtin_coro_resume, "vv*", "", COR_LANG)
+LANGBUILTIN(__builtin_coro_destroy, "vv*", "", COR_LANG)
+LANGBUILTIN(__builtin_coro_done, "bv*", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_promise, "v*v*IiIb", "n", COR_LANG)
+
+LANGBUILTIN(__builtin_coro_size, "z", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_frame, "v*", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_noop, "v*", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_free, "v*v*", "n", COR_LANG)
+
+LANGBUILTIN(__builtin_coro_id, "v*Iiv*v*v*", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_alloc, "b", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_begin, "v*v*", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_end, "bv*Ib", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_suspend, "cIb", "n", COR_LANG)
+LANGBUILTIN(__builtin_coro_param, "bv*v*", "n", COR_LANG)
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Pipe functions.
// We need the generic prototype, since the packet type could be anything.
@@ -1636,8 +1646,8 @@ LANGBUILTIN(__builtin_load_halff, "fhC*", "nc", ALL_OCLC_LANGUAGES)
BUILTIN(__builtin_os_log_format_buffer_size, "zcC*.", "p:0:nut")
BUILTIN(__builtin_os_log_format, "v*v*cC*.", "p:0:nt")
-// OpenMP 4.0
-LANGBUILTIN(omp_is_initial_device, "i", "nc", OMP_LANG)
+// CUDA/HIP
+LANGBUILTIN(__builtin_get_device_side_mangled_name, "cC*.", "ncT", CUDA_LANG)
// Builtins for XRay
BUILTIN(__xray_customevent, "vcC*z", "")
@@ -1648,6 +1658,9 @@ BUILTIN(__builtin_ms_va_start, "vc*&.", "nt")
BUILTIN(__builtin_ms_va_end, "vc*&", "n")
BUILTIN(__builtin_ms_va_copy, "vc*&c*&", "n")
+// Arithmetic Fence: to prevent FP reordering and reassociation optimizations
+LANGBUILTIN(__arithmetic_fence, "v.", "t", ALL_LANGUAGES)
+
#undef BUILTIN
#undef LIBBUILTIN
#undef LANGBUILTIN
diff --git a/clang/include/clang/Basic/Builtins.h b/clang/include/clang/Basic/Builtins.h
index 15bfcf797917..cdaaee48c32d 100644
--- a/clang/include/clang/Basic/Builtins.h
+++ b/clang/include/clang/Basic/Builtins.h
@@ -36,6 +36,8 @@ enum LanguageID {
OCLC20_LANG = 0x20, // builtin for OpenCL C 2.0 only.
OCLC1X_LANG = 0x40, // builtin for OpenCL C 1.x only.
OMP_LANG = 0x80, // builtin requires OpenMP.
+ CUDA_LANG = 0x100, // builtin requires CUDA.
+ COR_LANG = 0x200, // builtin requires use of 'fcoroutine-ts' option.
ALL_LANGUAGES = C_LANG | CXX_LANG | OBJC_LANG, // builtin for all languages.
ALL_GNU_LANGUAGES = ALL_LANGUAGES | GNU_LANG, // builtin requires GNU mode.
ALL_MS_LANGUAGES = ALL_LANGUAGES | MS_LANG, // builtin requires MS mode.
@@ -178,10 +180,6 @@ public:
strchr(getRecord(ID).Type, 'A') != nullptr;
}
- /// Completely forget that the given ID was ever considered a builtin,
- /// e.g., because the user provided a conflicting signature.
- void forgetBuiltin(unsigned ID, IdentifierTable &Table);
-
/// If this is a library function that comes from a specific
/// header, retrieve that header name.
const char *getHeaderName(unsigned ID) const {
diff --git a/clang/include/clang/Basic/BuiltinsAArch64.def b/clang/include/clang/Basic/BuiltinsAArch64.def
index b35510f8b691..1dac5d2371d4 100644
--- a/clang/include/clang/Basic/BuiltinsAArch64.def
+++ b/clang/include/clang/Basic/BuiltinsAArch64.def
@@ -99,6 +99,20 @@ BUILTIN(__builtin_arm_tcommit, "v", "n")
BUILTIN(__builtin_arm_tcancel, "vWUIi", "n")
BUILTIN(__builtin_arm_ttest, "WUi", "nc")
+// Armv8.5-A FP rounding intrinsics
+BUILTIN(__builtin_arm_frint32zf, "ff", "")
+BUILTIN(__builtin_arm_frint32z, "dd", "")
+BUILTIN(__builtin_arm_frint64zf, "ff", "")
+BUILTIN(__builtin_arm_frint64z, "dd", "")
+BUILTIN(__builtin_arm_frint32xf, "ff", "")
+BUILTIN(__builtin_arm_frint32x, "dd", "")
+BUILTIN(__builtin_arm_frint64xf, "ff", "")
+BUILTIN(__builtin_arm_frint64x, "dd", "")
+
+// Armv8.5-A Random number generation intrinsics
+BUILTIN(__builtin_arm_rndr, "iWUi*", "n")
+BUILTIN(__builtin_arm_rndrrs, "iWUi*", "n")
+
// Armv8.7-A load/store 64-byte intrinsics
BUILTIN(__builtin_arm_ld64b, "vvC*WUi*", "n")
BUILTIN(__builtin_arm_st64b, "vv*WUiC*", "n")
diff --git a/clang/include/clang/Basic/BuiltinsAMDGPU.def b/clang/include/clang/Basic/BuiltinsAMDGPU.def
index 123a7ad212da..3570431d952c 100644
--- a/clang/include/clang/Basic/BuiltinsAMDGPU.def
+++ b/clang/include/clang/Basic/BuiltinsAMDGPU.def
@@ -9,6 +9,11 @@
// This file defines the AMDGPU-specific builtin function database. Users of
// this file must define the BUILTIN macro to make use of this information.
//
+// Note: (unsigned) long int type should be avoided in builtin definitions
+// since it has different size on Linux (64 bit) and Windows (32 bit).
+// (unsigned) long long int type should also be avoided, which is 64 bit for
+// C/C++/HIP but is 128 bit for OpenCL. Use `W` as width modifier in builtin
+// definitions since it is fixed for 64 bit.
//===----------------------------------------------------------------------===//
// The format of this database matches clang/Basic/Builtins.def.
@@ -44,12 +49,14 @@ BUILTIN(__builtin_amdgcn_grid_size_z, "Ui", "nc")
BUILTIN(__builtin_amdgcn_mbcnt_hi, "UiUiUi", "nc")
BUILTIN(__builtin_amdgcn_mbcnt_lo, "UiUiUi", "nc")
+TARGET_BUILTIN(__builtin_amdgcn_s_memtime, "WUi", "n", "s-memtime-inst")
+
//===----------------------------------------------------------------------===//
// Instruction builtins.
//===----------------------------------------------------------------------===//
BUILTIN(__builtin_amdgcn_s_getreg, "UiIi", "n")
BUILTIN(__builtin_amdgcn_s_setreg, "vIiUi", "n")
-BUILTIN(__builtin_amdgcn_s_getpc, "LUi", "n")
+BUILTIN(__builtin_amdgcn_s_getpc, "WUi", "n")
BUILTIN(__builtin_amdgcn_s_waitcnt, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_sendmsg, "vIiUi", "n")
BUILTIN(__builtin_amdgcn_s_sendmsghalt, "vIiUi", "n")
@@ -63,6 +70,7 @@ BUILTIN(__builtin_amdgcn_ds_gws_sema_v, "vUi", "n")
BUILTIN(__builtin_amdgcn_ds_gws_sema_br, "vUiUi", "n")
BUILTIN(__builtin_amdgcn_ds_gws_sema_p, "vUi", "n")
BUILTIN(__builtin_amdgcn_fence, "vUicC*", "n")
+BUILTIN(__builtin_amdgcn_groupstaticsize, "Ui", "n")
BUILTIN(__builtin_amdgcn_atomic_inc32, "UZiUZiD*UZiUicC*", "n")
BUILTIN(__builtin_amdgcn_atomic_inc64, "UWiUWiD*UWiUicC*", "n")
@@ -105,16 +113,15 @@ BUILTIN(__builtin_amdgcn_cubeid, "ffff", "nc")
BUILTIN(__builtin_amdgcn_cubesc, "ffff", "nc")
BUILTIN(__builtin_amdgcn_cubetc, "ffff", "nc")
BUILTIN(__builtin_amdgcn_cubema, "ffff", "nc")
-BUILTIN(__builtin_amdgcn_s_memtime, "LUi", "n")
BUILTIN(__builtin_amdgcn_s_sleep, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_incperflevel, "vIi", "n")
BUILTIN(__builtin_amdgcn_s_decperflevel, "vIi", "n")
-BUILTIN(__builtin_amdgcn_uicmp, "LUiUiUiIi", "nc")
-BUILTIN(__builtin_amdgcn_uicmpl, "LUiLUiLUiIi", "nc")
-BUILTIN(__builtin_amdgcn_sicmp, "LUiiiIi", "nc")
-BUILTIN(__builtin_amdgcn_sicmpl, "LUiLiLiIi", "nc")
-BUILTIN(__builtin_amdgcn_fcmp, "LUiddIi", "nc")
-BUILTIN(__builtin_amdgcn_fcmpf, "LUiffIi", "nc")
+BUILTIN(__builtin_amdgcn_uicmp, "WUiUiUiIi", "nc")
+BUILTIN(__builtin_amdgcn_uicmpl, "WUiWUiWUiIi", "nc")
+BUILTIN(__builtin_amdgcn_sicmp, "WUiiiIi", "nc")
+BUILTIN(__builtin_amdgcn_sicmpl, "WUiWiWiIi", "nc")
+BUILTIN(__builtin_amdgcn_fcmp, "WUiddIi", "nc")
+BUILTIN(__builtin_amdgcn_fcmpf, "WUiffIi", "nc")
BUILTIN(__builtin_amdgcn_ds_swizzle, "iiIi", "nc")
BUILTIN(__builtin_amdgcn_ds_permute, "iii", "nc")
BUILTIN(__builtin_amdgcn_ds_bpermute, "iii", "nc")
@@ -140,9 +147,9 @@ BUILTIN(__builtin_amdgcn_sad_u8, "UiUiUiUi", "nc")
BUILTIN(__builtin_amdgcn_msad_u8, "UiUiUiUi", "nc")
BUILTIN(__builtin_amdgcn_sad_hi_u8, "UiUiUiUi", "nc")
BUILTIN(__builtin_amdgcn_sad_u16, "UiUiUiUi", "nc")
-BUILTIN(__builtin_amdgcn_qsad_pk_u16_u8, "LUiLUiUiLUi", "nc")
-BUILTIN(__builtin_amdgcn_mqsad_pk_u16_u8, "LUiLUiUiLUi", "nc")
-BUILTIN(__builtin_amdgcn_mqsad_u32_u8, "V4UiLUiUiV4Ui", "nc")
+BUILTIN(__builtin_amdgcn_qsad_pk_u16_u8, "WUiWUiUiWUi", "nc")
+BUILTIN(__builtin_amdgcn_mqsad_pk_u16_u8, "WUiWUiUiWUi", "nc")
+BUILTIN(__builtin_amdgcn_mqsad_u32_u8, "V4UiWUiUiV4Ui", "nc")
//===----------------------------------------------------------------------===//
// CI+ only builtins.
@@ -177,10 +184,11 @@ TARGET_BUILTIN(__builtin_amdgcn_frexp_manth, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_frexp_exph, "sh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_fracth, "hh", "nc", "16-bit-insts")
TARGET_BUILTIN(__builtin_amdgcn_classh, "bhi", "nc", "16-bit-insts")
-TARGET_BUILTIN(__builtin_amdgcn_s_memrealtime, "LUi", "n", "s-memrealtime")
+TARGET_BUILTIN(__builtin_amdgcn_s_memrealtime, "WUi", "n", "s-memrealtime")
TARGET_BUILTIN(__builtin_amdgcn_mov_dpp, "iiIiIiIiIb", "nc", "dpp")
TARGET_BUILTIN(__builtin_amdgcn_update_dpp, "iiiIiIiIiIb", "nc", "dpp")
TARGET_BUILTIN(__builtin_amdgcn_s_dcache_wb, "v", "n", "gfx8-insts")
+TARGET_BUILTIN(__builtin_amdgcn_perm, "UiUiUiUi", "nc", "gfx8-insts")
//===----------------------------------------------------------------------===//
// GFX9+ only builtins.
@@ -192,13 +200,13 @@ TARGET_BUILTIN(__builtin_amdgcn_fmed3h, "hhhh", "nc", "gfx9-insts")
// Deep learning builtins.
//===----------------------------------------------------------------------===//
-TARGET_BUILTIN(__builtin_amdgcn_fdot2, "fV2hV2hfIb", "nc", "dot2-insts")
+TARGET_BUILTIN(__builtin_amdgcn_fdot2, "fV2hV2hfIb", "nc", "dot7-insts")
TARGET_BUILTIN(__builtin_amdgcn_sdot2, "SiV2SsV2SsSiIb", "nc", "dot2-insts")
TARGET_BUILTIN(__builtin_amdgcn_udot2, "UiV2UsV2UsUiIb", "nc", "dot2-insts")
TARGET_BUILTIN(__builtin_amdgcn_sdot4, "SiSiSiSiIb", "nc", "dot1-insts")
-TARGET_BUILTIN(__builtin_amdgcn_udot4, "UiUiUiUiIb", "nc", "dot2-insts")
+TARGET_BUILTIN(__builtin_amdgcn_udot4, "UiUiUiUiIb", "nc", "dot7-insts")
TARGET_BUILTIN(__builtin_amdgcn_sdot8, "SiSiSiSiIb", "nc", "dot1-insts")
-TARGET_BUILTIN(__builtin_amdgcn_udot8, "UiUiUiUiIb", "nc", "dot2-insts")
+TARGET_BUILTIN(__builtin_amdgcn_udot8, "UiUiUiUiIb", "nc", "dot7-insts")
//===----------------------------------------------------------------------===//
// GFX10+ only builtins.
@@ -208,9 +216,20 @@ TARGET_BUILTIN(__builtin_amdgcn_permlanex16, "UiUiUiUiUiIbIb", "nc", "gfx10-inst
TARGET_BUILTIN(__builtin_amdgcn_mov_dpp8, "UiUiIUi", "nc", "gfx10-insts")
//===----------------------------------------------------------------------===//
+// Raytracing builtins.
+// By default the 1st argument is i32 and the 4/5-th arguments are float4.
+// Postfix l indicates the 1st argument is i64.
+// Postfix h indicates the 4/5-th arguments are half4.
+//===----------------------------------------------------------------------===//
+TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray, "V4UiUifV4fV4fV4fV4Ui", "nc", "gfx10-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray_h, "V4UiUifV4fV4hV4hV4Ui", "nc", "gfx10-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray_l, "V4UiWUifV4fV4fV4fV4Ui", "nc", "gfx10-insts")
+TARGET_BUILTIN(__builtin_amdgcn_image_bvh_intersect_ray_lh, "V4UiWUifV4fV4hV4hV4Ui", "nc", "gfx10-insts")
+
+//===----------------------------------------------------------------------===//
// Special builtins.
//===----------------------------------------------------------------------===//
-BUILTIN(__builtin_amdgcn_read_exec, "LUi", "nc")
+BUILTIN(__builtin_amdgcn_read_exec, "WUi", "nc")
BUILTIN(__builtin_amdgcn_read_exec_lo, "Ui", "nc")
BUILTIN(__builtin_amdgcn_read_exec_hi, "Ui", "nc")
@@ -258,5 +277,13 @@ TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_4x4x2bf16, "V4fV2sV2sV4fIiIiIi", "nc",
TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x4bf16, "V16fV2sV2sV16fIiIiIi", "nc", "mai-insts")
TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x8bf16, "V4fV2sV2sV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x4bf16_1k, "V32fV4sV4sV32fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x4bf16_1k, "V16fV4sV4sV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_4x4x4bf16_1k, "V4fV4sV4sV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_32x32x8bf16_1k, "V16fV4sV4sV16fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f32_16x16x16bf16_1k, "V4fV4sV4sV4fIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f64_16x16x4f64, "V4dddV4dIiIiIi", "nc", "mai-insts")
+TARGET_BUILTIN(__builtin_amdgcn_mfma_f64_4x4x4f64, "ddddIiIiIi", "nc", "mai-insts")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsHexagon.def b/clang/include/clang/Basic/BuiltinsHexagon.def
index 28aa222166f5..0001bd556117 100644
--- a/clang/include/clang/Basic/BuiltinsHexagon.def
+++ b/clang/include/clang/Basic/BuiltinsHexagon.def
@@ -17,8 +17,10 @@
# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
#endif
+#pragma push_macro("V68")
+#define V68 "v68"
#pragma push_macro("V67")
-#define V67 "v67"
+#define V67 "v67|" V68
#pragma push_macro("V66")
#define V66 "v66|" V67
#pragma push_macro("V65")
@@ -32,8 +34,10 @@
#pragma push_macro("V5")
#define V5 "v5|" V55
+#pragma push_macro("HVXV68")
+#define HVXV68 "hvxv68"
#pragma push_macro("HVXV67")
-#define HVXV67 "hvxv67"
+#define HVXV67 "hvxv67|" HVXV68
#pragma push_macro("HVXV66")
#define HVXV66 "hvxv66|" HVXV67
#pragma push_macro("HVXV65")
@@ -123,6 +127,7 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B,"V64iV64iV32iLLi","", "
#pragma pop_macro("HVXV65")
#pragma pop_macro("HVXV66")
#pragma pop_macro("HVXV67")
+#pragma pop_macro("HVXV68")
#pragma pop_macro("V5")
#pragma pop_macro("V55")
@@ -131,6 +136,7 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_rtt_acc_128B,"V64iV64iV32iLLi","", "
#pragma pop_macro("V65")
#pragma pop_macro("V66")
#pragma pop_macro("V67")
+#pragma pop_macro("V68")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsHexagonDep.def b/clang/include/clang/Basic/BuiltinsHexagonDep.def
index b694e4c35d3b..152c9c4dd8ad 100644
--- a/clang/include/clang/Basic/BuiltinsHexagonDep.def
+++ b/clang/include/clang/Basic/BuiltinsHexagonDep.def
@@ -11,96 +11,389 @@
// V5 Scalar Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeq, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgt, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtu, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeqp, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtp, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtup, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpeqi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpneqi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpeq, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpneq, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsset, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsclr, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsset, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsclr, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeqi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgti, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtui, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgei, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgeui, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmplt, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpltu, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsclri, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsclri, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpneqi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpltei, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplteui, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpneq, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplte, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplteu, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_and, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_or, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_xor, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_andn, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_not, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_orn, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_and_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_and_or, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_or_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_or_or, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_and_andn, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_and_orn, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_or_andn, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C4_or_orn, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_pxfer_map, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_any8, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_all8, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_vitpack, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_mux, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_muxii, "iiIiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_muxir, "iiiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_muxri, "iiIii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_vmux, "LLiiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_C2_mask, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_abs, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_absp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_abssat, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_add, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addpsat, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addsat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_addsp, "LLiiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_and, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_andir, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_andp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_aslh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_asrh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combineii, "LLiIiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_combinew, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_max, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_maxp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_maxu, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_maxup, "ULLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_min, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_minp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_minu, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_minup, "ULLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_neg, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_negp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_negsat, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_not, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_notp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_or, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_orir, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_orp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_roundsat, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sat, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_satb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sath, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_satub, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_satuh, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sub, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subri, "iIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_subsat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svaddh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svaddhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svadduhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svavgh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svavghs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svnavgh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubh, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubuhs, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_swiz, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sxtb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sxth, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_sxtw, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfr, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrih, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfril, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrpi, "LLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrsi, "iIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabsh, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabshsat, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabsw, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vabswsat, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddb_map, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddubs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vadduhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddws, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavghcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavghr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgubr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguhr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguwr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgwcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgwr, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpbeq, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbeqi, "iLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbeq_any, "iLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgtui, "iLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgt, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgti, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpheq, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmphgt, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmphgtu, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpweq, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpwgt, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vconj, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxb, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxuh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxuw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminb, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminuh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminuw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vminw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavghcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavghr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgwcr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgwr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vraddub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vraddub_acc, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vrsadub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubb_map, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubub, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsububs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubuhs, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubws, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_xor, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_xorp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_zxtb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A2_zxth, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_andn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_andnp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_bitsplit, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_bitspliti, "LLiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_boundscheck, "iiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbeq, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbeqi, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgtu, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgtui, "iiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgt, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgti, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpheq, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmphgt, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmphgtu, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpheqi, "iLLiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmphgti, "iLLiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmphgtui, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgtu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpbgtui, "iiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpheq, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgt, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgtu, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmpheqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgt, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgti, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgtu, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_cmphgtui, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpweq, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpwgt, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_combineir, "LLiIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_combineri, "LLiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cround_ri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_cround_rr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_modwrapu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_orn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_ornp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpeq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpeqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpneq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_rcmpneqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_ri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_ri_sat, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_rr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_round_rr_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_tlbmatch, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbeq_any, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbeqi, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgt, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgti, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpbgtui, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpheqi, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmphgti, "iLLiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmphgtui, "iLLiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpweqi, "iLLiIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpwgti, "iLLiIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_A4_vcmpwgtui, "iLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_boundscheck, "iiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_tlbmatch, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxuh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxuw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminuh, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminuw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminw, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_A5_vaddhubs, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_all8, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_and, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_andn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_any8, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsclr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsclri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_bitsset, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeqi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpeqp, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgei, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgeui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgt, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgti, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtp, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpgtup, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmplt, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_cmpltu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_mask, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_mux, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_muxii, "iiIiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_muxir, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_muxri, "iiIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_not, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_or, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_orn, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_pxfer_map, "ii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_C2_tfrpr, "ii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_C2_tfrrp, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_vitpack, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_vmux, "LLiiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C2_xor, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_and_orn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplte, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpltei, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplteu, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmplteui, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpneq, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_cmpneqi, "iiIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_C4_fastcorner9, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsclr, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsclri, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_nbitsset, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_andn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_C4_or_orn, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_d2df, "dLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_d2sf, "fLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2d, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2d_chop, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2sf, "fd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2ud, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2ud_chop, "LLid", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2uw, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2uw_chop, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2w, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2w_chop, "id", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2d, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2d_chop, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2df, "df", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud_chop, "LLif", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw_chop, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2w, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2w_chop, "if", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_ud2df, "dLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_ud2sf, "fLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_uw2df, "di", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_uw2sf, "fi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_w2df, "di", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_w2sf, "fi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfclass, "idUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpeq, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpge, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpgt, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpuo, "idd", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfimm_n, "dUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfimm_p, "dUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfadd, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfclass, "ifUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpeq, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpge, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpgt, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpuo, "iff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupd, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupn, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupr, "ff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma_lib, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma_sc, "ffffi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffms, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sffms_lib, "ffff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfimm_n, "fUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfimm_p, "fUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmax, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmin, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmpy, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_sfsub, "fff", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_acci, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_accii, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmaci_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacr_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacs_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacs_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpys_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpys_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacs_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacs_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0, "ULLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_maci, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_macsin, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_macsip, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0, "iiii", "", V5)
@@ -109,14 +402,6 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0, "iiii", "", V5)
@@ -125,14 +410,6 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0, "iii", "", V5)
@@ -141,14 +418,22 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0, "iii", "", V5)
@@ -157,6 +442,14 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0, "iii", "", V5)
@@ -165,6 +458,9 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1_sat, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0, "LLiLLiii", "", V5)
@@ -173,14 +469,6 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0, "LLiii", "", V5)
@@ -189,6 +477,14 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0, "LLiii", "", V5)
@@ -197,6 +493,9 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyi, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpysmi, "iiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpysu_up, "iii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0, "iiii", "", V5)
@@ -205,14 +504,6 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0, "Uiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1, "Uiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0, "Uiii", "", V5)
@@ -221,6 +512,15 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0, "Uiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1, "Uiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0, "Uiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1, "Uiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_up, "Uiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0, "LLiLLiii", "", V5)
@@ -229,14 +529,6 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0, "ULLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1, "ULLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0, "ULLiii", "", V5)
@@ -245,632 +537,340 @@ TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0, "ULLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1, "ULLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0, "ULLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1, "ULLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpysmi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_macsip, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_macsin, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0, "ULLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpy_up_s1_sat, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyu_up, "Uiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpysu_up, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_mac_up_s1_sat, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_nac_up_s1_sat, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyi, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_mpyui, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_maci, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_acci, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_accii, "iiiIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_nacci, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_naccii, "iiiIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_subacc, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyrr_addr, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addr_u2, "iiUIii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addr, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addi, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyrr_addi, "iUIiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vabsdiffh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vabsdiffw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1, "LLiLLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s1, "LLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2su_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2su_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vmac2es, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrmac_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmpybuu, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmacbuu, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmpybsu, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmacbsu, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vmpybuu, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vmpybsu, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vmacbuu, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vmacbsu, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vdmpybsu, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M5_vdmacbsu, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacs_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacs_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpys_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpys_s1, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacs_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacs_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp, "iLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyl_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_hmmpyh_s1, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s0, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vmpy2su_s1, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vraddh, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vradduh, "iLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0, "LLiLLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0, "LLiLLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmaci_s0, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmacr_s0, "LLiLLiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyi_wh, "iLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyr_wh, "iLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyi_whc, "iLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyr_whc, "iLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vcrotate, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vrcrotate_acc, "LLiLLiLLiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vrcrotate, "LLiLLiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vcnegh, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vrcnegh, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_pmpyw, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vpmpyh, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_pmpyw_acc, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_vpmpyh_acc, "LLiLLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_add, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_sub, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addsat, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subsat, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_aslh, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_asrh, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addpsat, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_addsp, "LLiiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_neg, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_negsat, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_abs, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_abssat, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vconj, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_negp, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_absp, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_max, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_maxu, "Uiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_min, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_minu, "Uiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_maxp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_maxup, "ULLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_minp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_minup, "ULLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_tfr, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrsi, "iIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrp, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrpi, "LLiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_zxtb, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_sxtb, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_zxth, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_sxth, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_combinew, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_combineri, "LLiiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_combineir, "LLiIii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_combineii, "LLiIiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_hh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_hl, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_lh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_combine_ll, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_tfril, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_tfrih, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_and, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_or, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_xor, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_not, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrmac_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M2_xor_xacc, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_xacc, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_andn, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_orn, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_andnp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_ornp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_addaddi, "iiiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_subaddi, "iiIii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_and_and, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_and_andn, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_and_or, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_and_xor, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyi_wh, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyi_whc, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyr_wh, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_cmpyr_whc, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mac_up_s1_sat, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addi, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addr, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyri_addr_u2, "iiUIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyrr_addi, "iUIiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_mpyrr_addr, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_nac_up_s1_sat, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_or_and, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_or_andn, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_or_or, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_or_xor, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_or_andix, "iiiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_or_andi, "iiiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_or_ori, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_pmpyw, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_pmpyw_acc, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vpmpyh, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vpmpyh_acc, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_acc_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyeh_s1, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s0, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_acc_s1, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s0, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_vrmpyoh_s1, "LLiLLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_or, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_andn, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_subri, "iIii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_andir, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_orir, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_andp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_orp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_xorp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_notp, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_sxtw, "LLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_sat, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_roundsat, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_sath, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_satuh, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_satub, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_satb, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddb_map, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddubs, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddhs, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vadduhs, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A5_vaddhubs, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vaddws, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubhr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddhr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svavgh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svavghs, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svnavgh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svaddh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svaddhs, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svadduhs, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubh, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubhs, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_svsubuhs, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vraddub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vraddub_acc, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vraddh, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vradduh, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubb_map, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsububs, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubhs, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubuhs, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vsubws, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vabsh, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vabshsat, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vabsw, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vabswsat, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vabsdiffw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_M2_vabsdiffh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vrsadub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgwr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgwr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgwcr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavgwcr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavghcr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavghcr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguwr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavgubr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavguhr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vavghr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vnavghr, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_round_ri, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_round_rr, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_round_ri_sat, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_round_rr_sat, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_cround_ri, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_cround_rr, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminh, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxh, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminuh, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxuh, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminw, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxw, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrminuw, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_vrmaxuw, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vminb, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxb, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vminub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxub, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vminh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vminuh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxuh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vminw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vminuw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_vmaxuw, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_modwrapu, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfadd, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfsub, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmpy, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma, "ffff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma_sc, "ffffi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffms, "ffff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffma_lib, "ffff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffms_lib, "ffff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpeq, "iff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpgt, "iff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpge, "iff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfcmpuo, "iff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmax, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfmin, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfclass, "ifUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfimm_p, "fUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sfimm_n, "fUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupn, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupd, "fff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_sffixupr, "ff", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpeq, "idd", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpgt, "idd", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpge, "idd", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfcmpuo, "idd", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfclass, "idUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfimm_p, "dUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfimm_n, "dUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2df, "df", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2sf, "fd", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_uw2sf, "fi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_uw2df, "di", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_w2sf, "fi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_w2df, "di", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_ud2sf, "fLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_ud2df, "dLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_d2sf, "fLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_d2df, "dLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw, "if", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2w, "if", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud, "LLif", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2d, "LLif", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2uw, "id", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2w, "id", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2ud, "LLid", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2d, "LLid", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2uw_chop, "if", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2w_chop, "if", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2ud_chop, "LLif", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_sf2d_chop, "LLif", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2uw_chop, "id", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2w_chop, "id", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2ud_chop, "LLid", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_conv_df2d_chop, "LLid", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M4_xor_xacc, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vdmacbsu, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vdmpybsu, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmacbsu, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmacbuu, "LLiLLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmpybsu, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vmpybuu, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmacbsu, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmacbuu, "LLiLLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmpybsu, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_M5_vrmpybuu, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_addasl_rrri, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_vh, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_vw, "LLiLLiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc, "LLiLLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or, "iiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and, "LLiLLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac, "LLiLLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_xor, "LLiLLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_p_xor, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_xor, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_xor, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_vw, "LLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc, "iiiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and, "LLiLLiLLiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc, "iiiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac, "iiiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat, "iiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd, "iiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_lsli, "iIii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_addasl_rrri, "iiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_andi_asl_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_ori_asl_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_addi_asl_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_subi_asl_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_andi_lsr_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_ori_lsr_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_addi_lsr_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_subi_lsr_ri, "iUIiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_valignib, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_valignrb, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vspliceib, "LLiLLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplicerb, "LLiLLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplatrh, "LLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplatrb, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_insert, "iiiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax, "iiiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax, "iiiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax, "iiiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax, "iiiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_bitspliti, "LLiiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A4_bitsplit, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_extract, "iiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_vh, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_vw, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_brev, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_brevp, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl0, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl0p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl1, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_cl1p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clbnorm, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clbp, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clrbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_clrbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct0, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct0p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct1, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_ct1p, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_deinterleave, "LLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_extractu, "iiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_insertp, "LLiLLiLLiUIiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_extractp, "LLiLLiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_extractu_rp, "iiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_extractup, "LLiLLiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_extractup_rp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_insert, "iiiUIiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_insert_rp, "iiiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_extract_rp, "iiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_extractu_rp, "iiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_insertp, "LLiLLiLLiUIiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_insertp_rp, "LLiLLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_extractp_rp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_extractup_rp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_tstbit_i, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_ntstbit_i, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_setbit_i, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_togglebit_i, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_clrbit_i, "iiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_tstbit_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_ntstbit_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_setbit_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_togglebit_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_clrbit_r, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_vh, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_vh, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_vh, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, "iLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S5_asrhub_sat, "iLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_vh, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_interleave, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lfsp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or, "iiii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_vw, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun, "iLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun, "iLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p, "LLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc, "iiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh, "LLiLLiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_i_vw, "LLiLLiUIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asr_r_vw, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_asl_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_xor, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or, "iiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh, "LLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw, "LLiLLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_packhl, "LLiii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_parityp, "iLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_setbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_setbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffeb, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffeh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffob, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffoh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_svsathb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_svsathub, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax, "iiiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_togglebit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_togglebit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tstbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_tstbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_valignib, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_valignrb, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vcnegh, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vcrotate, "LLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vrcnegh, "LLiLLiLLii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_vrndpackwh, "iLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsxtbh, "LLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vzxtbh, "LLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathub, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_svsathub, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_svsathb, "ii", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathb, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunohb, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunewh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunowh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunehb, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsxthw, "LLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vzxthw, "LLii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwh, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwuh, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_packhl, "LLiii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_A2_swiz, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack, "LLiLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathub, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwh, "iLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack, "LLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwuh, "iLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffob, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffeb, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffoh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_shuffeh, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S5_popcountp, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_parity, "iii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_parityp, "iLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_lfsp, "LLiLLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_clbnorm, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplatrb, "ii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplatrh, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vspliceib, "LLiLLiLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsplicerb, "LLiLLiLLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsxtbh, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vsxthw, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunehb, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunewh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunohb, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vtrunowh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vzxtbh, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S2_vzxthw, "LLii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_addaddi, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_addi_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_addi_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_andi_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_andi_lsr_ri, "iUIiiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S4_clbaddi, "iiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S4_clbpnorm, "iLLi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_S4_clbpaddi, "iLLiIi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_clb, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_cl0, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_cl1, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_clbp, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_cl0p, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_cl1p, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_brev, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_brevp, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_ct0, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_ct1, "ii", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_ct0p, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_ct1p, "iLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_interleave, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_S2_deinterleave, "LLiLLi", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_Y2_dcfetch, "vv*", "", V5)
-TARGET_BUILTIN(__builtin_HEXAGON_Y2_dczeroa, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_clbpnorm, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extract, "iiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extract_rp, "iiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extractp, "LLiLLiUIiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_extractp_rp, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_lsli, "iIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ntstbit_i, "iiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ntstbit_r, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_or_andi, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_or_andix, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_or_ori, "iiiIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ori_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_ori_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_parity, "iii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_subaddi, "iiIii", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_subi_asl_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_subi_lsr_ri, "iUIiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vrcrotate, "LLiLLiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vrcrotate_acc, "LLiLLiLLiiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubhr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxaddsubw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddh, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddhr, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S4_vxsubaddw, "LLiLLiLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_asrhub_sat, "iLLiUIi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_popcountp, "iLLi", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_S5_vasrhrnd_goodsyntax, "LLiLLiUIi", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_Y2_dccleana, "vv*", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_Y2_dccleaninva, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dcfetch, "vv*", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_Y2_dcinva, "vv*", "", V5)
+TARGET_BUILTIN(__builtin_HEXAGON_Y2_dczeroa, "vv*", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_Y4_l2fetch, "vv*i", "", V5)
TARGET_BUILTIN(__builtin_HEXAGON_Y5_l2fetch, "vv*LLi", "", V5)
// V60 Scalar Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r, "iiUIi", "", V60)
TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p, "LLiLLiUIi", "", V60)
-TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_acc, "iiiUIi", "", V60)
TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_acc, "LLiLLiLLiUIi", "", V60)
-TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_nac, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_and, "LLiLLiLLiUIi", "", V60)
TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_nac, "LLiLLiLLiUIi", "", V60)
-TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_xacc, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_or, "LLiLLiLLiUIi", "", V60)
TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_xacc, "LLiLLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r, "iiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_acc, "iiiUIi", "", V60)
TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_and, "iiiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_nac, "iiiUIi", "", V60)
TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_or, "iiiUIi", "", V60)
-TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_and, "LLiLLiLLiUIi", "", V60)
-TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_p_or, "LLiLLiLLiUIi", "", V60)
+TARGET_BUILTIN(__builtin_HEXAGON_S6_rol_i_r_xacc, "iiiUIi", "", V60)
// V62 Scalar Instructions.
@@ -886,288 +886,201 @@ TARGET_BUILTIN(__builtin_HEXAGON_A6_vcmpbeq_notany, "iLLiLLi", "", V65)
// V66 Scalar Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_M2_mnaci, "iiii", "", V66)
TARGET_BUILTIN(__builtin_HEXAGON_F2_dfadd, "ddd", "", V66)
TARGET_BUILTIN(__builtin_HEXAGON_F2_dfsub, "ddd", "", V66)
+TARGET_BUILTIN(__builtin_HEXAGON_M2_mnaci, "iiii", "", V66)
TARGET_BUILTIN(__builtin_HEXAGON_S2_mask, "iUIiUIi", "", V66)
// V67 Scalar Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrw, "LLiLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrw_acc, "LLiLLiLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrwc, "LLiLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrwc_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_clip, "iiUIi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_croundd_ri, "LLiLLiUIi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_croundd_rr, "LLiLLii", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_A7_vclip, "LLiLLiUIi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmax, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmin, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyfix, "ddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyhh, "dddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpylh, "dddd", "", V67)
+TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyll, "ddd", "", V67)
TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiw, "LLiLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiw_acc, "LLiLLiLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiwc, "LLiLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyiwc_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrw, "LLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrw_acc, "LLiLLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrwc, "LLiLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_dcmpyrwc_acc, "LLiLLiLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_vdmpy, "LLiLLiLLi", "", V67)
TARGET_BUILTIN(__builtin_HEXAGON_M7_vdmpy_acc, "LLiLLiLLiLLi", "", V67)
-TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrw, "iLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrwc, "iLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiw, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiw_rnd, "iLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiwc, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiwc_rnd, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrw, "iLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrw_rnd, "iLLiLLi", "", "audio")
+TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrwc, "iLLiLLi", "", "audio")
TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyrwc_rnd, "iLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiw_rnd, "iLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_M7_wcmpyiwc_rnd, "iLLiLLi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_A7_croundd_ri, "LLiLLiUIi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_A7_croundd_rr, "LLiLLii", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_A7_clip, "iiUIi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_A7_vclip, "LLiLLiUIi", "", "audio")
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmax, "ddd", "", V67)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmin, "ddd", "", V67)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyfix, "ddd", "", V67)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyll, "ddd", "", V67)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpylh, "dddd", "", V67)
-TARGET_BUILTIN(__builtin_HEXAGON_F2_dfmpyhh, "dddd", "", V67)
+
+// V68 Scalar Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_Y6_dmlink, "vv*v*", "", V68)
+TARGET_BUILTIN(__builtin_HEXAGON_Y6_dmpause, "i", "", V68)
+TARGET_BUILTIN(__builtin_HEXAGON_Y6_dmpoll, "i", "", V68)
+TARGET_BUILTIN(__builtin_HEXAGON_Y6_dmresume, "vv*", "", V68)
+TARGET_BUILTIN(__builtin_HEXAGON_Y6_dmstart, "vv*", "", V68)
+TARGET_BUILTIN(__builtin_HEXAGON_Y6_dmwait, "i", "", V68)
// V60 HVX Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai, "vV64bv*V16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_extractw, "iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_extractw_128B, "iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_hi, "V16iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_hi_128B, "V32iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lo, "V16iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lo_128B, "V32iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatw, "V16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatw_128B, "V32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_n, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_n_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_not, "V64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_not_128B, "V128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_n, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_n_128B, "V128bV128bV128b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2, "V64bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2_128B, "V128bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_xor, "V64bV64bV64b", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_xor_128B, "V128bV128bV128b", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nqpred_ai, "vV64bv*V16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nqpred_ai_128B, "vV128bv*V32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai, "vV64bv*V16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B, "vV128bv*V32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai, "vV64bv*V16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai_128B, "vV128bv*V32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_valignb, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_valignb_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignb, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignb_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_valignbi, "V16iV16iV16iUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_valignbi_128B, "V32iV32iV32iUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignbi, "V16iV16iV16iUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignbi_128B, "V32iV32iV32iUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vror, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vror_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackub, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackub_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackb, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackb_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackuh, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackuh_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackh, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackh_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackob, "V32iV32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackob_128B, "V64iV64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackoh, "V32iV32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackoh_128B, "V64iV64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeb, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeb_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackob, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackob_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackoh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackoh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vzb, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vzb_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsb, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsb_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vzh, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vzh_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsh, "V32iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsh_128B, "V64iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat, "V16iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_128B, "V32iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc, "V16iV16iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "V32iV32iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat, "V16iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "V32iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "V16iV16iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "V32iV32iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi, "V32iV32iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_128B, "V64iV64iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc, "V32iV32iV32iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi, "V32iV32iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_128B, "V64iV64iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc, "V32iV32iV32iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi, "V32iV32iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_128B, "V64iV64iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc, "V32iV32iV32iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrw, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrw_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslwv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslwv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrwv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrwv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrh, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrh_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslhv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslhv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrhv, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrhv_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwh, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwh_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhsat, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhsat_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwuh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwuh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubsat, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubsat_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhb, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhb_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhub, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhub_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai, "vV64bv*V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai, "vV64bv*V16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vS32b_qpred_ai_128B, "vV128bv*V32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_sat, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_sat_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_sat, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_sat_128B, "V32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb_dv, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddb_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbq_128B, "V32iV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh_dv, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddh_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_128B, "V64iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat_dv, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubsat_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv_128B, "V64iV64iV64i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat_dv, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddw_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwq_128B, "V32iV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat_dv, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwsat_dv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignb, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignb_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignbi, "V16iV16iV16iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_valignbi_128B, "V32iV32iV32iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vand, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vand_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt, "V16iV64bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_128B, "V32iV128bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc, "V16iV16iV64bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc_128B, "V32iV32iV128bi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt, "V64bV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_128B, "V128bV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc, "V64bV64bV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc_128B, "V128bV128bV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslhv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslhv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslw_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslwv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslwv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbrndsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubrndsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhubsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrw_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwh, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwh_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhrndsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwhsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhsat_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassignp, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vassignp_128B, "V64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavghrnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavghrnd_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgub, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgub_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgubrnd, "V16iV16iV16i", "", HVXV60)
@@ -1176,208 +1089,76 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguh, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguhrnd, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguhrnd_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavghrnd, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavghrnd_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgw, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgw_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgwrnd, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgwrnd_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgw, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgw_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffub, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffub_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffuh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffw, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsdiffw_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgub, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgub_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububh, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububh_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhw, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhw_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhw, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0h, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0h_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0w, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0w_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcombine, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vcombine_128B, "V64iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vd0, "V16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vd0_128B, "V32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbnq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbnq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbnq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbnq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhnq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhnq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhnq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhnq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwnq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddwnq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwnq, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwnq_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_sat, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsh_sat_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_sat, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsw_sat_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc, "V32iV32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc, "V32iV32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc, "V32iV32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabusv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabusv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuuv, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuuv_128B, "V64iV64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc, "V32iV32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc, "V32iV32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc, "V32iV32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyieoh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyieoh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiowh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiowh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc, "V16iV16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub, "V32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_128B, "V64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc, "V32iV32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc_128B, "V64iV64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus, "V32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_128B, "V64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc, "V32iV32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc_128B, "V64iV64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_128B, "V64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc_128B, "V64iV64iV64ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh, "V32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_128B, "V64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc, "V32iV32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "V64iV64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhss, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhss_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh, "V32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_128B, "V64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc, "V32iV32iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc_128B, "V64iV64iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vand, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vand_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vor, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vor_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vxor, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vxor_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnot, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnot_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt, "V16iV64bi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_128B, "V32iV128bi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc, "V16iV16iV64bi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandqrt_acc_128B, "V32iV32iV128bi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt, "V64bV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_128B, "V128bV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc, "V64bV64bV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvrt_acc_128B, "V128bV128bV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw, "V64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_128B, "V128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_and, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_or, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_xor, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw, "V64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_128B, "V128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_and, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_or, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_xor, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth, "V64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_128B, "V128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_and, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_and_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_or, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_or_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_xor, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb4w, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb4w_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealh, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealh_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealvdd, "V32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealvdd_128B, "V64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdelta, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdelta_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpybus_dv_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhb_dv_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat, "V16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_128B, "V32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc, "V16iV16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhisat_acc_128B, "V32iV32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsat_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat, "V16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_128B, "V32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc, "V16iV16iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsuisat_acc_128B, "V32iV32iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhsusat_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdmpyhvsat_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vdsaduh_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh, "V64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_128B, "V128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_and, "V64bV64bV16iV16i", "", HVXV60)
@@ -1386,6 +1167,14 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_or, "V64bV64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_or_128B, "V128bV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_xor, "V64bV64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_veqh_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_veqw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb, "V64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_128B, "V128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_and, "V64bV64bV16iV16i", "", HVXV60)
@@ -1394,30 +1183,14 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_or, "V64bV64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_or_128B, "V128bV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_xor, "V64bV64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtb_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb, "V64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_128B, "V128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_and, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_and_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_or, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_or_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_xor, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_veqb_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw, "V64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_128B, "V128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_and, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_or, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh, "V64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_128B, "V128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_and, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_and_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_or, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_or_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor, "V64bV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgth_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub, "V64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_128B, "V128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_and, "V64bV64bV16iV16i", "", HVXV60)
@@ -1426,296 +1199,543 @@ TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_or, "V64bV64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_or_128B, "V128bV128bV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_xor, "V64bV64bV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtub_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or, "V64bV64bV64b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_128B, "V128bV128bV128b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and, "V64bV64bV64b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_128B, "V128bV128bV128b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_not, "V64bV64b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_not_128B, "V128bV128b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_xor, "V64bV64bV64b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_xor_128B, "V128bV128bV128b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_n, "V64bV64bV64b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_and_n_128B, "V128bV128bV128b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_n, "V64bV64bV64b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_or_n_128B, "V128bV128bV128b", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2, "V64bi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2_128B, "V128bi", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmux, "V16iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmux_128B, "V32iV128bV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vswap, "V32iV64bV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vswap_128B, "V64iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuh_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtuw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw, "V64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_128B, "V128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_and, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_and_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_or, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_or_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_xor, "V64bV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgtw_xor_128B, "V128bV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vinsertwr, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vinsertwr_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignb, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignb_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignbi, "V16iV16iV16iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlalignbi_128B, "V32iV32iV32iUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrhv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrhv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrw, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrw_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrwv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrwv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc, "V16iV16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "V32iV32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh, "V32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_128B, "V64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc, "V32iV32iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "V64iV64iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxub, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxub_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vminub, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vminub_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxuh, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxuh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vminuh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vminuh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vminh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vminh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxw, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vminuh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vminw, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vminw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabus_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabusv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabusv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuuv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuuv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahb_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybus_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybusv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpybv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsat_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhsrs_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhss, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhss_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhus_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyhvsrs_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyieoh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyieoh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewh_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiewuh_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyih_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyihb_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiowh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiowh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwb_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwh_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_sacc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyub_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyubv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh, "V32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_128B, "V64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc, "V32iV32iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuh_acc_128B, "V64iV64iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc, "V32iV32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhv_acc_128B, "V64iV64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmux, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmux_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamth, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamth_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamtw, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamtw_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnot, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnot_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vor, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vor_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackeh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhb_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackhub_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackob, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackob_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackoh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackoh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwh_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpackwuh_sat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpopcounth, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vpopcounth_128B, "V32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrdelta, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrdelta_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybus_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi, "V32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_128B, "V64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc, "V32iV32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybusv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpybv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc, "V16iV16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyub_acc_128B, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi, "V32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_128B, "V64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc, "V32iV32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc, "V16iV16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrmpyubv_acc_128B, "V32iV32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vror, "V16iV16ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vror_128B, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhub, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundhub_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwuh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vroundwuh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi, "V32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_128B, "V64iV64iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc, "V32iV32iV32iiUIi", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrsadubi_acc_128B, "V64iV64iV64iiUIi", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsathub, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsathub_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatwh, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatwh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsb, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsb_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufeh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufeh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffb, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffb_128B, "V32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffeb, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffeb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffh, "V16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffh_128B, "V32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffob, "V16iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffob_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufeh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufeh_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoh, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoh_128B, "V32iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffvdd, "V32iV16iV16ii", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffvdd_128B, "V64iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealvdd, "V32iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealvdd_128B, "V64iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeh, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeh_128B, "V64iV32iV32i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeb, "V32iV16iV16i", "", HVXV60)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeb_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealh, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealh_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb4w, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdealb4w_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffh, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffh_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffb, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vshuffb_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_extractw, "iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_extractw_128B, "iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vinsertwr, "V16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vinsertwr_128B, "V32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatw, "V16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatw_128B, "V32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vassignp, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vassignp_128B, "V64iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vassign_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vcombine, "V32iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vcombine_128B, "V64iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdelta, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vdelta_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrdelta, "V16iV16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrdelta_128B, "V32iV32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0w, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0w_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0h, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vcl0h_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamtw, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamtw_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamth, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnormamth_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpopcounth, "V16iV16i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vpopcounth_128B, "V32iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb, "V16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_128B, "V32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc, "V16iV16iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracc_128B, "V32iV32iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh, "V32iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_128B, "V64iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc, "V32iV32iV16iV16ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracc_128B, "V64iV64iV32iV32ii", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_hi, "V16iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_hi_128B, "V32iV64i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lo, "V16iV32i", "", HVXV60)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lo_128B, "V32iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeh, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoeh_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vshufoh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubb_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubh_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububh, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububh_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsububsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhw, "V32iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuhw_128B, "V64iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubw_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwnq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwnq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwq, "V16iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwq_128B, "V32iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubwsat_dv_128B, "V64iV64iV64i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vswap, "V32iV64bV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vswap_128B, "V64iV128bV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyb_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpybus_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb, "V32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_128B, "V64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc, "V32iV32iV32ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vtmpyhb_acc_128B, "V64iV64iV64ii", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackb, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackb_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackob, "V32iV32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackob_128B, "V64iV64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackoh, "V32iV32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackoh_128B, "V64iV64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackub, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackub_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackuh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vunpackuh_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vxor, "V16iV16iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vxor_128B, "V32iV32iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzb, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzb_128B, "V64iV32i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzh, "V32iV16i", "", HVXV60)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vzh_128B, "V64iV32i", "", HVXV60)
// V62 HVX Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrb, "V16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrb_128B, "V32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat, "V16iV16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "V32iV32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat, "V16iV16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "V32iV32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbsat, "V16iV16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbsat_128B, "V32iV32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduwuh, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduwuh_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduhub, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduhub_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv_128B, "V64iV64iV64i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "V64iV64iV64i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatb, "V16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatb_128B, "V32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplath, "V16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplath_128B, "V32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2, "V64bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2_128B, "V128bi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqh, "V64bV64bV64b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqh_128B, "V128bV128bV128b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqw, "V64bV64bV64b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqw_128B, "V128bV128bV128b", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat, "V16iV16iV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat_128B, "V32iV32iV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat_dv, "V32iV32iV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddbsat_dv_128B, "V64iV64iV64i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv_128B, "V64iV64iV64i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarry, "V16iV16iV16iv*", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarry_128B, "V32iV32iV32iv*", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarry, "V16iV16iV16iv*", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarry_128B, "V32iV32iV32iv*", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbh, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbh_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbw, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbw_128B, "V32iV32iV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_acc, "V32iV32iV16iV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddhw_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc, "V32iV32iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_acc, "V32iV32iV16iV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddubh_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64, "V32iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64_128B, "V64iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc, "V32iV32iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb, "V32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_128B, "V64iV64ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc, "V32iV32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc_128B, "V64iV64iV64ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub, "V16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_128B, "V32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc, "V16iV16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddububb_sat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc, "V32iV32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduhw_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vadduwsat_dv_128B, "V64iV64iV64i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt, "V16iV64bi", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt_128B, "V32iV128bi", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt_acc, "V16iV16iV64bi", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vandnqrt_acc_128B, "V32iV32iV128bi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvqv, "V16iV64bV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvqv_128B, "V32iV128bV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvnqv, "V16iV64bV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvnqv_128B, "V32iV128bV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2, "V64bi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_pred_scalar2v2_128B, "V128bi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqw, "V64bV64bV64b", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqw_128B, "V128bV128bV128b", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqh, "V64bV64bV64b", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_shuffeqh_128B, "V128bV128bV128b", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvqv, "V16iV64bV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vandvqv_128B, "V32iV128bV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbsat, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrhbsat_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhrndsat_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrwuhrndsat_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrb, "V16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlsrb_128B, "V32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci, "V16iV16iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "V32iV32iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvbi, "V16iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvbi_128B, "V32iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm, "V32iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm_128B, "V64iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci, "V32iV32iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "V64iV64iV32iV32iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwhi, "V32iV16iV16iUIi", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwhi_128B, "V64iV32iV32iUIi", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxb, "V16iV16iV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmaxb_128B, "V32iV32iV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vminb, "V16iV16iV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vminb_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb, "V32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_128B, "V64iV64ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhb_acc_128B, "V64iV64iV64ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64, "V32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyewuh_64_128B, "V64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub, "V16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_128B, "V32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc, "V16iV16iV16ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyiwub_acc_128B, "V32iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc, "V32iV32iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyowh_64_acc_128B, "V64iV64iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduhub, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduhub_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduwuh, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrounduwuh_128B, "V32iV32iV32i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatuwuh, "V16iV16iV16i", "", HVXV62)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatuwuh_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplath, "V16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplath_128B, "V32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatb, "V16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_lvsplatb_128B, "V32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbw, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbw_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbh, "V16iV16iV16i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddclbh_128B, "V32iV32iV32i", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvbi, "V16iV16iV16iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvbi_128B, "V32iV32iV32iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci, "V16iV16iV16iV16iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_oracci_128B, "V32iV32iV32iV32iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwhi, "V32iV16iV16iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwhi_128B, "V64iV32iV32iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci, "V32iV32iV16iV16iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_oracci_128B, "V64iV64iV32iV32iUIi", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm, "V16iV16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvvb_nm_128B, "V32iV32iV32ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm, "V32iV16iV16ii", "", HVXV62)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlutvwh_nm_128B, "V64iV32iV32ii", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubbsat_dv_128B, "V64iV64iV64i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarry, "V16iV16iV16iv*", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubcarry_128B, "V32iV32iV32iv*", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubububb_sat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat, "V16iV16iV16i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_128B, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv, "V32iV32iV32i", "", HVXV62)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vsubuwsat_dv_128B, "V64iV64iV64i", "", HVXV62)
// V65 HVX Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat, "V16iV16iV16ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat_128B, "V32iV32iV32ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubsat, "V16iV16iV16ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubsat_128B, "V32iV32iV32ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat, "V16iV16iV16ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb, "V16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_128B, "V32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_sat, "V16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_sat_128B, "V32iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_acc, "V16iV16iV16ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaslh_acc_128B, "V32iV32iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_acc, "V16iV16iV16ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vasrh_acc_128B, "V32iV32iV32ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguw, "V16iV16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguw_128B, "V32iV32iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguwrnd, "V16iV16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguwrnd_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubrndsat_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubsat, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruhubsat_128B, "V32iV32iV32ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat, "V16iV16iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasruwuhsat_128B, "V32iV32iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgb, "V16iV16iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgb_128B, "V32iV32iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgbrnd, "V16iV16iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vavgbrnd_128B, "V32iV32iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgb, "V16iV16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgb_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguw, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguw_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguwrnd, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vavguwrnd_128B, "V32iV32iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vdd0, "V32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vdd0_128B, "V64i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb, "V16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_128B, "V32iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_sat, "V16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vabsb_sat_128B, "V32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermh, "vv*iiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermh_128B, "vv*iiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhq, "vv*V64biiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhq_128B, "vv*V128biiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhw, "vv*iiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhw_128B, "vv*iiV64i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhwq, "vv*V64biiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhwq_128B, "vv*V128biiV64i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermw, "vv*iiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermw_128B, "vv*iiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermwq, "vv*V64biiV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermwq_128B, "vv*V128biiV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlut4, "V16iV16iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vlut4_128B, "V32iV32iLLi", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu, "V32iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu_128B, "V64iV64ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu_acc, "V32iV32iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpabuu_acc_128B, "V64iV64iV64ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc, "V32iV32iV16ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc_128B, "V64iV64iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahhsat, "V16iV16iV16iLLi", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpahhsat_128B, "V32iV32iV32iLLi", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhuhsat, "V16iV16iV16iLLi", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpauhuhsat_128B, "V32iV32iV32iLLi", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpsuhuhsat, "V16iV16iV16iLLi", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpsuhuhsat_128B, "V32iV32iV32iLLi", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlut4, "V16iV16iLLi", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vlut4_128B, "V32iV32iLLi", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc, "V32iV32iV16ii", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyh_acc_128B, "V64iV64iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe, "V16iV16ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_128B, "V32iV32ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_acc, "V16iV16iV16ii", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vmpyuhe_acc_128B, "V32iV32iV32ii", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermw, "vv*iiV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermw_128B, "vv*iiV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermh, "vv*iiV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermh_128B, "vv*iiV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhw, "vv*iiV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhw_128B, "vv*iiV64i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermwq, "vv*V64biiV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermwq_128B, "vv*V128biiV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhq, "vv*V64biiV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhq_128B, "vv*V128biiV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhwq, "vv*V64biiV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vgathermhwq_128B, "vv*V128biiV64i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw, "viiV16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgb, "V16iV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vnavgb_128B, "V32iV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqb, "V16iV64b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqb_128B, "V32iV128b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqh, "V16iV64b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqh_128B, "V32iV128b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqw, "V16iV64b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqw_128B, "V32iV128b", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh, "viiV16iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh_128B, "viiV32iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_add, "viiV16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_add_128B, "viiV32iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh_add, "viiV16iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermh_add_128B, "viiV32iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq, "vV64biiV16iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq_128B, "vV128biiV32iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhq, "vV64biiV16iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhq_128B, "vV128biiV32iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw, "viiV32iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw_128B, "viiV64iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhwq, "vV64biiV32iV16i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhwq_128B, "vV128biiV64iV32i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw_add, "viiV32iV16i", "", HVXV65)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhw_add_128B, "viiV64iV32i", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqb, "V16iV64b", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqb_128B, "V32iV128b", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqh, "V16iV64b", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqh_128B, "V32iV128b", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqw, "V16iV64b", "", HVXV65)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vprefixqw_128B, "V32iV128b", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhwq, "vV64biiV32iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermhwq_128B, "vV128biiV64iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw, "viiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_add, "viiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermw_add_128B, "viiV32iV32i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq, "vV64biiV16iV16i", "", HVXV65)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vscattermwq_128B, "vV128biiV32iV32i", "", HVXV65)
// V66 HVX Instructions.
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr, "V16iV16iV16i", "", HVXV66)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr_128B, "V32iV32iV32i", "", HVXV66)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into, "V32iV32iV16iV16i", "", HVXV66)
-TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into_128B, "V64iV64iV32iV32i", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat, "V16iV16iV16iV64b", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vaddcarrysat_128B, "V32iV32iV32iV128b", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into, "V32iV32iV16iV16i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vasr_into_128B, "V64iV64iV32iV32i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr, "V16iV16iV16i", "", HVXV66)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_vrotr_128B, "V32iV32iV32i", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatdw, "V16iV16iV16i", "", HVXV66)
TARGET_BUILTIN(__builtin_HEXAGON_V6_vsatdw_128B, "V32iV32iV32i", "", HVXV66)
+
+// V68 HVX Instructions.
+
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyhubs10, "V32iV32iV32iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyhubs10_128B, "V64iV64iV64iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyhubs10_vxx, "V32iV32iV32iV32iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyhubs10_vxx_128B, "V64iV64iV64iV64iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10, "V32iV32iV32iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10_128B, "V64iV64iV64iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10_vxx, "V32iV32iV32iV32iUIi", "", HVXV68)
+TARGET_BUILTIN(__builtin_HEXAGON_V6_v6mpyvubs10_vxx_128B, "V64iV64iV64iV64iUIi", "", HVXV68)
diff --git a/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def b/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
index 9478a1b3fd14..93f560fc5adc 100644
--- a/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
+++ b/clang/include/clang/Basic/BuiltinsHexagonMapCustomDep.def
@@ -8,66 +8,68 @@
// Automatically generated file, do not edit!
//===----------------------------------------------------------------------===//
-CUSTOM_BUILTIN_MAPPING(M2_mpysmi, 0)
-CUSTOM_BUILTIN_MAPPING(M2_dpmpyss_s0, 0)
-CUSTOM_BUILTIN_MAPPING(M2_dpmpyuu_s0, 0)
-CUSTOM_BUILTIN_MAPPING(M2_mpyi, 0)
-CUSTOM_BUILTIN_MAPPING(M2_mpyui, 0)
CUSTOM_BUILTIN_MAPPING(A2_add, 0)
-CUSTOM_BUILTIN_MAPPING(A2_sub, 0)
CUSTOM_BUILTIN_MAPPING(A2_addi, 0)
CUSTOM_BUILTIN_MAPPING(A2_addp, 0)
-CUSTOM_BUILTIN_MAPPING(A2_subp, 0)
+CUSTOM_BUILTIN_MAPPING(A2_and, 0)
+CUSTOM_BUILTIN_MAPPING(A2_andir, 0)
CUSTOM_BUILTIN_MAPPING(A2_neg, 0)
-CUSTOM_BUILTIN_MAPPING(A2_zxtb, 0)
+CUSTOM_BUILTIN_MAPPING(A2_not, 0)
+CUSTOM_BUILTIN_MAPPING(A2_or, 0)
+CUSTOM_BUILTIN_MAPPING(A2_orir, 0)
+CUSTOM_BUILTIN_MAPPING(A2_sub, 0)
+CUSTOM_BUILTIN_MAPPING(A2_subp, 0)
+CUSTOM_BUILTIN_MAPPING(A2_subri, 0)
CUSTOM_BUILTIN_MAPPING(A2_sxtb, 0)
-CUSTOM_BUILTIN_MAPPING(A2_zxth, 0)
CUSTOM_BUILTIN_MAPPING(A2_sxth, 0)
-CUSTOM_BUILTIN_MAPPING(A2_and, 0)
-CUSTOM_BUILTIN_MAPPING(A2_or, 0)
CUSTOM_BUILTIN_MAPPING(A2_xor, 0)
-CUSTOM_BUILTIN_MAPPING(A2_not, 0)
-CUSTOM_BUILTIN_MAPPING(A2_subri, 0)
-CUSTOM_BUILTIN_MAPPING(A2_andir, 0)
-CUSTOM_BUILTIN_MAPPING(A2_orir, 0)
-CUSTOM_BUILTIN_MAPPING(S2_asr_i_r, 0)
-CUSTOM_BUILTIN_MAPPING(S2_lsr_i_r, 0)
+CUSTOM_BUILTIN_MAPPING(A2_zxtb, 0)
+CUSTOM_BUILTIN_MAPPING(A2_zxth, 0)
+CUSTOM_BUILTIN_MAPPING(M2_dpmpyss_s0, 0)
+CUSTOM_BUILTIN_MAPPING(M2_dpmpyuu_s0, 0)
+CUSTOM_BUILTIN_MAPPING(M2_mpyi, 0)
+CUSTOM_BUILTIN_MAPPING(M2_mpysmi, 0)
+CUSTOM_BUILTIN_MAPPING(M2_mpyui, 0)
+CUSTOM_BUILTIN_MAPPING(S2_asl_i_p, 0)
CUSTOM_BUILTIN_MAPPING(S2_asl_i_r, 0)
CUSTOM_BUILTIN_MAPPING(S2_asr_i_p, 0)
+CUSTOM_BUILTIN_MAPPING(S2_asr_i_r, 0)
CUSTOM_BUILTIN_MAPPING(S2_lsr_i_p, 0)
-CUSTOM_BUILTIN_MAPPING(S2_asl_i_p, 0)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai_128B, 128)
+CUSTOM_BUILTIN_MAPPING(S2_lsr_i_r, 0)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and_n, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_and_n_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_not, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_not_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or_n, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_or_n_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_xor_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vS32b_nqpred_ai, 64)
CUSTOM_BUILTIN_MAPPING(V6_vS32b_nqpred_ai_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_nqpred_ai, 64)
CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_nqpred_ai_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddbq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddbq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_nt_qpred_ai_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vS32b_qpred_ai_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vaddbnq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vaddbnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubbnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddhq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddbq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddbq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vaddhnq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vaddhnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubhnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vaddwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vaddwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddhq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vaddwnq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vaddwnq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwnq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubwnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vaddwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vaddwq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vandqrt, 64)
CUSTOM_BUILTIN_MAPPING(V6_vandqrt_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vandqrt_acc, 64)
@@ -76,30 +78,14 @@ CUSTOM_BUILTIN_MAPPING(V6_vandvrt, 64)
CUSTOM_BUILTIN_MAPPING(V6_vandvrt_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vandvrt_acc, 64)
CUSTOM_BUILTIN_MAPPING(V6_vandvrt_acc_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqw_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgth_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqb_xor_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_veqh, 64)
CUSTOM_BUILTIN_MAPPING(V6_veqh_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_veqh_and, 64)
@@ -108,6 +94,14 @@ CUSTOM_BUILTIN_MAPPING(V6_veqh_or, 64)
CUSTOM_BUILTIN_MAPPING(V6_veqh_or_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_veqh_xor, 64)
CUSTOM_BUILTIN_MAPPING(V6_veqh_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_veqw_xor_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgtb, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgtb_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgtb_and, 64)
@@ -116,30 +110,14 @@ CUSTOM_BUILTIN_MAPPING(V6_vgtb_or, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgtb_or_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgtb_xor, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgtb_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_veqb_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgth_xor_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgtub, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgtub_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgtub_and, 64)
@@ -148,59 +126,81 @@ CUSTOM_BUILTIN_MAPPING(V6_vgtub_or, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgtub_or_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgtub_xor, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgtub_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_not, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_not_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_xor, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_xor_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and_n, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_and_n_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or_n, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_or_n_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuh_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtuw_xor_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_and, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_and_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_or, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_or_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgtw_xor_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vmux, 64)
CUSTOM_BUILTIN_MAPPING(V6_vmux_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubbq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwnq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwnq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubwq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vswap, 64)
CUSTOM_BUILTIN_MAPPING(V6_vswap_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2, 64)
+CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqh, 64)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqh_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqw, 64)
+CUSTOM_BUILTIN_MAPPING(V6_shuffeqw_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vaddcarry, 64)
CUSTOM_BUILTIN_MAPPING(V6_vaddcarry_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vsubcarry, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vsubcarry_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vandnqrt, 64)
CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_acc, 64)
CUSTOM_BUILTIN_MAPPING(V6_vandnqrt_acc_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vandvqv, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vandvqv_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vandvnqv, 64)
CUSTOM_BUILTIN_MAPPING(V6_vandvnqv_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2, 64)
-CUSTOM_BUILTIN_MAPPING(V6_pred_scalar2v2_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqw, 64)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqw_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqh, 64)
-CUSTOM_BUILTIN_MAPPING(V6_shuffeqh_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vgathermwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vandvqv, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vandvqv_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vsubcarry, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vsubcarry_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgathermhq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgathermhq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vgathermhwq, 64)
CUSTOM_BUILTIN_MAPPING(V6_vgathermhwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermwq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhq_128B, 128)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq, 64)
-CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vgathermwq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vprefixqb, 64)
CUSTOM_BUILTIN_MAPPING(V6_vprefixqb_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vprefixqh, 64)
CUSTOM_BUILTIN_MAPPING(V6_vprefixqh_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vprefixqw, 64)
CUSTOM_BUILTIN_MAPPING(V6_vprefixqw_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermhwq_128B, 128)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermwq, 64)
+CUSTOM_BUILTIN_MAPPING(V6_vscattermwq_128B, 128)
CUSTOM_BUILTIN_MAPPING(V6_vaddcarrysat, 64)
CUSTOM_BUILTIN_MAPPING(V6_vaddcarrysat_128B, 128)
diff --git a/clang/include/clang/Basic/BuiltinsLe64.def b/clang/include/clang/Basic/BuiltinsLe64.def
deleted file mode 100644
index 776492cd21b3..000000000000
--- a/clang/include/clang/Basic/BuiltinsLe64.def
+++ /dev/null
@@ -1,18 +0,0 @@
-//==- BuiltinsLe64.def - Le64 Builtin function database ----------*- C++ -*-==//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines the Le64-specific builtin function database. Users of this
-// file must define the BUILTIN macro to make use of this information.
-//
-//===----------------------------------------------------------------------===//
-
-// The format of this database matches clang/Basic/Builtins.def.
-
-BUILTIN(__clear_cache, "vv*v*", "i")
-
-#undef BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsNVPTX.def b/clang/include/clang/Basic/BuiltinsNVPTX.def
index d149fa0127b9..3c96900136a4 100644
--- a/clang/include/clang/Basic/BuiltinsNVPTX.def
+++ b/clang/include/clang/Basic/BuiltinsNVPTX.def
@@ -21,7 +21,9 @@
#pragma push_macro("SM_72")
#pragma push_macro("SM_75")
#pragma push_macro("SM_80")
-#define SM_80 "sm_80"
+#pragma push_macro("SM_86")
+#define SM_86 "sm_86"
+#define SM_80 "sm_80|" SM_86
#define SM_75 "sm_75|" SM_80
#define SM_72 "sm_72|" SM_75
#define SM_70 "sm_70|" SM_72
@@ -35,7 +37,11 @@
#pragma push_macro("PTX64")
#pragma push_macro("PTX65")
#pragma push_macro("PTX70")
-#define PTX70 "ptx70"
+#pragma push_macro("PTX71")
+#pragma push_macro("PTX72")
+#define PTX72 "ptx72"
+#define PTX71 "ptx71|" PTX72
+#define PTX70 "ptx70|" PTX71
#define PTX65 "ptx65|" PTX70
#define PTX64 "ptx64|" PTX65
#define PTX63 "ptx63|" PTX64
@@ -450,12 +456,45 @@ TARGET_BUILTIN(__nvvm_match_any_sync_i64, "WiUiWi", "", PTX60)
TARGET_BUILTIN(__nvvm_match_all_sync_i32p, "UiUiUii*", "", PTX60)
TARGET_BUILTIN(__nvvm_match_all_sync_i64p, "WiUiWii*", "", PTX60)
+// Redux
+TARGET_BUILTIN(__nvvm_redux_sync_add, "iii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_min, "iii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_max, "iii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_umin, "UiUii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_umax, "UiUii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_and, "iii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_xor, "iii", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_redux_sync_or, "iii", "", AND(SM_80,PTX70))
+
// Membar
BUILTIN(__nvvm_membar_cta, "v", "")
BUILTIN(__nvvm_membar_gl, "v", "")
BUILTIN(__nvvm_membar_sys, "v", "")
+// mbarrier
+
+TARGET_BUILTIN(__nvvm_mbarrier_init, "vWi*i", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_init_shared, "vWi*3i", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mbarrier_inval, "vWi*", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_inval_shared, "vWi*3", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mbarrier_arrive, "WiWi*", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_shared, "WiWi*3", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_noComplete, "WiWi*i", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_noComplete_shared, "WiWi*3i", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_drop, "WiWi*", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_drop_shared, "WiWi*3", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_drop_noComplete, "WiWi*i", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_arrive_drop_noComplete_shared, "WiWi*3i", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mbarrier_test_wait, "bWi*Wi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_mbarrier_test_wait_shared, "bWi*3Wi", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_mbarrier_pending_count, "iWi", "", AND(SM_80,PTX70))
+
// Memcpy, Memset
BUILTIN(__nvvm_memcpy, "vUc*Uc*zi","")
@@ -685,6 +724,7 @@ TARGET_BUILTIN(__hmma_m8n32k16_mma_f16f32, "vi*iC*iC*fC*IiIi", "", AND(SM_70,PTX
TARGET_BUILTIN(__bmma_m8n8k128_ld_a_b1, "vi*iC*UiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__bmma_m8n8k128_ld_b_b1, "vi*iC*UiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__bmma_m8n8k128_ld_c, "vi*iC*UiIi", "", AND(SM_75,PTX63))
+TARGET_BUILTIN(__bmma_m8n8k128_mma_and_popc_b1, "vi*iC*iC*iC*Ii", "", AND(SM_75,PTX71))
TARGET_BUILTIN(__bmma_m8n8k128_mma_xor_popc_b1, "vi*iC*iC*iC*Ii", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__bmma_m8n8k128_st_c_i32, "vi*iC*UiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__imma_m16n16k16_ld_a_s8, "vi*iC*UiIi", "", AND(SM_72,PTX63))
@@ -720,6 +760,44 @@ TARGET_BUILTIN(__imma_m8n8k32_mma_s4, "vi*iC*iC*iC*IiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__imma_m8n8k32_mma_u4, "vi*iC*iC*iC*IiIi", "", AND(SM_75,PTX63))
TARGET_BUILTIN(__imma_m8n8k32_st_c_i32, "vi*iC*UiIi", "", AND(SM_75,PTX63))
+// Builtins to support double and alternate float WMMA instructions on sm_80
+TARGET_BUILTIN(__dmma_m8n8k4_ld_a, "vd*dC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__dmma_m8n8k4_ld_b, "vd*dC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__dmma_m8n8k4_ld_c, "vd*dC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__dmma_m8n8k4_st_c_f64, "vd*dC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__dmma_m8n8k4_mma_f64, "vd*dC*dC*dC*IiIi", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__mma_bf16_m16n16k16_ld_a, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m16n16k16_ld_b, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m16n16k16_mma_f32, "vf*iC*iC*fC*IiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m8n32k16_ld_a, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m8n32k16_ld_b, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m8n32k16_mma_f32, "vf*iC*iC*fC*IiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m32n8k16_ld_a, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m32n8k16_ld_b, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_bf16_m32n8k16_mma_f32, "vf*iC*iC*fC*IiIi", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__mma_tf32_m16n16k8_ld_a, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_tf32_m16n16k8_ld_b, "vi*iC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_tf32_m16n16k8_ld_c, "vf*fC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_m16n16k8_st_c_f32, "vf*fC*UiIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__mma_tf32_m16n16k8_mma_f32, "vf*iC*iC*fC*IiIi", "", AND(SM_80,PTX70))
+
+// Async Copy
+TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive, "vWi*", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_shared, "vWi*3", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_noinc, "vWi*", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_mbarrier_arrive_noinc_shared, "vWi*3", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_4, "vv*3vC*1", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_8, "vv*3vC*1", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_ca_shared_global_16, "vv*3vC*1", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_cg_shared_global_16, "vv*3vC*1", "", AND(SM_80,PTX70))
+
+TARGET_BUILTIN(__nvvm_cp_async_commit_group, "v", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_wait_group, "vIi", "", AND(SM_80,PTX70))
+TARGET_BUILTIN(__nvvm_cp_async_wait_all, "v", "", AND(SM_80,PTX70))
+
#undef BUILTIN
#undef TARGET_BUILTIN
#pragma pop_macro("AND")
@@ -728,9 +806,12 @@ TARGET_BUILTIN(__imma_m8n8k32_st_c_i32, "vi*iC*UiIi", "", AND(SM_75,PTX63))
#pragma pop_macro("SM_72")
#pragma pop_macro("SM_75")
#pragma pop_macro("SM_80")
+#pragma pop_macro("SM_86")
#pragma pop_macro("PTX60")
#pragma pop_macro("PTX61")
#pragma pop_macro("PTX63")
#pragma pop_macro("PTX64")
#pragma pop_macro("PTX65")
#pragma pop_macro("PTX70")
+#pragma pop_macro("PTX71")
+#pragma pop_macro("PTX72")
diff --git a/clang/include/clang/Basic/BuiltinsPPC.def b/clang/include/clang/Basic/BuiltinsPPC.def
index 39c66f5daeb1..dfe97af300f4 100644
--- a/clang/include/clang/Basic/BuiltinsPPC.def
+++ b/clang/include/clang/Basic/BuiltinsPPC.def
@@ -20,11 +20,131 @@
// MMA builtins that are using their own format documented below.
#if defined(BUILTIN) && !defined(CUSTOM_BUILTIN)
-# define CUSTOM_BUILTIN(ID, TYPES, ACCUMULATE) BUILTIN(__builtin_##ID, "i.", "t")
+# define CUSTOM_BUILTIN(ID, INTR, TYPES, ACCUMULATE) \
+ BUILTIN(__builtin_##ID, "i.", "t")
#elif defined(CUSTOM_BUILTIN) && !defined(BUILTIN)
# define BUILTIN(ID, TYPES, ATTRS)
#endif
+#define UNALIASED_CUSTOM_BUILTIN(ID, TYPES, ACCUMULATE) \
+ CUSTOM_BUILTIN(ID, ID, TYPES, ACCUMULATE)
+
+// XL Compatibility built-ins
+BUILTIN(__builtin_ppc_popcntb, "ULiULi", "")
+BUILTIN(__builtin_ppc_poppar4, "iUi", "")
+BUILTIN(__builtin_ppc_poppar8, "iULLi", "")
+BUILTIN(__builtin_ppc_eieio, "v", "")
+BUILTIN(__builtin_ppc_iospace_eieio, "v", "")
+BUILTIN(__builtin_ppc_isync, "v", "")
+BUILTIN(__builtin_ppc_lwsync, "v", "")
+BUILTIN(__builtin_ppc_iospace_lwsync, "v", "")
+BUILTIN(__builtin_ppc_sync, "v", "")
+BUILTIN(__builtin_ppc_iospace_sync, "v", "")
+BUILTIN(__builtin_ppc_dcbfl, "vvC*", "")
+BUILTIN(__builtin_ppc_dcbflp, "vvC*", "")
+BUILTIN(__builtin_ppc_dcbst, "vvC*", "")
+BUILTIN(__builtin_ppc_dcbt, "vv*", "")
+BUILTIN(__builtin_ppc_dcbtst, "vv*", "")
+BUILTIN(__builtin_ppc_dcbz, "vv*", "")
+BUILTIN(__builtin_ppc_icbt, "vv*", "")
+BUILTIN(__builtin_ppc_fric, "dd", "")
+BUILTIN(__builtin_ppc_frim, "dd", "")
+BUILTIN(__builtin_ppc_frims, "ff", "")
+BUILTIN(__builtin_ppc_frin, "dd", "")
+BUILTIN(__builtin_ppc_frins, "ff", "")
+BUILTIN(__builtin_ppc_frip, "dd", "")
+BUILTIN(__builtin_ppc_frips, "ff", "")
+BUILTIN(__builtin_ppc_friz, "dd", "")
+BUILTIN(__builtin_ppc_frizs, "ff", "")
+BUILTIN(__builtin_ppc_fsel, "dddd", "")
+BUILTIN(__builtin_ppc_fsels, "ffff", "")
+BUILTIN(__builtin_ppc_frsqrte, "dd", "")
+BUILTIN(__builtin_ppc_frsqrtes, "ff", "")
+BUILTIN(__builtin_ppc_fsqrt, "dd", "")
+BUILTIN(__builtin_ppc_fsqrts, "ff", "")
+BUILTIN(__builtin_ppc_compare_and_swap, "iiD*i*i", "")
+BUILTIN(__builtin_ppc_compare_and_swaplp, "iLiD*Li*Li", "")
+BUILTIN(__builtin_ppc_fetch_and_add, "iiD*i", "")
+BUILTIN(__builtin_ppc_fetch_and_addlp, "LiLiD*Li", "")
+BUILTIN(__builtin_ppc_fetch_and_and, "UiUiD*Ui", "")
+BUILTIN(__builtin_ppc_fetch_and_andlp, "ULiULiD*ULi", "")
+BUILTIN(__builtin_ppc_fetch_and_or, "UiUiD*Ui", "")
+BUILTIN(__builtin_ppc_fetch_and_orlp, "ULiULiD*ULi", "")
+BUILTIN(__builtin_ppc_fetch_and_swap, "UiUiD*Ui", "")
+BUILTIN(__builtin_ppc_fetch_and_swaplp, "ULiULiD*ULi", "")
+BUILTIN(__builtin_ppc_ldarx, "LiLiD*", "")
+BUILTIN(__builtin_ppc_lwarx, "iiD*", "")
+BUILTIN(__builtin_ppc_lharx, "isD*", "")
+BUILTIN(__builtin_ppc_lbarx, "UiUcD*", "")
+BUILTIN(__builtin_ppc_stdcx, "iLiD*Li", "")
+BUILTIN(__builtin_ppc_stwcx, "iiD*i", "")
+BUILTIN(__builtin_ppc_sthcx, "isD*s", "")
+BUILTIN(__builtin_ppc_stbcx, "icD*i", "")
+BUILTIN(__builtin_ppc_tdw, "vLLiLLiIUi", "")
+BUILTIN(__builtin_ppc_tw, "viiIUi", "")
+BUILTIN(__builtin_ppc_trap, "vi", "")
+BUILTIN(__builtin_ppc_trapd, "vLi", "")
+BUILTIN(__builtin_ppc_fcfid, "dd", "")
+BUILTIN(__builtin_ppc_fcfud, "dd", "")
+BUILTIN(__builtin_ppc_fctid, "dd", "")
+BUILTIN(__builtin_ppc_fctidz, "dd", "")
+BUILTIN(__builtin_ppc_fctiw, "dd", "")
+BUILTIN(__builtin_ppc_fctiwz, "dd", "")
+BUILTIN(__builtin_ppc_fctudz, "dd", "")
+BUILTIN(__builtin_ppc_fctuwz, "dd", "")
+BUILTIN(__builtin_ppc_swdiv_nochk, "ddd", "")
+BUILTIN(__builtin_ppc_swdivs_nochk, "fff", "")
+BUILTIN(__builtin_ppc_alignx, "vIivC*", "nc")
+BUILTIN(__builtin_ppc_rdlam, "UWiUWiUWiUWIi", "nc")
+// Compare
+BUILTIN(__builtin_ppc_cmpeqb, "LLiLLiLLi", "")
+BUILTIN(__builtin_ppc_cmprb, "iCIiii", "")
+BUILTIN(__builtin_ppc_setb, "LLiLLiLLi", "")
+BUILTIN(__builtin_ppc_cmpb, "LLiLLiLLi", "")
+// Multiply
+BUILTIN(__builtin_ppc_mulhd, "LLiLiLi", "")
+BUILTIN(__builtin_ppc_mulhdu, "ULLiULiULi", "")
+BUILTIN(__builtin_ppc_mulhw, "iii", "")
+BUILTIN(__builtin_ppc_mulhwu, "UiUiUi", "")
+BUILTIN(__builtin_ppc_maddhd, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_ppc_maddhdu, "ULLiULLiULLiULLi", "")
+BUILTIN(__builtin_ppc_maddld, "LLiLLiLLiLLi", "")
+// Rotate
+BUILTIN(__builtin_ppc_rlwnm, "UiUiIUiIUi", "")
+BUILTIN(__builtin_ppc_rlwimi, "UiUiUiIUiIUi", "")
+BUILTIN(__builtin_ppc_rldimi, "ULLiULLiULLiIUiIULLi", "")
+// load
+BUILTIN(__builtin_ppc_load2r, "UiUs*", "")
+BUILTIN(__builtin_ppc_load4r, "UiUi*", "")
+BUILTIN(__builtin_ppc_load8r, "ULLiULLi*", "")
+// store
+BUILTIN(__builtin_ppc_store2r, "vUiUs*", "")
+BUILTIN(__builtin_ppc_store4r, "vUiUi*", "")
+BUILTIN(__builtin_ppc_store8r, "vULLiULLi*", "")
+BUILTIN(__builtin_ppc_extract_exp, "Uid", "")
+BUILTIN(__builtin_ppc_extract_sig, "ULLid", "")
+BUILTIN(__builtin_ppc_mtfsb0, "vUIi", "")
+BUILTIN(__builtin_ppc_mtfsb1, "vUIi", "")
+BUILTIN(__builtin_ppc_mtfsf, "vUIiUi", "")
+BUILTIN(__builtin_ppc_mtfsfi, "vUIiUIi", "")
+BUILTIN(__builtin_ppc_insert_exp, "ddULLi", "")
+BUILTIN(__builtin_ppc_fmsub, "dddd", "")
+BUILTIN(__builtin_ppc_fmsubs, "ffff", "")
+BUILTIN(__builtin_ppc_fnmadd, "dddd", "")
+BUILTIN(__builtin_ppc_fnmadds, "ffff", "")
+BUILTIN(__builtin_ppc_fnmsub, "dddd", "")
+BUILTIN(__builtin_ppc_fnmsubs, "ffff", "")
+BUILTIN(__builtin_ppc_fre, "dd", "")
+BUILTIN(__builtin_ppc_fres, "ff", "")
+BUILTIN(__builtin_ppc_dcbtstt, "vv*", "")
+BUILTIN(__builtin_ppc_dcbtt, "vv*", "")
+BUILTIN(__builtin_ppc_mftbu, "Ui","")
+BUILTIN(__builtin_ppc_mfmsr, "Ui", "")
+BUILTIN(__builtin_ppc_mfspr, "ULiIi", "")
+BUILTIN(__builtin_ppc_mtmsr, "vUi", "")
+BUILTIN(__builtin_ppc_mtspr, "vIiULi", "")
+BUILTIN(__builtin_ppc_stfiw, "viC*d", "")
+
BUILTIN(__builtin_ppc_get_timebase, "ULLi", "n")
// This is just a placeholder, the types and attributes are wrong.
@@ -39,6 +159,7 @@ BUILTIN(__builtin_altivec_vadduws, "V4UiV4UiV4Ui", "")
BUILTIN(__builtin_altivec_vaddeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
BUILTIN(__builtin_altivec_vaddcuq, "V1ULLLiV1ULLLiV1ULLLi","")
BUILTIN(__builtin_altivec_vaddecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
+BUILTIN(__builtin_altivec_vadduqm, "V1ULLLiV16UcV16Uc","")
BUILTIN(__builtin_altivec_vsubsbs, "V16ScV16ScV16Sc", "")
BUILTIN(__builtin_altivec_vsububs, "V16UcV16UcV16Uc", "")
@@ -49,6 +170,7 @@ BUILTIN(__builtin_altivec_vsubuws, "V4UiV4UiV4Ui", "")
BUILTIN(__builtin_altivec_vsubeuqm, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
BUILTIN(__builtin_altivec_vsubcuq, "V1ULLLiV1ULLLiV1ULLLi","")
BUILTIN(__builtin_altivec_vsubecuq, "V1ULLLiV1ULLLiV1ULLLiV1ULLLi","")
+BUILTIN(__builtin_altivec_vsubuqm, "V1ULLLiV16UcV16Uc","")
BUILTIN(__builtin_altivec_vavgsb, "V16ScV16ScV16Sc", "")
BUILTIN(__builtin_altivec_vavgub, "V16UcV16UcV16Uc", "")
@@ -75,11 +197,11 @@ BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "")
BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "")
-BUILTIN(__builtin_altivec_lvx, "V4iivC*", "")
-BUILTIN(__builtin_altivec_lvxl, "V4iivC*", "")
-BUILTIN(__builtin_altivec_lvebx, "V16civC*", "")
-BUILTIN(__builtin_altivec_lvehx, "V8sivC*", "")
-BUILTIN(__builtin_altivec_lvewx, "V4iivC*", "")
+BUILTIN(__builtin_altivec_lvx, "V4iLivC*", "")
+BUILTIN(__builtin_altivec_lvxl, "V4iLivC*", "")
+BUILTIN(__builtin_altivec_lvebx, "V16cLivC*", "")
+BUILTIN(__builtin_altivec_lvehx, "V8sLivC*", "")
+BUILTIN(__builtin_altivec_lvewx, "V4iLivC*", "")
BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "")
@@ -131,11 +253,11 @@ BUILTIN(__builtin_altivec_vpkudum, "V4UiV2ULLiV2ULLi", "")
BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "")
-BUILTIN(__builtin_altivec_stvx, "vV4iiv*", "")
-BUILTIN(__builtin_altivec_stvxl, "vV4iiv*", "")
-BUILTIN(__builtin_altivec_stvebx, "vV16civ*", "")
-BUILTIN(__builtin_altivec_stvehx, "vV8siv*", "")
-BUILTIN(__builtin_altivec_stvewx, "vV4iiv*", "")
+BUILTIN(__builtin_altivec_stvx, "vV4iLiv*", "")
+BUILTIN(__builtin_altivec_stvxl, "vV4iLiv*", "")
+BUILTIN(__builtin_altivec_stvebx, "vV16cLiv*", "")
+BUILTIN(__builtin_altivec_stvehx, "vV8sLiv*", "")
+BUILTIN(__builtin_altivec_stvewx, "vV4iLiv*", "")
BUILTIN(__builtin_altivec_vcmpbfp, "V4iV4fV4f", "")
@@ -437,13 +559,13 @@ BUILTIN(__builtin_altivec_vrlqnm, "V1ULLLiV1ULLLiV1ULLLi", "")
// VSX built-ins.
-BUILTIN(__builtin_vsx_lxvd2x, "V2divC*", "")
-BUILTIN(__builtin_vsx_lxvw4x, "V4iivC*", "")
+BUILTIN(__builtin_vsx_lxvd2x, "V2dLivC*", "")
+BUILTIN(__builtin_vsx_lxvw4x, "V4iLivC*", "")
BUILTIN(__builtin_vsx_lxvd2x_be, "V2dSLLivC*", "")
BUILTIN(__builtin_vsx_lxvw4x_be, "V4iSLLivC*", "")
-BUILTIN(__builtin_vsx_stxvd2x, "vV2div*", "")
-BUILTIN(__builtin_vsx_stxvw4x, "vV4iiv*", "")
+BUILTIN(__builtin_vsx_stxvd2x, "vV2dLiv*", "")
+BUILTIN(__builtin_vsx_stxvw4x, "vV4iLiv*", "")
BUILTIN(__builtin_vsx_stxvd2x_be, "vV2dSLLivC*", "")
BUILTIN(__builtin_vsx_stxvw4x_be, "vV4iSLLivC*", "")
@@ -451,6 +573,8 @@ BUILTIN(__builtin_vsx_lxvl, "V4ivC*ULLi", "")
BUILTIN(__builtin_vsx_lxvll, "V4ivC*ULLi", "")
BUILTIN(__builtin_vsx_stxvl, "vV4iv*ULLi", "")
BUILTIN(__builtin_vsx_stxvll, "vV4iv*ULLi", "")
+BUILTIN(__builtin_vsx_ldrmb, "V16UcCc*Ii", "")
+BUILTIN(__builtin_vsx_strmb, "vCc*IiV16Uc", "")
BUILTIN(__builtin_vsx_xvmaxdp, "V2dV2dV2d", "")
BUILTIN(__builtin_vsx_xvmaxsp, "V4fV4fV4f", "")
@@ -544,6 +668,8 @@ BUILTIN(__builtin_vsx_xvxsigsp, "V4UiV4f", "")
// Conversion builtins
BUILTIN(__builtin_vsx_xvcvdpsxws, "V4SiV2d", "")
BUILTIN(__builtin_vsx_xvcvdpuxws, "V4UiV2d", "")
+BUILTIN(__builtin_vsx_xvcvspsxds, "V2SLLiV4f", "")
+BUILTIN(__builtin_vsx_xvcvspuxds, "V2ULLiV4f", "")
BUILTIN(__builtin_vsx_xvcvsxwdp, "V2dV4Si", "")
BUILTIN(__builtin_vsx_xvcvuxwdp, "V2dV4Ui", "")
BUILTIN(__builtin_vsx_xvcvspdp, "V2dV4f", "")
@@ -596,6 +722,12 @@ BUILTIN(__builtin_truncf128_round_to_odd, "dLLd", "")
BUILTIN(__builtin_vsx_scalar_extract_expq, "ULLiLLd", "")
BUILTIN(__builtin_vsx_scalar_insert_exp_qp, "LLdLLdULLi", "")
+// Fastmath by default builtins
+BUILTIN(__builtin_ppc_rsqrtf, "V4fV4f", "")
+BUILTIN(__builtin_ppc_rsqrtd, "V2dV2d", "")
+BUILTIN(__builtin_ppc_recipdivf, "V4fV4fV4f", "")
+BUILTIN(__builtin_ppc_recipdivd, "V2dV2dV2d", "")
+
// HTM builtins
BUILTIN(__builtin_tbegin, "UiUIi", "")
BUILTIN(__builtin_tend, "UiUIi", "")
@@ -664,90 +796,103 @@ BUILTIN(__builtin_dcbf, "vvC*", "")
// Because these built-ins rely on target-dependent types and to avoid pervasive
// change, they are type checked manually in Sema using custom type descriptors.
// The first argument of the CUSTOM_BUILTIN macro is the name of the built-in
-// with its prefix, the second argument specifies the type of the function
+// with its prefix, the second argument is the name of the intrinsic this
+// built-in generates, the third argument specifies the type of the function
// (result value, then each argument) as follows:
// i -> Unsigned integer followed by the greatest possible value for that
// argument or 0 if no constraint on the value.
// (e.g. i15 for a 4-bits value)
-// V -> Vector type used with MMA builtins (vector unsigned char)
+// V -> Vector type used with MMA built-ins (vector unsigned char)
// W -> PPC Vector type followed by the size of the vector type.
// (e.g. W512 for __vector_quad)
// any other descriptor -> Fall back to generic type descriptor decoding.
// The 'C' suffix can be used as a suffix to specify the const type.
// The '*' suffix can be used as a suffix to specify a pointer to a type.
-// The third argument is set to true if the builtin accumulates its result into
+// The fourth argument is set to true if the built-in accumulates its result into
// its given accumulator.
-CUSTOM_BUILTIN(vsx_lxvp, "W256SLLiW256C*", false)
-CUSTOM_BUILTIN(vsx_stxvp, "vW256SLLiW256C*", false)
-CUSTOM_BUILTIN(vsx_assemble_pair, "vW256*VV", false)
-CUSTOM_BUILTIN(vsx_disassemble_pair, "vv*W256*", false)
-
-CUSTOM_BUILTIN(mma_assemble_acc, "vW512*VVVV", false)
-CUSTOM_BUILTIN(mma_disassemble_acc, "vv*W512*", false)
-CUSTOM_BUILTIN(mma_xxmtacc, "vW512*", true)
-CUSTOM_BUILTIN(mma_xxmfacc, "vW512*", true)
-CUSTOM_BUILTIN(mma_xxsetaccz, "vW512*", false)
-CUSTOM_BUILTIN(mma_xvi4ger8, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_xvi8ger4, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_xvi16ger2, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_xvi16ger2s, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_xvf16ger2, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_xvf32ger, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_xvf64ger, "vW512*W256V", false)
-CUSTOM_BUILTIN(mma_pmxvi4ger8, "vW512*VVi15i15i255", false)
-CUSTOM_BUILTIN(mma_pmxvi8ger4, "vW512*VVi15i15i15", false)
-CUSTOM_BUILTIN(mma_pmxvi16ger2, "vW512*VVi15i15i3", false)
-CUSTOM_BUILTIN(mma_pmxvi16ger2s, "vW512*VVi15i15i3", false)
-CUSTOM_BUILTIN(mma_pmxvf16ger2, "vW512*VVi15i15i3", false)
-CUSTOM_BUILTIN(mma_pmxvf32ger, "vW512*VVi15i15", false)
-CUSTOM_BUILTIN(mma_pmxvf64ger, "vW512*W256Vi15i3", false)
-CUSTOM_BUILTIN(mma_xvi4ger8pp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvi8ger4pp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvi8ger4spp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvi16ger2pp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvi16ger2spp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_pmxvi4ger8pp, "vW512*VVi15i15i255", true)
-CUSTOM_BUILTIN(mma_pmxvi8ger4pp, "vW512*VVi15i15i15", true)
-CUSTOM_BUILTIN(mma_pmxvi8ger4spp, "vW512*VVi15i15i15", true)
-CUSTOM_BUILTIN(mma_pmxvi16ger2pp, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvi16ger2spp, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_xvf16ger2pp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvf16ger2pn, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvf16ger2np, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvf16ger2nn, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_pmxvf16ger2pp, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvf16ger2pn, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvf16ger2np, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvf16ger2nn, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_xvf32gerpp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvf32gerpn, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvf32gernp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvf32gernn, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_pmxvf32gerpp, "vW512*VVi15i15", true)
-CUSTOM_BUILTIN(mma_pmxvf32gerpn, "vW512*VVi15i15", true)
-CUSTOM_BUILTIN(mma_pmxvf32gernp, "vW512*VVi15i15", true)
-CUSTOM_BUILTIN(mma_pmxvf32gernn, "vW512*VVi15i15", true)
-CUSTOM_BUILTIN(mma_xvf64gerpp, "vW512*W256V", true)
-CUSTOM_BUILTIN(mma_xvf64gerpn, "vW512*W256V", true)
-CUSTOM_BUILTIN(mma_xvf64gernp, "vW512*W256V", true)
-CUSTOM_BUILTIN(mma_xvf64gernn, "vW512*W256V", true)
-CUSTOM_BUILTIN(mma_pmxvf64gerpp, "vW512*W256Vi15i3", true)
-CUSTOM_BUILTIN(mma_pmxvf64gerpn, "vW512*W256Vi15i3", true)
-CUSTOM_BUILTIN(mma_pmxvf64gernp, "vW512*W256Vi15i3", true)
-CUSTOM_BUILTIN(mma_pmxvf64gernn, "vW512*W256Vi15i3", true)
-CUSTOM_BUILTIN(mma_xvbf16ger2, "vW512*VV", false)
-CUSTOM_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3", false)
-CUSTOM_BUILTIN(mma_xvbf16ger2pp, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvbf16ger2pn, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvbf16ger2np, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_xvbf16ger2nn, "vW512*VV", true)
-CUSTOM_BUILTIN(mma_pmxvbf16ger2pp, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvbf16ger2pn, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvbf16ger2np, "vW512*VVi15i15i3", true)
-CUSTOM_BUILTIN(mma_pmxvbf16ger2nn, "vW512*VVi15i15i3", true)
+// Provided builtins with _mma_ prefix for compatibility.
+CUSTOM_BUILTIN(mma_lxvp, vsx_lxvp, "W256SLLiW256C*", false)
+CUSTOM_BUILTIN(mma_stxvp, vsx_stxvp, "vW256SLLiW256C*", false)
+CUSTOM_BUILTIN(mma_assemble_pair, vsx_assemble_pair, "vW256*VV", false)
+CUSTOM_BUILTIN(mma_disassemble_pair, vsx_disassemble_pair, "vv*W256*", false)
+
+// UNALIASED_CUSTOM_BUILTIN macro is used for built-ins that have
+// the same name as that of the intrinsic they generate, i.e. the
+// ID and INTR are the same.
+// This avoids repeating the ID and INTR in the macro expression.
+
+UNALIASED_CUSTOM_BUILTIN(vsx_lxvp, "W256SLLiW256C*", false)
+UNALIASED_CUSTOM_BUILTIN(vsx_stxvp, "vW256SLLiW256C*", false)
+UNALIASED_CUSTOM_BUILTIN(vsx_assemble_pair, "vW256*VV", false)
+UNALIASED_CUSTOM_BUILTIN(vsx_disassemble_pair, "vv*W256*", false)
+
+UNALIASED_CUSTOM_BUILTIN(mma_assemble_acc, "vW512*VVVV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_disassemble_acc, "vv*W512*", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xxmtacc, "vW512*", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xxmfacc, "vW512*", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xxsetaccz, "vW512*", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2s, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32ger, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64ger, "vW512*W256V", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8, "vW512*VVi15i15i255", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4, "vW512*VVi15i15i15", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2, "vW512*VVi15i15i3", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2s, "vW512*VVi15i15i3", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2, "vW512*VVi15i15i3", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32ger, "vW512*VVi15i15", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64ger, "vW512*W256Vi15i3", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi4ger8pp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4pp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi8ger4spp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2pp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvi16ger2spp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi4ger8pp, "vW512*VVi15i15i255", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4pp, "vW512*VVi15i15i15", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi8ger4spp, "vW512*VVi15i15i15", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2pp, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvi16ger2spp, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2pn, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2np, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf16ger2nn, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pp, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2pn, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2np, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf16ger2nn, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gerpn, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf32gernn, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpp, "vW512*VVi15i15", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gerpn, "vW512*VVi15i15", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernp, "vW512*VVi15i15", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf32gernn, "vW512*VVi15i15", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpp, "vW512*W256V", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gerpn, "vW512*W256V", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernp, "vW512*W256V", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvf64gernn, "vW512*W256V", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpp, "vW512*W256Vi15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gerpn, "vW512*W256Vi15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernp, "vW512*W256Vi15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvf64gernn, "vW512*W256Vi15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2, "vW512*VV", false)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2, "vW512*VVi15i15i3", false)
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pp, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2pn, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2np, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_xvbf16ger2nn, "vW512*VV", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pp, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2pn, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2np, "vW512*VVi15i15i3", true)
+UNALIASED_CUSTOM_BUILTIN(mma_pmxvbf16ger2nn, "vW512*VVi15i15i3", true)
// FIXME: Obviously incomplete.
#undef BUILTIN
#undef CUSTOM_BUILTIN
+#undef UNALIASED_CUSTOM_BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsRISCV.def b/clang/include/clang/Basic/BuiltinsRISCV.def
new file mode 100644
index 000000000000..b2b4950f92bd
--- /dev/null
+++ b/clang/include/clang/Basic/BuiltinsRISCV.def
@@ -0,0 +1,63 @@
+//==- BuiltinsRISCV.def - RISC-V Builtin function database -------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RISC-V-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(BUILTIN) && !defined(TARGET_BUILTIN)
+# define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+#include "clang/Basic/riscv_vector_builtins.inc"
+
+// Zbb extension
+TARGET_BUILTIN(__builtin_riscv_orc_b_32, "ZiZi", "nc", "experimental-zbb")
+TARGET_BUILTIN(__builtin_riscv_orc_b_64, "WiWi", "nc", "experimental-zbb,64bit")
+
+// Zbc extension
+TARGET_BUILTIN(__builtin_riscv_clmul, "LiLiLi", "nc", "experimental-zbc")
+TARGET_BUILTIN(__builtin_riscv_clmulh, "LiLiLi", "nc", "experimental-zbc")
+TARGET_BUILTIN(__builtin_riscv_clmulr, "LiLiLi", "nc", "experimental-zbc")
+
+// Zbe extension
+TARGET_BUILTIN(__builtin_riscv_bcompress_32, "ZiZiZi", "nc", "experimental-zbe")
+TARGET_BUILTIN(__builtin_riscv_bcompress_64, "WiWiWi", "nc",
+ "experimental-zbe,64bit")
+TARGET_BUILTIN(__builtin_riscv_bdecompress_32, "ZiZiZi", "nc",
+ "experimental-zbe")
+TARGET_BUILTIN(__builtin_riscv_bdecompress_64, "WiWiWi", "nc",
+ "experimental-zbe,64bit")
+
+// Zbp extension
+TARGET_BUILTIN(__builtin_riscv_grev_32, "ZiZiZi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_grev_64, "WiWiWi", "nc", "experimental-zbp,64bit")
+TARGET_BUILTIN(__builtin_riscv_gorc_32, "ZiZiZi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_gorc_64, "WiWiWi", "nc", "experimental-zbp,64bit")
+TARGET_BUILTIN(__builtin_riscv_shfl_32, "ZiZiZi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_shfl_64, "WiWiWi", "nc", "experimental-zbp,64bit")
+TARGET_BUILTIN(__builtin_riscv_unshfl_32, "ZiZiZi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_unshfl_64, "WiWiWi", "nc", "experimental-zbp,64bit")
+TARGET_BUILTIN(__builtin_riscv_xperm_n, "LiLiLi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_xperm_b, "LiLiLi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_xperm_h, "LiLiLi", "nc", "experimental-zbp")
+TARGET_BUILTIN(__builtin_riscv_xperm_w, "WiWiWi", "nc", "experimental-zbp,64bit")
+
+// Zbr extension
+TARGET_BUILTIN(__builtin_riscv_crc32_b, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32_h, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32_w, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32c_b, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32c_h, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32c_w, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32_d, "LiLi", "nc", "experimental-zbr")
+TARGET_BUILTIN(__builtin_riscv_crc32c_d, "LiLi", "nc", "experimental-zbr")
+
+#undef BUILTIN
+#undef TARGET_BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsSystemZ.def b/clang/include/clang/Basic/BuiltinsSystemZ.def
index 5ea6671e623b..079e41136488 100644
--- a/clang/include/clang/Basic/BuiltinsSystemZ.def
+++ b/clang/include/clang/Basic/BuiltinsSystemZ.def
@@ -291,5 +291,12 @@ TARGET_BUILTIN(__builtin_s390_vlbrh, "V8UsV8Us", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vlbrf, "V4UiV4Ui", "nc", "vector")
TARGET_BUILTIN(__builtin_s390_vlbrg, "V2ULLiV2ULLi", "nc", "vector")
+// NNP-assist facility intrinsics.
+TARGET_BUILTIN(__builtin_s390_vclfnhs, "V4fV8UsIi", "nc", "nnp-assist")
+TARGET_BUILTIN(__builtin_s390_vclfnls, "V4fV8UsIi", "nc", "nnp-assist")
+TARGET_BUILTIN(__builtin_s390_vcrnfs, "V8UsV4fV4fIi", "nc", "nnp-assist")
+TARGET_BUILTIN(__builtin_s390_vcfn, "V8UsV8UsIi", "nc", "nnp-assist")
+TARGET_BUILTIN(__builtin_s390_vcnf, "V8UsV8UsIi", "nc", "nnp-assist")
+
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def
index bb7d6d379e58..04ec45aa3b74 100644
--- a/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -66,37 +66,22 @@ TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i64_f64, "LLid", "nc", "nontrappi
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i64_f64, "LLid", "nc", "nontrapping-fptoint")
// SIMD builtins
-TARGET_BUILTIN(__builtin_wasm_swizzle_v8x16, "V16ScV16ScV16Sc", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i8x16, "iV16ScIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i8x16, "iV16UcIUi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_s_i16x8, "iV8sIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_u_i16x8, "iV8UsIUi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_i32x4, "iV4iIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_i64x2, "LLiV2LLiIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_f32x4, "fV4fIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extract_lane_f64x2, "dV2dIi", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_replace_lane_i8x16, "V16ScV16ScIii", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_i16x8, "V8sV8sIii", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_i32x4, "V4iV4iIii", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_i64x2, "V2LLiV2LLiIiLLi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_f32x4, "V4fV4fIif", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_replace_lane_f64x2, "V2dV2dIid", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_add_saturate_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_add_saturate_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_add_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_add_saturate_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_sub_saturate_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_sub_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_sub_saturate_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_swizzle_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_add_sat_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_add_sat_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_add_sat_s_i16x8, "V8sV8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_add_sat_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_sub_sat_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_sub_sat_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_sub_sat_s_i16x8, "V8sV8sV8s", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_sub_sat_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_abs_i8x16, "V16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_abs_i16x8, "V8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_abs_i32x4, "V4iV4i", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_abs_i64x2, "V2LLiV2LLi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_s_i8x16, "V16ScV16ScV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_u_i8x16, "V16UcV16UcV16Uc", "nc", "simd128")
@@ -116,22 +101,7 @@ TARGET_BUILTIN(__builtin_wasm_avgr_u_i16x8, "V8UsV8UsV8Us", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_popcnt_i8x16, "V16ScV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_q15mulr_saturate_s_i16x8, "V8sV8sV8s", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_extmul_low_i8x16_s_i16x8, "V8sV16ScV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_high_i8x16_s_i16x8, "V8sV16ScV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_low_i8x16_u_i16x8, "V8UsV16UcV16Uc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_high_i8x16_u_i16x8, "V8UsV16UcV16Uc", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_extmul_low_i16x8_s_i32x4, "V4iV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_high_i16x8_s_i32x4, "V4iV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_low_i16x8_u_i32x4, "V4UiV8UsV8Us", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_high_i16x8_u_i32x4, "V4UiV8UsV8Us", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_extmul_low_i32x4_s_i64x2, "V2LLiV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_high_i32x4_s_i64x2, "V2LLiV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_low_i32x4_u_i64x2, "V2ULLiV4UiV4Ui", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_extmul_high_i32x4_u_i64x2, "V2ULLiV4UiV4Ui", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_q15mulr_sat_s_i16x8, "V8sV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extadd_pairwise_i8x16_s_i16x8, "V8sV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_extadd_pairwise_i8x16_u_i16x8, "V8UsV16Uc", "nc", "simd128")
@@ -141,21 +111,13 @@ TARGET_BUILTIN(__builtin_wasm_extadd_pairwise_i16x8_u_i32x4, "V4UiV8Us", "nc", "
TARGET_BUILTIN(__builtin_wasm_bitselect, "V4iV4iV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_signselect_i8x16, "V16ScV16ScV16ScV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_signselect_i16x8, "V8sV8sV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_signselect_i32x4, "V4iV4iV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_signselect_i64x2, "V2LLiV2LLiV2LLiV2LLi", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_shuffle_v8x16, "V16ScV16ScV16ScIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIi", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_shuffle_i8x16, "V16ScV16ScV16ScIiIiIiIiIiIiIiIiIiIiIiIiIiIiIiIi", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i8x16, "iV16Sc", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i16x8, "iV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i32x4, "iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_any_true_i64x2, "iV2LLi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_any_true_v128, "iV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i8x16, "iV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i16x8, "iV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_all_true_i32x4, "iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_all_true_i64x2, "iV2LLi", "nc", "unimplemented-simd128")
+TARGET_BUILTIN(__builtin_wasm_all_true_i64x2, "iV2LLi", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_bitmask_i8x16, "iV16Sc", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_bitmask_i16x8, "iV8s", "nc", "simd128")
@@ -167,12 +129,8 @@ TARGET_BUILTIN(__builtin_wasm_abs_f64x2, "V2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_f32x4, "V4fV4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_max_f32x4, "V4fV4fV4f", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_pmin_f32x4, "V4fV4fV4f", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_pmax_f32x4, "V4fV4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_min_f64x2, "V2dV2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_max_f64x2, "V2dV2dV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_pmin_f64x2, "V2dV2dV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_pmax_f64x2, "V2dV2dV2d", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_ceil_f32x4, "V4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_floor_f32x4, "V4fV4f", "nc", "simd128")
@@ -188,47 +146,16 @@ TARGET_BUILTIN(__builtin_wasm_dot_s_i32x4_i16x8, "V4iV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sqrt_f32x4, "V4fV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_sqrt_f64x2, "V2dV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_qfma_f32x4, "V4fV4fV4fV4f", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_qfms_f32x4, "V4fV4fV4fV4f", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_qfma_f64x2, "V2dV2dV2dV2d", "nc", "unimplemented-simd128")
-TARGET_BUILTIN(__builtin_wasm_qfms_f64x2, "V2dV2dV2dV2d", "nc", "unimplemented-simd128")
-
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_s_i32x4_f32x4, "V4iV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_trunc_saturate_u_i32x4_f32x4, "V4iV4f", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_s_i8x16_i16x8, "V16ScV8sV8s", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8UsV8Us", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8sV8s", "nc", "simd128")
TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4UiV4Ui", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_widen_low_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_widen_high_s_i32x4_i64x2, "V2LLiV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_widen_low_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_widen_high_u_i32x4_i64x2, "V2LLUiV4Ui", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_convert_low_s_i32x4_f64x2, "V2dV4i", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_convert_low_u_i32x4_f64x2, "V2dV4Ui", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4, "V4iV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_demote_zero_f64x2_f32x4, "V4fV2d", "nc", "simd128")
-TARGET_BUILTIN(__builtin_wasm_promote_low_f32x4_f64x2, "V2dV4f", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "n", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScSc*V16ScIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ss*V8sIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4ii*V4iIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLi*V2LLiIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_store8_lane, "vSc*V16ScIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_store16_lane, "vs*V8sIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_store32_lane, "vi*V4iIi", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_store64_lane, "vLLi*V2LLiIi", "n", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_eq_i64x2, "V2LLiV2LLiV2LLi", "nc", "simd128")
-
-TARGET_BUILTIN(__builtin_wasm_prefetch_t, "vv*", "n", "simd128")
-TARGET_BUILTIN(__builtin_wasm_prefetch_nt, "vv*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4iV4i", "nc", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4, "V4iV2d", "nc", "simd128")
+TARGET_BUILTIN(__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4, "V4UiV2d", "nc", "simd128")
#undef BUILTIN
#undef TARGET_BUILTIN
diff --git a/clang/include/clang/Basic/BuiltinsX86.def b/clang/include/clang/Basic/BuiltinsX86.def
index 16fb7dd7b0e6..18e541fe9cb5 100644
--- a/clang/include/clang/Basic/BuiltinsX86.def
+++ b/clang/include/clang/Basic/BuiltinsX86.def
@@ -1878,6 +1878,10 @@ TARGET_BUILTIN(__builtin_ia32_reduce_and_d512, "iV16i", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_and_q512, "OiV8Oi", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fadd_pd512, "ddV8d", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fadd_ps512, "ffV16f", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_pd512, "dV8d", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmax_ps512, "fV16f", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_pd512, "dV8d", "ncV:512:", "avx512f")
+TARGET_BUILTIN(__builtin_ia32_reduce_fmin_ps512, "fV16f", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fmul_pd512, "ddV8d", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_fmul_ps512, "ffV16f", "ncV:512:", "avx512f")
TARGET_BUILTIN(__builtin_ia32_reduce_mul_d512, "iV16i", "ncV:512:", "avx512f")
diff --git a/clang/include/clang/Basic/BuiltinsX86_64.def b/clang/include/clang/Basic/BuiltinsX86_64.def
index 974ba35b3233..ce2b1decdf6c 100644
--- a/clang/include/clang/Basic/BuiltinsX86_64.def
+++ b/clang/include/clang/Basic/BuiltinsX86_64.def
@@ -101,10 +101,16 @@ TARGET_BUILTIN(__builtin_ia32_testui, "Uc", "n", "uintr")
TARGET_BUILTIN(__builtin_ia32_senduipi, "vUWi", "n", "uintr")
// AMX internal builtin
+TARGET_BUILTIN(__builtin_ia32_tile_loadconfig_internal, "vvC*", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tileloadd64_internal, "V256iUsUsvC*z", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tileloaddt164_internal, "V256iUsUsvC*z", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tdpbssd_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbsud_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbusd_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-int8")
+TARGET_BUILTIN(__builtin_ia32_tdpbuud_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-int8")
TARGET_BUILTIN(__builtin_ia32_tilestored64_internal, "vUsUsv*zV256i", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tilezero_internal, "V256iUsUs", "n", "amx-tile")
+TARGET_BUILTIN(__builtin_ia32_tdpbf16ps_internal, "V256iUsUsUsV256iV256iV256i", "n", "amx-bf16")
// AMX
TARGET_BUILTIN(__builtin_ia32_tile_loadconfig, "vvC*", "n", "amx-tile")
TARGET_BUILTIN(__builtin_ia32_tile_storeconfig, "vvC*", "n", "amx-tile")
diff --git a/clang/include/clang/Basic/CodeGenOptions.def b/clang/include/clang/Basic/CodeGenOptions.def
index 5c8af65326ed..e3202cf88756 100644
--- a/clang/include/clang/Basic/CodeGenOptions.def
+++ b/clang/include/clang/Basic/CodeGenOptions.def
@@ -39,9 +39,9 @@ CODEGENOPT(AssumeSaneOperatorNew , 1, 1) ///< implicit __attribute__((malloc)) o
CODEGENOPT(Autolink , 1, 1) ///< -fno-autolink
CODEGENOPT(ObjCAutoRefCountExceptions , 1, 0) ///< Whether ARC should be EH-safe.
CODEGENOPT(Backchain , 1, 0) ///< -mbackchain
-CODEGENOPT(IgnoreXCOFFVisibility , 1, 0) ///< -mignore-xcoff-visibility
CODEGENOPT(ControlFlowGuardNoChecks , 1, 0) ///< -cfguard-no-checks
CODEGENOPT(ControlFlowGuard , 1, 0) ///< -cfguard
+CODEGENOPT(EHContGuard , 1, 0) ///< -ehcontguard
CODEGENOPT(CXAAtExit , 1, 1) ///< Use __cxa_atexit for calling destructors.
CODEGENOPT(RegisterGlobalDtorsWithAtExit, 1, 1) ///< Use atexit or __cxa_atexit to register global destructors.
CODEGENOPT(CXXCtorDtorAliases, 1, 0) ///< Emit complete ctors/dtors as linker
@@ -63,12 +63,17 @@ CODEGENOPT(DisableLifetimeMarkers, 1, 0) ///< Don't emit any lifetime markers
CODEGENOPT(DisableO0ImplyOptNone , 1, 0) ///< Don't annonate function with optnone at O0
CODEGENOPT(ExperimentalStrictFloatingPoint, 1, 0) ///< Enables the new, experimental
///< strict floating point.
+CODEGENOPT(EnableNoundefAttrs, 1, 0) ///< Enable emitting `noundef` attributes on IR call arguments and return values
CODEGENOPT(LegacyPassManager, 1, 0) ///< Use the legacy pass manager.
CODEGENOPT(DebugPassManager, 1, 0) ///< Prints debug information for the new
///< pass manager.
CODEGENOPT(DisableRedZone , 1, 0) ///< Set when -mno-red-zone is enabled.
CODEGENOPT(EmitCallSiteInfo, 1, 0) ///< Emit call site info only in the case of
///< '-g' + 'O>0' level.
+CODEGENOPT(EnableDIPreservationVerify, 1, 0) ///< Enable verifying debug info
+                                             ///< preservation after each pass
+                                             ///< (i.e., check that the original
+                                             ///< debug info metadata is kept).
CODEGENOPT(IndirectTlsSegRefs, 1, 0) ///< Set when -mno-tls-direct-seg-refs
///< is specified.
CODEGENOPT(DisableTailCalls , 1, 0) ///< Do not emit tail calls.
@@ -167,7 +172,8 @@ CODEGENOPT(NoInlineLineTables, 1, 0) ///< Whether debug info should contain
CODEGENOPT(StackClashProtector, 1, 0) ///< Set when -fstack-clash-protection is enabled.
CODEGENOPT(NoImplicitFloat , 1, 0) ///< Set when -mno-implicit-float is enabled.
CODEGENOPT(NullPointerIsValid , 1, 0) ///< Assume Null pointer deference is defined.
-CODEGENOPT(CorrectlyRoundedDivSqrt, 1, 0) ///< -cl-fp32-correctly-rounded-divide-sqrt
+CODEGENOPT(OpenCLCorrectlyRoundedDivSqrt, 1, 0) ///< -cl-fp32-correctly-rounded-divide-sqrt
+CODEGENOPT(HIPCorrectlyRoundedDivSqrt, 1, 1) ///< -fno-hip-fp32-correctly-rounded-divide-sqrt
CODEGENOPT(UniqueInternalLinkageNames, 1, 0) ///< Internal Linkage symbols get unique names.
CODEGENOPT(SplitMachineFunctions, 1, 0) ///< Split machine functions using profile information.
@@ -206,6 +212,10 @@ CODEGENOPT(NewStructPathTBAA , 1, 0) ///< Whether or not to use enhanced struct-
CODEGENOPT(SaveTempLabels , 1, 0) ///< Save temporary labels.
CODEGENOPT(SanitizeAddressUseAfterScope , 1, 0) ///< Enable use-after-scope detection
///< in AddressSanitizer
+ENUM_CODEGENOPT(SanitizeAddressUseAfterReturn,
+ llvm::AsanDetectStackUseAfterReturnMode, 2,
+ llvm::AsanDetectStackUseAfterReturnMode::Runtime
+ ) ///< Set detection mode for stack-use-after-return.
CODEGENOPT(SanitizeAddressPoisonCustomArrayCookie, 1,
0) ///< Enable poisoning operator new[] which is not a replaceable
///< global allocation function in AddressSanitizer
@@ -214,6 +224,9 @@ CODEGENOPT(SanitizeAddressGlobalsDeadStripping, 1, 0) ///< Enable linker dead st
CODEGENOPT(SanitizeAddressUseOdrIndicator, 1, 0) ///< Enable ODR indicator globals
CODEGENOPT(SanitizeMemoryTrackOrigins, 2, 0) ///< Enable tracking origins in
///< MemorySanitizer
+ENUM_CODEGENOPT(SanitizeAddressDtor, llvm::AsanDtorKind, 2,
+ llvm::AsanDtorKind::Global) ///< Set how ASan global
+ ///< destructors are emitted.
CODEGENOPT(SanitizeMemoryUseAfterDtor, 1, 0) ///< Enable use-after-delete detection
///< in MemorySanitizer
CODEGENOPT(SanitizeCfiCrossDso, 1, 0) ///< Enable cross-dso support in CFI.
@@ -266,6 +279,9 @@ CODEGENOPT(VectorizeLoop , 1, 0) ///< Run loop vectorizer.
CODEGENOPT(VectorizeSLP , 1, 0) ///< Run SLP vectorizer.
CODEGENOPT(ProfileSampleAccurate, 1, 0) ///< Sample profile is accurate.
+/// Treat loops as finite: language, always, never.
+ENUM_CODEGENOPT(FiniteLoops, FiniteLoopsKind, 2, FiniteLoopsKind::Language)
+
/// Attempt to use register sized accesses to bit-fields in structures, when
/// possible.
CODEGENOPT(UseRegisterSizedBitfieldAccess , 1, 0)
@@ -281,7 +297,9 @@ VALUE_CODEGENOPT(StackAlignment , 32, 0) ///< Overrides default stack
///< alignment, if not 0.
VALUE_CODEGENOPT(StackProbeSize , 32, 4096) ///< Overrides default stack
///< probe size, even if 0.
+VALUE_CODEGENOPT(WarnStackSize , 32, UINT_MAX) ///< Set via -fwarn-stack-size.
CODEGENOPT(NoStackArgProbe, 1, 0) ///< Set when -mno-stack-arg-probe is used
+CODEGENOPT(DebugStrictDwarf, 1, 1) ///< Whether or not to use strict DWARF info.
CODEGENOPT(DebugColumnInfo, 1, 0) ///< Whether or not to use column information
///< in debug info.
@@ -329,7 +347,7 @@ ENUM_CODEGENOPT(DebugInfo, codegenoptions::DebugInfoKind, 4, codegenoptions::NoD
CODEGENOPT(MacroDebugInfo, 1, 0)
/// Tune the debug info for this debugger.
-ENUM_CODEGENOPT(DebuggerTuning, llvm::DebuggerKind, 2,
+ENUM_CODEGENOPT(DebuggerTuning, llvm::DebuggerKind, 3,
llvm::DebuggerKind::Default)
/// Dwarf version. Version zero indicates to LLVM that no DWARF should be
@@ -359,7 +377,7 @@ ENUM_CODEGENOPT(DefaultTLSModel, TLSModel, 2, GeneralDynamicTLSModel)
VALUE_CODEGENOPT(TLSSize, 8, 0)
/// The default stack protector guard offset to use.
-VALUE_CODEGENOPT(StackProtectorGuardOffset, 32, (unsigned)-1)
+VALUE_CODEGENOPT(StackProtectorGuardOffset, 32, INT_MAX)
/// Number of path components to strip when emitting checks. (0 == full
/// filename)
@@ -414,6 +432,11 @@ CODEGENOPT(PassByValueIsNoAlias, 1, 0)
/// according to the field declaring type width.
CODEGENOPT(AAPCSBitfieldWidth, 1, 1)
+/// Sets the IEEE bit in the expected default floating point mode register.
+/// Floating point opcodes that support exception flag gathering quiet and
+/// propagate signaling NaN inputs per IEEE 754-2008 (AMDGPU Only)
+CODEGENOPT(EmitIEEENaNCompliantInsts, 1, 1)
+
#undef CODEGENOPT
#undef ENUM_CODEGENOPT
#undef VALUE_CODEGENOPT
diff --git a/clang/include/clang/Basic/CodeGenOptions.h b/clang/include/clang/Basic/CodeGenOptions.h
index 73d41e3293c6..617c255641ef 100644
--- a/clang/include/clang/Basic/CodeGenOptions.h
+++ b/clang/include/clang/Basic/CodeGenOptions.h
@@ -20,6 +20,7 @@
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Regex.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <map>
#include <memory>
#include <string>
@@ -54,11 +55,12 @@ public:
};
enum VectorLibrary {
- NoLibrary, // Don't use any vector library.
- Accelerate, // Use the Accelerate framework.
- LIBMVEC, // GLIBC vector math library.
- MASSV, // IBM MASS vector library.
- SVML // Intel short vector math library.
+ NoLibrary, // Don't use any vector library.
+ Accelerate, // Use the Accelerate framework.
+ LIBMVEC, // GLIBC vector math library.
+ MASSV, // IBM MASS vector library.
+ SVML, // Intel short vector math library.
+    Darwin_libsystem_m // Use Darwin's libsystem_m vector functions.
};
enum ObjCDispatchMethodKind {
@@ -74,23 +76,6 @@ public:
LocalExecTLSModel
};
- /// Clang versions with different platform ABI conformance.
- enum class ClangABI {
- /// Attempt to be ABI-compatible with code generated by Clang 3.8.x
- /// (SVN r257626). This causes <1 x long long> to be passed in an
- /// integer register instead of an SSE register on x64_64.
- Ver3_8,
-
- /// Attempt to be ABI-compatible with code generated by Clang 4.0.x
- /// (SVN r291814). This causes move operations to be ignored when
- /// determining whether a class type can be passed or returned directly.
- Ver4,
-
- /// Conform to the underlying platform's C and C++ ABIs as closely
- /// as we can.
- Latest
- };
-
enum StructReturnConventionKind {
SRCK_Default, // No special option was passed.
SRCK_OnStack, // Small structs on the stack (-fpcc-struct-return).
@@ -140,6 +125,12 @@ public:
All, // Keep all frame pointers.
};
+ enum FiniteLoopsKind {
+ Language, // Not specified, use language standard.
+ Always, // All loops are assumed to be finite.
+ Never, // No loop is assumed to be finite.
+ };
+
/// The code model to use (-mcmodel).
std::string CodeModel;
@@ -166,6 +157,9 @@ public:
/// The string to embed in debug information as the current working directory.
std::string DebugCompilationDir;
+ /// The string to embed in coverage mapping as the current working directory.
+ std::string CoverageCompilationDir;
+
/// The string to embed in the debug information for the compile unit, if
/// non-empty.
std::string DwarfDebugFlags;
@@ -175,11 +169,15 @@ public:
std::string RecordCommandLine;
std::map<std::string, std::string> DebugPrefixMap;
- std::map<std::string, std::string> ProfilePrefixMap;
+ std::map<std::string, std::string> CoveragePrefixMap;
/// The ABI to use for passing floating point arguments.
std::string FloatABI;
+ /// The file to use for dumping bug report by `Debugify` for original
+ /// debug info.
+ std::string DIBugsReportFilePath;
+
/// The floating-point denormal mode to use.
llvm::DenormalMode FPDenormalMode = llvm::DenormalMode::getIEEE();
@@ -278,27 +276,52 @@ public:
/// -fsymbol-partition (see https://lld.llvm.org/Partitions.html).
std::string SymbolPartition;
- /// Regular expression to select optimizations for which we should enable
- /// optimization remarks. Transformation passes whose name matches this
- /// expression (and support this feature), will emit a diagnostic
- /// whenever they perform a transformation. This is enabled by the
- /// -Rpass=regexp flag.
- std::shared_ptr<llvm::Regex> OptimizationRemarkPattern;
-
- /// Regular expression to select optimizations for which we should enable
- /// missed optimization remarks. Transformation passes whose name matches this
- /// expression (and support this feature), will emit a diagnostic
- /// whenever they tried but failed to perform a transformation. This is
- /// enabled by the -Rpass-missed=regexp flag.
- std::shared_ptr<llvm::Regex> OptimizationRemarkMissedPattern;
-
- /// Regular expression to select optimizations for which we should enable
- /// optimization analyses. Transformation passes whose name matches this
- /// expression (and support this feature), will emit a diagnostic
- /// whenever they want to explain why they decided to apply or not apply
- /// a given transformation. This is enabled by the -Rpass-analysis=regexp
- /// flag.
- std::shared_ptr<llvm::Regex> OptimizationRemarkAnalysisPattern;
+ enum RemarkKind {
+ RK_Missing, // Remark argument not present on the command line.
+ RK_Enabled, // Remark enabled via '-Rgroup'.
+ RK_EnabledEverything, // Remark enabled via '-Reverything'.
+ RK_Disabled, // Remark disabled via '-Rno-group'.
+ RK_DisabledEverything, // Remark disabled via '-Rno-everything'.
+ RK_WithPattern, // Remark pattern specified via '-Rgroup=regexp'.
+ };
+
+ /// Optimization remark with an optional regular expression pattern.
+ struct OptRemark {
+ RemarkKind Kind;
+ std::string Pattern;
+ std::shared_ptr<llvm::Regex> Regex;
+
+ /// By default, optimization remark is missing.
+ OptRemark() : Kind(RK_Missing), Pattern(""), Regex(nullptr) {}
+
+ /// Returns true iff the optimization remark holds a valid regular
+ /// expression.
+ bool hasValidPattern() const { return Regex != nullptr; }
+
+ /// Matches the given string against the regex, if there is some.
+ bool patternMatches(StringRef String) const {
+ return hasValidPattern() && Regex->match(String);
+ }
+ };
+
+ /// Selected optimizations for which we should enable optimization remarks.
+ /// Transformation passes whose name matches the contained (optional) regular
+ /// expression (and support this feature), will emit a diagnostic whenever
+ /// they perform a transformation.
+ OptRemark OptimizationRemark;
+
+ /// Selected optimizations for which we should enable missed optimization
+ /// remarks. Transformation passes whose name matches the contained (optional)
+ /// regular expression (and support this feature), will emit a diagnostic
+ /// whenever they tried but failed to perform a transformation.
+ OptRemark OptimizationRemarkMissed;
+
+ /// Selected optimizations for which we should enable optimization analyses.
+ /// Transformation passes whose name matches the contained (optional) regular
+ /// expression (and support this feature), will emit a diagnostic whenever
+ /// they want to explain why they decided to apply or not apply a given
+ /// transformation.
+ OptRemark OptimizationRemarkAnalysis;
/// Set of files defining the rules for the symbol rewriting.
std::vector<std::string> RewriteMapFiles;
@@ -341,14 +364,21 @@ public:
/// other styles we may implement in the future.
std::string StackProtectorGuard;
- /// The TLS base register when StackProtectorGuard is "tls".
+ /// The TLS base register when StackProtectorGuard is "tls", or register used
+ /// to store the stack canary for "sysreg".
/// On x86 this can be "fs" or "gs".
+ /// On AArch64 this can only be "sp_el0".
std::string StackProtectorGuardReg;
- /// Path to blocklist file specifying which objects
+ /// Path to ignorelist file specifying which objects
/// (files, functions) listed for instrumentation by sanitizer
/// coverage pass should actually not be instrumented.
- std::vector<std::string> SanitizeCoverageBlocklistFiles;
+ std::vector<std::string> SanitizeCoverageIgnorelistFiles;
+
+ /// Name of the stack usage file (i.e., .su file) if user passes
+ /// -fstack-usage. If empty, it can be implied that -fstack-usage is not
+ /// passed on the command line.
+ std::string StackUsageOutput;
/// Executable and command-line used to create a given CompilerInvocation.
/// Most of the time this will be the full -cc1 command.
@@ -380,10 +410,6 @@ public:
CodeGenOptions();
- /// Is this a libc/libm function that is no longer recognized as a
- /// builtin because a -fno-builtin-* option has been specified?
- bool isNoBuiltinFunc(const char *Name) const;
-
const std::vector<std::string> &getNoBuiltinFuncs() const {
return NoBuiltinFuncs;
}
@@ -426,6 +452,12 @@ public:
bool hasMaybeUnusedDebugInfo() const {
return getDebugInfo() >= codegenoptions::UnusedTypeInfo;
}
+
+ // Check if any one of SanitizeCoverage* is enabled.
+ bool hasSanitizeCoverage() const {
+ return SanitizeCoverageType || SanitizeCoverageIndirectCalls ||
+ SanitizeCoverageTraceCmp;
+ }
};
} // end namespace clang
diff --git a/clang/include/clang/Basic/Cuda.h b/clang/include/clang/Basic/Cuda.h
index b3a2e99fe931..aa12724cbf0c 100644
--- a/clang/include/clang/Basic/Cuda.h
+++ b/clang/include/clang/Basic/Cuda.h
@@ -29,7 +29,9 @@ enum class CudaVersion {
CUDA_101,
CUDA_102,
CUDA_110,
- LATEST = CUDA_110,
+ CUDA_111,
+ CUDA_112,
+ LATEST = CUDA_112,
LATEST_SUPPORTED = CUDA_101,
};
const char *CudaVersionToString(CudaVersion V);
@@ -55,6 +57,7 @@ enum class CudaArch {
SM_72,
SM_75,
SM_80,
+ SM_86,
GFX600,
GFX601,
GFX602,
@@ -75,14 +78,18 @@ enum class CudaArch {
GFX906,
GFX908,
GFX909,
+ GFX90a,
GFX90c,
GFX1010,
GFX1011,
GFX1012,
+ GFX1013,
GFX1030,
GFX1031,
GFX1032,
GFX1033,
+ GFX1034,
+ GFX1035,
LAST,
};
diff --git a/clang/include/clang/Basic/DarwinSDKInfo.h b/clang/include/clang/Basic/DarwinSDKInfo.h
new file mode 100644
index 000000000000..918dc7c8becc
--- /dev/null
+++ b/clang/include/clang/Basic/DarwinSDKInfo.h
@@ -0,0 +1,157 @@
+//===--- DarwinSDKInfo.h - SDK Information parser for darwin ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
+#define LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/VersionTuple.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+namespace llvm {
+namespace json {
+class Object;
+} // end namespace json
+} // end namespace llvm
+
+namespace clang {
+
+/// The information about the darwin SDK that was used during this compilation.
+class DarwinSDKInfo {
+public:
+ /// A value that describes two os-environment pairs that can be used as a key
+ /// to the version map in the SDK.
+ struct OSEnvPair {
+ public:
+ using StorageType = uint64_t;
+
+ constexpr OSEnvPair(llvm::Triple::OSType FromOS,
+ llvm::Triple::EnvironmentType FromEnv,
+ llvm::Triple::OSType ToOS,
+ llvm::Triple::EnvironmentType ToEnv)
+ : Value(((StorageType(FromOS) * StorageType(llvm::Triple::LastOSType) +
+ StorageType(FromEnv))
+ << 32ull) |
+ (StorageType(ToOS) * StorageType(llvm::Triple::LastOSType) +
+ StorageType(ToEnv))) {}
+
+ /// Returns the os-environment mapping pair that's used to represent the
+ /// macOS -> Mac Catalyst version mapping.
+ static inline constexpr OSEnvPair macOStoMacCatalystPair() {
+ return OSEnvPair(llvm::Triple::MacOSX, llvm::Triple::UnknownEnvironment,
+ llvm::Triple::IOS, llvm::Triple::MacABI);
+ }
+
+ /// Returns the os-environment mapping pair that's used to represent the
+ /// Mac Catalyst -> macOS version mapping.
+ static inline constexpr OSEnvPair macCatalystToMacOSPair() {
+ return OSEnvPair(llvm::Triple::IOS, llvm::Triple::MacABI,
+ llvm::Triple::MacOSX, llvm::Triple::UnknownEnvironment);
+ }
+
+ private:
+ StorageType Value;
+
+ friend class DarwinSDKInfo;
+ };
+
+ /// Represents a version mapping that maps from a version of one target to a
+ /// version of a related target.
+ ///
+ /// e.g. "macOS_iOSMac":{"10.15":"13.1"} is an example of a macOS -> Mac
+ /// Catalyst version map.
+ class RelatedTargetVersionMapping {
+ public:
+ RelatedTargetVersionMapping(
+ VersionTuple MinimumKeyVersion, VersionTuple MaximumKeyVersion,
+ VersionTuple MinimumValue, VersionTuple MaximumValue,
+ llvm::DenseMap<VersionTuple, VersionTuple> Mapping)
+ : MinimumKeyVersion(MinimumKeyVersion),
+ MaximumKeyVersion(MaximumKeyVersion), MinimumValue(MinimumValue),
+ MaximumValue(MaximumValue), Mapping(Mapping) {
+ assert(!this->Mapping.empty() && "unexpected empty mapping");
+ }
+
+ /// Returns the value with the lowest version in the mapping.
+ const VersionTuple &getMinimumValue() const { return MinimumValue; }
+
+ /// Returns the mapped key, or the appropriate Minimum / MaximumValue if
+ /// the key is outside of the mapping bounds. If the key isn't mapped, but
+ /// within the minimum and maximum bounds, None is returned.
+ Optional<VersionTuple> map(const VersionTuple &Key,
+ const VersionTuple &MinimumValue,
+ Optional<VersionTuple> MaximumValue) const;
+
+ static Optional<RelatedTargetVersionMapping>
+ parseJSON(const llvm::json::Object &Obj,
+ VersionTuple MaximumDeploymentTarget);
+
+ private:
+ VersionTuple MinimumKeyVersion;
+ VersionTuple MaximumKeyVersion;
+ VersionTuple MinimumValue;
+ VersionTuple MaximumValue;
+ llvm::DenseMap<VersionTuple, VersionTuple> Mapping;
+ };
+
+ DarwinSDKInfo(VersionTuple Version, VersionTuple MaximumDeploymentTarget,
+ llvm::DenseMap<OSEnvPair::StorageType,
+ Optional<RelatedTargetVersionMapping>>
+ VersionMappings =
+ llvm::DenseMap<OSEnvPair::StorageType,
+ Optional<RelatedTargetVersionMapping>>())
+ : Version(Version), MaximumDeploymentTarget(MaximumDeploymentTarget),
+ VersionMappings(std::move(VersionMappings)) {}
+
+ const llvm::VersionTuple &getVersion() const { return Version; }
+
+ // Returns the optional, target-specific version mapping that maps from one
+ // target to another target.
+ //
+ // This mapping is constructed from an appropriate mapping in the SDKSettings,
+ // for instance, when building for Mac Catalyst, the mapping would contain the
+ // "macOS_iOSMac" mapping as it maps the macOS versions to the Mac Catalyst
+ // versions.
+ //
+ // This mapping does not exist when the target doesn't have an appropriate
+ // related version mapping, or when there was an error reading the mapping
+ // from the SDKSettings, or when it's missing in the SDKSettings.
+ const RelatedTargetVersionMapping *getVersionMapping(OSEnvPair Kind) const {
+ auto Mapping = VersionMappings.find(Kind.Value);
+ if (Mapping == VersionMappings.end())
+ return nullptr;
+ return Mapping->getSecond().hasValue() ? Mapping->getSecond().getPointer()
+ : nullptr;
+ }
+
+ static Optional<DarwinSDKInfo>
+ parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj);
+
+private:
+ VersionTuple Version;
+ VersionTuple MaximumDeploymentTarget;
+ // Need to wrap the value in an optional here as the value has to be default
+ // constructible, and std::unique_ptr doesn't like DarwinSDKInfo being
+ // Optional as Optional is trying to copy it in emplace.
+ llvm::DenseMap<OSEnvPair::StorageType, Optional<RelatedTargetVersionMapping>>
+ VersionMappings;
+};
+
+/// Parse the SDK information from the SDKSettings.json file.
+///
+/// \returns an error if the SDKSettings.json file is invalid, None if the
+/// SDK has no SDKSettings.json, or a valid \c DarwinSDKInfo otherwise.
+Expected<Optional<DarwinSDKInfo>> parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS,
+ StringRef SDKRootPath);
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_DARWIN_SDK_INFO_H
diff --git a/clang/include/clang/Basic/DebugInfoOptions.h b/clang/include/clang/Basic/DebugInfoOptions.h
index 7f5669c1760f..c1259d7797db 100644
--- a/clang/include/clang/Basic/DebugInfoOptions.h
+++ b/clang/include/clang/Basic/DebugInfoOptions.h
@@ -37,6 +37,7 @@ enum DebugInfoKind {
/// Limit generated debug info for classes to reduce size. This emits class
/// type info only where the constructor is emitted, if it is a class that
/// has a constructor.
+ /// FIXME: Consider combining this with LimitedDebugInfo.
DebugInfoConstructor,
/// Limit generated debug info to reduce size (-fno-standalone-debug). This
diff --git a/clang/include/clang/Basic/DeclNodes.td b/clang/include/clang/Basic/DeclNodes.td
index 4771a3549426..f8ad6cf5b262 100644
--- a/clang/include/clang/Basic/DeclNodes.td
+++ b/clang/include/clang/Basic/DeclNodes.td
@@ -71,10 +71,13 @@ def Named : DeclNode<Decl, "named declarations", 1>;
def TemplateTemplateParm : DeclNode<Template>;
def BuiltinTemplate : DeclNode<Template>;
def Concept : DeclNode<Template>;
- def Using : DeclNode<Named>;
+ def BaseUsing : DeclNode<Named, "", 1>;
+ def Using : DeclNode<BaseUsing>;
+ def UsingEnum : DeclNode<BaseUsing>;
def UsingPack : DeclNode<Named>;
def UsingShadow : DeclNode<Named>;
def ConstructorUsingShadow : DeclNode<UsingShadow>;
+ def UnresolvedUsingIfExists : DeclNode<Named>;
def ObjCMethod : DeclNode<Named, "Objective-C methods">, DeclContext;
def ObjCContainer : DeclNode<Named, "Objective-C containers", 1>, DeclContext;
def ObjCCategory : DeclNode<ObjCContainer>;
diff --git a/clang/include/clang/Basic/Diagnostic.h b/clang/include/clang/Basic/Diagnostic.h
index 3499c551cfdf..3b915fb15a89 100644
--- a/clang/include/clang/Basic/Diagnostic.h
+++ b/clang/include/clang/Basic/Diagnostic.h
@@ -273,6 +273,13 @@ private:
// Which overload candidates to show.
OverloadsShown ShowOverloads = Ovl_All;
+ // With Ovl_Best, the number of overload candidates to show when we encounter
+ // an error.
+ //
+ // The value here is the number of candidates to show in the first nontrivial
+ // error. Future errors may show a different number of candidates.
+ unsigned NumOverloadsToShow = 32;
+
// Cap of # errors emitted, 0 -> no limit.
unsigned ErrorLimit = 0;
@@ -707,6 +714,37 @@ public:
}
OverloadsShown getShowOverloads() const { return ShowOverloads; }
+ /// When a call or operator fails, print out up to this many candidate
+ /// overloads as suggestions.
+ ///
+ /// With Ovl_Best, we set a high limit for the first nontrivial overload set
+ /// we print, and a lower limit for later sets. This way the user has a
+ /// chance of diagnosing at least one callsite in their program without
+ /// having to recompile with -fshow-overloads=all.
+ unsigned getNumOverloadCandidatesToShow() const {
+ switch (getShowOverloads()) {
+ case Ovl_All:
+ // INT_MAX rather than UINT_MAX so that we don't have to think about the
+ // effect of implicit conversions on this value. In practice we'll never
+ // hit 2^31 candidates anyway.
+ return std::numeric_limits<int>::max();
+ case Ovl_Best:
+ return NumOverloadsToShow;
+ }
+ llvm_unreachable("invalid OverloadsShown kind");
+ }
+
+ /// Call this after showing N overload candidates. This influences the value
+ /// returned by later calls to getNumOverloadCandidatesToShow().
+ void overloadCandidatesShown(unsigned N) {
+ // Current heuristic: Start out with a large value for NumOverloadsToShow,
+ // and then once we print one nontrivially-large overload set, decrease it
+ // for future calls.
+ if (N > 4) {
+ NumOverloadsToShow = 4;
+ }
+ }
+
/// Pretend that the last diagnostic issued was ignored, so any
/// subsequent notes will be suppressed, or restore a prior ignoring
/// state after ignoring some diagnostics and their notes, possibly in
@@ -806,6 +844,7 @@ public:
return FatalErrorOccurred || UnrecoverableErrorOccurred;
}
+ unsigned getNumErrors() const { return NumErrors; }
unsigned getNumWarnings() const { return NumWarnings; }
void setNumWarnings(unsigned NumWarnings) {
diff --git a/clang/include/clang/Basic/DiagnosticASTKinds.td b/clang/include/clang/Basic/DiagnosticASTKinds.td
index f6b936f5ccd9..496d86ee2fe7 100644
--- a/clang/include/clang/Basic/DiagnosticASTKinds.td
+++ b/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -349,6 +349,8 @@ def note_constexpr_new_delete_mismatch : Note<
"used to delete pointer to "
"%select{array object of type %2|non-array object of type %2|"
"object allocated with 'new'}0}1">;
+def note_constexpr_deallocate_null : Note<
+ "'std::allocator<...>::deallocate' used to delete a null pointer">;
def note_constexpr_delete_subobject : Note<
"delete of pointer%select{ to subobject|}1 '%0' "
"%select{|that does not point to complete object}1">;
diff --git a/clang/include/clang/Basic/DiagnosticCategories.td b/clang/include/clang/Basic/DiagnosticCategories.td
index d7203173790e..fb6bdd710741 100644
--- a/clang/include/clang/Basic/DiagnosticCategories.td
+++ b/clang/include/clang/Basic/DiagnosticCategories.td
@@ -7,4 +7,5 @@
//===----------------------------------------------------------------------===//
class CatInlineAsm : DiagCategory<"Inline Assembly Issue">;
+class CatSourceMgr : DiagCategory<"SourceMgr Reported Issue">;
class CatBackend : DiagCategory<"Backend Issue">;
diff --git a/clang/include/clang/Basic/DiagnosticCommonKinds.td b/clang/include/clang/Basic/DiagnosticCommonKinds.td
index a4f96a97991e..4dff3379ed35 100644
--- a/clang/include/clang/Basic/DiagnosticCommonKinds.td
+++ b/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -111,6 +111,8 @@ def err_module_cycle : Error<"cyclic dependency in module '%0': %1">,
DefaultFatal;
def err_module_prebuilt : Error<
"error in loading module '%0' from prebuilt module path">, DefaultFatal;
+def err_module_rebuild_finalized : Error<
+ "cannot rebuild module '%0' as it is already finalized">, DefaultFatal;
def note_pragma_entered_here : Note<"#pragma entered here">;
def note_decl_hiding_tag_type : Note<
"%1 %0 is hidden by a non-type declaration of %0 here">;
@@ -187,6 +189,17 @@ def ext_cxx11_longlong : Extension<
def warn_cxx98_compat_longlong : Warning<
"'long long' is incompatible with C++98">,
InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def ext_cxx2b_size_t_suffix : ExtWarn<
+ "'size_t' suffix for literals is a C++2b extension">,
+ InGroup<CXX2b>;
+def warn_cxx20_compat_size_t_suffix : Warning<
+ "'size_t' suffix for literals is incompatible with C++ standards before "
+ "C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+def err_cxx2b_size_t_suffix: Error<
+ "'size_t' suffix for literals is a C++2b feature">;
+def err_size_t_literal_too_large: Error<
+ "%select{signed |}0'size_t' literal is out of range of possible "
+ "%select{signed |}0'size_t' values">;
def err_integer_literal_too_large : Error<
"integer literal is too large to be represented in any %select{signed |}0"
"integer type">;
@@ -336,6 +349,8 @@ def warn_ignored_hip_only_option : Warning<
// OpenMP
def err_omp_more_one_clause : Error<
"directive '#pragma omp %0' cannot contain more than one '%1' clause%select{| with '%3' name modifier| with 'source' dependence}2">;
+def err_omp_required_clause : Error<
+ "directive '#pragma omp %0' requires the '%1' clause">;
// Static Analyzer Core
def err_unknown_analyzer_checker_or_package : Error<
@@ -347,4 +362,13 @@ def note_suggest_disabling_all_checkers : Note<
def warn_poison_system_directories : Warning <
"include location '%0' is unsafe for cross-compilation">,
InGroup<DiagGroup<"poison-system-directories">>, DefaultIgnore;
+
+def warn_opencl_unsupported_core_feature : Warning<
+ "%0 is a core feature in %select{OpenCL C|C++ for OpenCL}1 version %2 but not supported on this target">,
+ InGroup<OpenCLCoreFeaturesDiagGroup>, DefaultIgnore;
+
+def err_opencl_extension_and_feature_differs : Error<
+ "options %0 and %1 are set to different values">;
+def err_opencl_feature_requires : Error<
+ "feature %0 requires support of %1 feature">;
}
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index ad13f923fb63..3b4daa59f66b 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -67,6 +67,8 @@ def err_drv_no_hip_runtime : Error<
"cannot find HIP runtime. Provide its path via --rocm-path, or pass "
"-nogpuinc to build without HIP runtime.">;
+def err_drv_undetermined_amdgpu_arch : Error<
+ "Cannot determine AMDGPU architecture: %0. Consider passing it via --march.">;
def err_drv_cuda_version_unsupported : Error<
"GPU arch %0 is supported by CUDA versions between %1 and %2 (inclusive), "
"but installation at %3 is %4. Use --cuda-path to specify a different CUDA "
@@ -127,6 +129,10 @@ def err_drv_invalid_Xopenmp_target_with_args : Error<
"invalid -Xopenmp-target argument: '%0', options requiring arguments are unsupported">;
def err_drv_argument_only_allowed_with : Error<
"invalid argument '%0' only allowed with '%1'">;
+def err_drv_minws_unsupported_input_type : Error<
+ "'-fminimize-whitespace' invalid for input of type %0">;
+def err_drv_amdgpu_ieee_without_no_honor_nans : Error<
+ "invalid argument '-mno-amdgpu-ieee' only allowed with relaxed NaN handling">;
def err_drv_argument_not_allowed_with : Error<
"invalid argument '%0' not allowed with '%1'">;
def err_drv_invalid_version_number : Error<
@@ -163,12 +169,12 @@ def err_drv_invalid_libcxx_deployment : Error<
"invalid deployment target for -stdlib=libc++ (requires %0 or later)">;
def err_drv_invalid_argument_to_option : Error<
"invalid argument '%0' to -%1">;
-def err_drv_malformed_sanitizer_blacklist : Error<
- "malformed sanitizer blacklist: '%0'">;
+def err_drv_malformed_sanitizer_ignorelist : Error<
+ "malformed sanitizer ignorelist: '%0'">;
def err_drv_malformed_sanitizer_coverage_whitelist : Error<
"malformed sanitizer coverage whitelist: '%0'">;
-def err_drv_malformed_sanitizer_coverage_blacklist : Error<
- "malformed sanitizer coverage blacklist: '%0'">;
+def err_drv_malformed_sanitizer_coverage_ignorelist : Error<
+ "malformed sanitizer coverage ignorelist: '%0'">;
def err_drv_duplicate_config : Error<
"no more than one option '--config' is allowed">;
def err_drv_config_file_not_exist : Error<
@@ -210,12 +216,10 @@ def warn_drv_yc_multiple_inputs_clang_cl : Warning<
"support for '/Yc' with more than one source file not implemented yet; flag ignored">,
InGroup<ClangClPch>;
-def err_drv_dllexport_inlines_and_fallback : Error<
- "option '/Zc:dllexportInlines-' is ABI-changing and not compatible with '/fallback'">;
-
def err_drv_invalid_value : Error<"invalid value '%1' in '%0'">;
def err_drv_invalid_int_value : Error<"invalid integral value '%1' in '%0'">;
-def err_drv_invalid_value_with_suggestion : Error<"invalid value '%1' in '%0','%2'">;
+def err_drv_invalid_value_with_suggestion : Error<
+ "invalid value '%1' in '%0', expected one of: %2">;
def err_drv_invalid_remap_file : Error<
"invalid option '%0' not of the form <from-file>;<to-file>">;
def err_drv_invalid_gcc_output_type : Error<
@@ -229,6 +233,8 @@ def warn_invalid_ios_deployment_target : Warning<
"invalid iOS deployment version '%0', iOS 10 is the maximum deployment "
"target for 32-bit targets">, InGroup<InvalidIOSDeploymentTarget>,
DefaultError;
+def err_invalid_macos_32bit_deployment_target : Error<
+ "32-bit targets are not supported when building for Mac Catalyst">;
def err_drv_conflicting_deployment_targets : Error<
"conflicting deployment targets, both '%0' and '%1' are present in environment">;
def err_arc_unsupported_on_runtime : Error<
@@ -264,8 +270,9 @@ def err_drv_omp_host_target_not_supported : Error<
def err_drv_expecting_fopenmp_with_fopenmp_targets : Error<
"The option -fopenmp-targets must be used in conjunction with a -fopenmp option compatible with offloading, please use -fopenmp=libomp or -fopenmp=libiomp5.">;
def err_drv_omp_offload_target_missingbcruntime : Error<
- "No library '%0' found in the default clang lib directory or in LIBRARY_PATH. Please use --libomptarget-nvptx-bc-path to specify nvptx bitcode library.">;
+ "No library '%0' found in the default clang lib directory or in LIBRARY_PATH. Please use --libomptarget-%1-bc-path to specify %1 bitcode library.">;
def err_drv_omp_offload_target_bcruntime_not_found : Error<"Bitcode library '%0' does not exist.">;
+def err_drv_omp_offload_target_cuda_version_not_support : Error<"NVPTX target requires CUDA 9.2 or above. CUDA %0 is detected.">;
def warn_drv_omp_offload_target_duplicate : Warning<
"The OpenMP offloading target '%0' is similar to target '%1' already specified - will be ignored.">,
InGroup<OpenMPTarget>;
@@ -273,6 +280,10 @@ def err_drv_unsupported_embed_bitcode
: Error<"%0 is not supported with -fembed-bitcode">;
def err_drv_bitcode_unsupported_on_toolchain : Error<
"-fembed-bitcode is not supported on versions of iOS prior to 6.0">;
+def err_drv_negative_columns : Error<
+ "invalid value '%1' in '%0', value must be 'none' or a positive integer">;
+def err_drv_small_columns : Error<
+ "invalid value '%1' in '%0', value must be '%2' or greater">;
def err_drv_invalid_malign_branch_EQ : Error<
"invalid argument '%0' to -malign-branch=; each element must be one of: %1">;
@@ -344,6 +355,10 @@ def warn_drv_disabling_vptr_no_rtti_default : Warning<
def warn_drv_object_size_disabled_O0 : Warning<
"the object size sanitizer has no effect at -O0, but is explicitly enabled: %0">,
InGroup<InvalidCommandLineArgument>, DefaultWarnNoWerror;
+def warn_ignoring_verify_debuginfo_preserve_export : Warning<
+ "ignoring -fverify-debuginfo-preserve-export=%0 because "
+ "-fverify-debuginfo-preserve wasn't enabled">,
+ InGroup<UnusedCommandLineArgument>;
def err_invalid_branch_protection: Error <
"invalid branch protection option '%0' in '%1'">;
def err_invalid_sls_hardening : Error<
@@ -392,9 +407,6 @@ def err_test_module_file_extension_format : Error<
"-ftest-module-file-extension argument '%0' is not of the required form "
"'blockname:major:minor:hashed:user info'">;
-def warn_drv_invoking_fallback : Warning<"falling back to %0">,
- InGroup<Fallback>;
-
def warn_slash_u_filename : Warning<"'/U%0' treated as the '/U' option">,
InGroup<DiagGroup<"slash-u-filename">>;
def note_use_dashdash : Note<"Use '--' to treat subsequent arguments as filenames">;
@@ -534,6 +546,15 @@ def err_drv_cannot_mix_options : Error<"cannot specify '%1' along with '%0'">;
def err_drv_invalid_object_mode : Error<"OBJECT_MODE setting %0 is not recognized and is not a valid setting.">;
-def err_aix_default_altivec_abi : Error<
- "The default Altivec ABI on AIX is not yet supported, use '-mabi=vec-extabi' for the extended Altivec ABI">;
+def err_aix_unsupported_tls_model : Error<"TLS model '%0' is not yet supported on AIX">;
+
+def err_invalid_cxx_abi : Error<"Invalid C++ ABI name '%0'">;
+def err_unsupported_cxx_abi : Error<"C++ ABI '%0' is not supported on target triple '%1'">;
+
+def note_cc1_round_trip_original : Note<"Original arguments in round-trip: %0">;
+def note_cc1_round_trip_generated : Note<"Generated arguments #%0 in round-trip: %1">;
+def remark_cc1_round_trip_generated : Remark<"Generated arguments #%0 in round-trip: %1">, InGroup<RoundTripCC1Args>;
+def err_cc1_round_trip_fail_then_ok : Error<"Original arguments parse failed, then succeeded in round-trip">;
+def err_cc1_round_trip_ok_then_fail : Error<"Generated arguments parse failed in round-trip">;
+def err_cc1_round_trip_mismatch : Error<"Generated arguments do not match in round-trip">;
}
diff --git a/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
index b9f8c78e43da..0f4ccec38550 100644
--- a/clang/include/clang/Basic/DiagnosticFrontendKinds.td
+++ b/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -19,13 +19,16 @@ def err_fe_inline_asm : Error<"%0">, CatInlineAsm;
def warn_fe_inline_asm : Warning<"%0">, CatInlineAsm, InGroup<BackendInlineAsm>;
def note_fe_inline_asm : Note<"%0">, CatInlineAsm;
def note_fe_inline_asm_here : Note<"instantiated into assembly here">;
+def err_fe_source_mgr : Error<"%0">, CatSourceMgr;
+def warn_fe_source_mgr : Warning<"%0">, CatSourceMgr, InGroup<BackendSourceMgr>;
+def note_fe_source_mgr : Note<"%0">, CatSourceMgr;
def err_fe_cannot_link_module : Error<"cannot link module '%0': %1">,
DefaultFatal;
-def warn_fe_frame_larger_than : Warning<"stack frame size of %0 bytes in %q1">,
- BackendInfo, InGroup<BackendFrameLargerThanEQ>;
+def warn_fe_frame_larger_than : Warning<"stack frame size (%0) exceeds limit (%1) in %q2">,
+ BackendInfo, InGroup<BackendFrameLargerThan>;
def warn_fe_backend_frame_larger_than: Warning<"%0">,
- BackendInfo, InGroup<BackendFrameLargerThanEQ>;
+ BackendInfo, InGroup<BackendFrameLargerThan>;
def err_fe_backend_frame_larger_than: Error<"%0">, BackendInfo;
def note_fe_backend_frame_larger_than: Note<"%0">, BackendInfo;
@@ -109,7 +112,7 @@ def err_fe_action_not_available : Error<
def err_fe_invalid_alignment : Error<
"invalid value '%1' in '%0'; alignment must be a power of 2">;
def err_fe_invalid_exception_model
- : Error<"invalid exception model '%select{none|dwarf|sjlj|arm|seh|wasm|aix}0' for target '%1'">;
+ : Error<"invalid exception model '%select{none|sjlj|seh|dwarf|wasm}0' for target '%1'">;
def warn_fe_concepts_ts_flag : Warning<
"-fconcepts-ts is deprecated - use '-std=c++20' for Concepts support">,
InGroup<Deprecated>;
@@ -155,6 +158,8 @@ def err_verify_invalid_no_diags : Error<
"%select{'expected-no-diagnostics' directive|other expected directives}0">;
def err_verify_no_directives : Error<
"no expected directives found: consider use of 'expected-no-diagnostics'">;
+def err_verify_nonconst_addrspace : Error<
+ "qualifier 'const' is needed for variables in address space '%0'">;
def note_fixit_applied : Note<"FIX-IT applied suggested code changes">;
def note_fixit_in_macro : Note<
diff --git a/clang/include/clang/Basic/DiagnosticGroups.td b/clang/include/clang/Basic/DiagnosticGroups.td
index 04ba89aa457e..4b4928a7a00e 100644
--- a/clang/include/clang/Basic/DiagnosticGroups.td
+++ b/clang/include/clang/Basic/DiagnosticGroups.td
@@ -162,8 +162,12 @@ def CXX11CompatDeprecatedWritableStr :
def DeprecatedArrayCompare : DiagGroup<"deprecated-array-compare">;
def DeprecatedAttributes : DiagGroup<"deprecated-attributes">;
def DeprecatedCommaSubscript : DiagGroup<"deprecated-comma-subscript">;
-def DeprecatedCopy : DiagGroup<"deprecated-copy">;
-def DeprecatedCopyDtor : DiagGroup<"deprecated-copy-dtor">;
+def DeprecatedCopyWithUserProvidedCopy : DiagGroup<"deprecated-copy-with-user-provided-copy">;
+def DeprecatedCopyWithUserProvidedDtor : DiagGroup<"deprecated-copy-with-user-provided-dtor">;
+def DeprecatedCopy : DiagGroup<"deprecated-copy", [DeprecatedCopyWithUserProvidedCopy]>;
+def DeprecatedCopyWithDtor : DiagGroup<"deprecated-copy-with-dtor", [DeprecatedCopyWithUserProvidedDtor]>;
+// For compatibility with GCC.
+def : DiagGroup<"deprecated-copy-dtor", [DeprecatedCopyWithDtor]>;
def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
def UnavailableDeclarations : DiagGroup<"unavailable-declarations">;
def UnguardedAvailabilityNew : DiagGroup<"unguarded-availability-new">;
@@ -186,7 +190,7 @@ def Deprecated : DiagGroup<"deprecated", [DeprecatedAnonEnumEnumConversion,
DeprecatedAttributes,
DeprecatedCommaSubscript,
DeprecatedCopy,
- DeprecatedCopyDtor,
+ DeprecatedCopyWithDtor,
DeprecatedDeclarations,
DeprecatedDynamicExceptionSpec,
DeprecatedEnumCompare,
@@ -253,17 +257,33 @@ def : DiagGroup<"c++1z-compat-mangling", [CXX17CompatMangling]>;
// Name of this warning in GCC.
def NoexceptType : DiagGroup<"noexcept-type", [CXX17CompatMangling]>;
-// Warnings for C++1y code which is not compatible with prior C++ standards.
-def CXXPre14Compat : DiagGroup<"c++98-c++11-compat">;
-def CXXPre14CompatPedantic : DiagGroup<"c++98-c++11-compat-pedantic",
+// Warnings for C code which is not compatible with previous C standards.
+def CPre2xCompat : DiagGroup<"pre-c2x-compat">;
+def CPre2xCompatPedantic : DiagGroup<"pre-c2x-compat-pedantic",
+ [CPre2xCompat]>;
+
+// Warnings for C++ code which is not compatible with previous C++ standards.
+def CXXPre14Compat : DiagGroup<"pre-c++14-compat">;
+def : DiagGroup<"c++98-c++11-compat", [CXXPre14Compat]>;
+def CXXPre14CompatPedantic : DiagGroup<"pre-c++14-compat-pedantic",
[CXXPre14Compat,
CXXPre14CompatBinaryLiteral]>;
-def CXXPre17Compat : DiagGroup<"c++98-c++11-c++14-compat">;
-def CXXPre17CompatPedantic : DiagGroup<"c++98-c++11-c++14-compat-pedantic",
+def : DiagGroup<"c++98-c++11-compat-pedantic", [CXXPre14CompatPedantic]>;
+def CXXPre17Compat : DiagGroup<"pre-c++17-compat">;
+def : DiagGroup<"c++98-c++11-c++14-compat", [CXXPre17Compat]>;
+def CXXPre17CompatPedantic : DiagGroup<"pre-c++17-compat-pedantic",
[CXXPre17Compat]>;
-def CXXPre20Compat : DiagGroup<"c++98-c++11-c++14-c++17-compat">;
-def CXXPre20CompatPedantic : DiagGroup<"c++98-c++11-c++14-c++17-compat-pedantic",
+def : DiagGroup<"c++98-c++11-c++14-compat-pedantic",
+ [CXXPre17CompatPedantic]>;
+def CXXPre20Compat : DiagGroup<"pre-c++20-compat">;
+def : DiagGroup<"c++98-c++11-c++14-c++17-compat", [CXXPre20Compat]>;
+def CXXPre20CompatPedantic : DiagGroup<"pre-c++20-compat-pedantic",
[CXXPre20Compat]>;
+def : DiagGroup<"c++98-c++11-c++14-c++17-compat-pedantic",
+ [CXXPre20CompatPedantic]>;
+def CXXPre2bCompat : DiagGroup<"pre-c++2b-compat">;
+def CXXPre2bCompatPedantic :
+ DiagGroup<"pre-c++2b-compat-pedantic", [CXXPre2bCompat]>;
def CXX98CompatBindToTemporaryCopy :
DiagGroup<"c++98-compat-bind-to-temporary-copy">;
@@ -277,7 +297,8 @@ def CXX98Compat : DiagGroup<"c++98-compat",
CXX98CompatUnnamedTypeTemplateArgs,
CXXPre14Compat,
CXXPre17Compat,
- CXXPre20Compat]>;
+ CXXPre20Compat,
+ CXXPre2bCompat]>;
// Warnings for C++11 features which are Extensions in C++98 mode.
def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
[CXX98Compat,
@@ -285,7 +306,8 @@ def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic",
CXX98CompatExtraSemi,
CXXPre14CompatPedantic,
CXXPre17CompatPedantic,
- CXXPre20CompatPedantic]>;
+ CXXPre20CompatPedantic,
+ CXXPre2bCompatPedantic]>;
def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
@@ -314,33 +336,40 @@ def CXX11Compat : DiagGroup<"c++11-compat",
CXX11CompatDeprecatedWritableStr,
CXXPre14Compat,
CXXPre17Compat,
- CXXPre20Compat]>;
+ CXXPre20Compat,
+ CXXPre2bCompat]>;
def : DiagGroup<"c++0x-compat", [CXX11Compat]>;
def CXX11CompatPedantic : DiagGroup<"c++11-compat-pedantic",
[CXX11Compat,
CXXPre14CompatPedantic,
CXXPre17CompatPedantic,
- CXXPre20CompatPedantic]>;
+ CXXPre20CompatPedantic,
+ CXXPre2bCompatPedantic]>;
def CXX14Compat : DiagGroup<"c++14-compat", [CXXPre17Compat,
- CXXPre20Compat]>;
+ CXXPre20Compat,
+ CXXPre2bCompat]>;
def CXX14CompatPedantic : DiagGroup<"c++14-compat-pedantic",
[CXX14Compat,
CXXPre17CompatPedantic,
- CXXPre20CompatPedantic]>;
+ CXXPre20CompatPedantic,
+ CXXPre2bCompatPedantic]>;
def CXX17Compat : DiagGroup<"c++17-compat", [DeprecatedRegister,
DeprecatedIncrementBool,
CXX17CompatMangling,
- CXXPre20Compat]>;
+ CXXPre20Compat,
+ CXXPre2bCompat]>;
def CXX17CompatPedantic : DiagGroup<"c++17-compat-pedantic",
[CXX17Compat,
- CXXPre20CompatPedantic]>;
+ CXXPre20CompatPedantic,
+ CXXPre2bCompatPedantic]>;
def : DiagGroup<"c++1z-compat", [CXX17Compat]>;
-def CXX20Compat : DiagGroup<"c++20-compat">;
+def CXX20Compat : DiagGroup<"c++20-compat", [CXXPre2bCompat]>;
def CXX20CompatPedantic : DiagGroup<"c++20-compat-pedantic",
- [CXX20Compat]>;
+ [CXX20Compat,
+ CXXPre2bCompatPedantic]>;
def : DiagGroup<"c++2a-compat", [CXX20Compat]>;
def : DiagGroup<"c++2a-compat-pedantic", [CXX20CompatPedantic]>;
@@ -437,6 +466,7 @@ def ModuleBuild : DiagGroup<"module-build">;
def ModuleImport : DiagGroup<"module-import">;
def ModuleConflict : DiagGroup<"module-conflict">;
def ModuleFileExtension : DiagGroup<"module-file-extension">;
+def RoundTripCC1Args : DiagGroup<"round-trip-cc1-args">;
def NewlineEOF : DiagGroup<"newline-eof">;
def Nullability : DiagGroup<"nullability">;
def NullabilityDeclSpec : DiagGroup<"nullability-declspec">;
@@ -457,6 +487,7 @@ def ClassVarargs : DiagGroup<"class-varargs", [NonPODVarargs]>;
def : DiagGroup<"nonportable-cfstrings">;
def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
def NullPointerArithmetic : DiagGroup<"null-pointer-arithmetic">;
+def NullPointerSubtraction : DiagGroup<"null-pointer-subtraction">;
def : DiagGroup<"effc++", [NonVirtualDtor]>;
def OveralignedType : DiagGroup<"over-aligned">;
def OldStyleCast : DiagGroup<"old-style-cast">;
@@ -473,6 +504,7 @@ def PrivateExtern : DiagGroup<"private-extern">;
def SelTypeCast : DiagGroup<"cast-of-sel-type">;
def FunctionDefInObjCContainer : DiagGroup<"function-def-in-objc-container">;
def BadFunctionCast : DiagGroup<"bad-function-cast">;
+def CastFunctionType : DiagGroup<"cast-function-type">;
def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
def ObjCPropertyNoAttribute : DiagGroup<"objc-property-no-attribute">;
def ObjCPropertyAssignOnObjectType : DiagGroup<"objc-property-assign-on-object-type">;
@@ -494,18 +526,17 @@ def OpenCLUnsupportedRGBA: DiagGroup<"opencl-unsupported-rgba">;
def UnderalignedExceptionObject : DiagGroup<"underaligned-exception-object">;
def DeprecatedObjCIsaUsage : DiagGroup<"deprecated-objc-isa-usage">;
def ExplicitInitializeCall : DiagGroup<"explicit-initialize-call">;
+def OrderedCompareFunctionPointers : DiagGroup<"ordered-compare-function-pointers">;
def Packed : DiagGroup<"packed">;
def Padded : DiagGroup<"padded">;
def PessimizingMove : DiagGroup<"pessimizing-move">;
-def ReturnStdMoveInCXX11 : DiagGroup<"return-std-move-in-c++11">;
def ReturnStdMove : DiagGroup<"return-std-move">;
def PointerArith : DiagGroup<"pointer-arith">;
def PoundWarning : DiagGroup<"#warnings">;
def PoundPragmaMessage : DiagGroup<"#pragma-messages">,
DiagCategory<"#pragma message Directive">;
-def : DiagGroup<"pointer-to-int-cast">;
def : DiagGroup<"redundant-decls">;
def RedeclaredClassMember : DiagGroup<"redeclared-class-member">;
def GNURedeclaredEnum : DiagGroup<"gnu-redeclared-enum">;
@@ -540,7 +571,6 @@ def ShadowAll : DiagGroup<"shadow-all", [Shadow, ShadowFieldInConstructor,
def Shorten64To32 : DiagGroup<"shorten-64-to-32">;
def : DiagGroup<"sign-promo">;
def SignCompare : DiagGroup<"sign-compare">;
-def : DiagGroup<"stack-protector">;
def : DiagGroup<"switch-default">;
def : DiagGroup<"synth">;
def SizeofArrayArgument : DiagGroup<"sizeof-array-argument">;
@@ -567,11 +597,13 @@ def SwiftNameAttribute : DiagGroup<"swift-name-attribute">;
def IntInBoolContext : DiagGroup<"int-in-bool-context">;
def TautologicalTypeLimitCompare : DiagGroup<"tautological-type-limit-compare">;
def TautologicalUnsignedZeroCompare : DiagGroup<"tautological-unsigned-zero-compare">;
+def TautologicalUnsignedCharZeroCompare : DiagGroup<"tautological-unsigned-char-zero-compare">;
def TautologicalUnsignedEnumZeroCompare : DiagGroup<"tautological-unsigned-enum-zero-compare">;
// For compatibility with GCC. Tautological comparison warnings for constants
// that are an extremal value of the type.
def TypeLimits : DiagGroup<"type-limits", [TautologicalTypeLimitCompare,
TautologicalUnsignedZeroCompare,
+ TautologicalUnsignedCharZeroCompare,
TautologicalUnsignedEnumZeroCompare]>;
// Additional tautological comparison warnings based on the expression, not
// only on its type.
@@ -608,7 +640,8 @@ def : DiagGroup<"sequence-point", [Unsequenced]>;
// Preprocessor warnings.
def AmbiguousMacro : DiagGroup<"ambiguous-macro">;
def KeywordAsMacro : DiagGroup<"keyword-macro">;
-def ReservedIdAsMacro : DiagGroup<"reserved-id-macro">;
+def ReservedIdAsMacro : DiagGroup<"reserved-macro-identifier">;
+def ReservedIdAsMacroAlias : DiagGroup<"reserved-id-macro", [ReservedIdAsMacro]>;
// Just silence warnings about -Wstrict-aliasing for now.
def : DiagGroup<"strict-aliasing=0">;
@@ -626,7 +659,6 @@ def : DiagGroup<"strict-overflow=5">;
def : DiagGroup<"strict-overflow">;
def InvalidOffsetof : DiagGroup<"invalid-offsetof">;
-def : DiagGroup<"strict-prototypes">;
def StrictSelector : DiagGroup<"strict-selector-match">;
def MethodDuplicate : DiagGroup<"duplicate-method-match">;
def ObjCCStringFormat : DiagGroup<"cstring-format-directive">;
@@ -695,6 +727,7 @@ def UnusedMemberFunction : DiagGroup<"unused-member-function",
def UnusedLabel : DiagGroup<"unused-label">;
def UnusedLambdaCapture : DiagGroup<"unused-lambda-capture">;
def UnusedParameter : DiagGroup<"unused-parameter">;
+def UnusedButSetParameter : DiagGroup<"unused-but-set-parameter">;
def UnusedResult : DiagGroup<"unused-result">;
def PotentiallyEvaluatedExpression : DiagGroup<"potentially-evaluated-expression">;
def UnevaluatedExpression : DiagGroup<"unevaluated-expression",
@@ -704,6 +737,7 @@ def UnusedValue : DiagGroup<"unused-value", [UnusedComparison, UnusedResult,
def UnusedConstVariable : DiagGroup<"unused-const-variable">;
def UnusedVariable : DiagGroup<"unused-variable",
[UnusedConstVariable]>;
+def UnusedButSetVariable : DiagGroup<"unused-but-set-variable">;
def UnusedLocalTypedef : DiagGroup<"unused-local-typedef">;
def UnusedPropertyIvar : DiagGroup<"unused-property-ivar">;
def UnusedGetterReturnValue : DiagGroup<"unused-getter-return-value">;
@@ -752,7 +786,6 @@ def Visibility : DiagGroup<"visibility">;
def ZeroLengthArray : DiagGroup<"zero-length-array">;
def GNUZeroLineDirective : DiagGroup<"gnu-zero-line-directive">;
def GNUZeroVariadicMacroArguments : DiagGroup<"gnu-zero-variadic-macro-arguments">;
-def Fallback : DiagGroup<"fallback">;
def MisleadingIndentation : DiagGroup<"misleading-indentation">;
// This covers both the deprecated case (in C++98)
@@ -773,6 +806,9 @@ def LargeByValueCopy : DiagGroup<"large-by-value-copy">;
def DuplicateArgDecl : DiagGroup<"duplicate-method-arg">;
def SignedEnumBitfield : DiagGroup<"signed-enum-bitfield">;
+def ReservedIdentifier : DiagGroup<"reserved-identifier",
+ [ReservedIdAsMacro]>;
+
// Unreachable code warning groups.
//
// The goal is make -Wunreachable-code on by default, in -Wall, or at
@@ -843,7 +879,7 @@ def Unused : DiagGroup<"unused",
// UnusedMemberFunction, (clean-up llvm before enabling)
UnusedPrivateField, UnusedLambdaCapture,
UnusedLocalTypedef, UnusedValue, UnusedVariable,
- UnusedPropertyIvar]>,
+ UnusedButSetVariable, UnusedPropertyIvar]>,
DiagCategory<"Unused Entity Issue">;
// Format settings.
@@ -895,7 +931,9 @@ def Extra : DiagGroup<"extra", [
MissingMethodReturnType,
SignCompare,
UnusedParameter,
+ UnusedButSetParameter,
NullPointerArithmetic,
+ NullPointerSubtraction,
EmptyInitStatement,
StringConcatation,
FUseLdPath,
@@ -1006,6 +1044,10 @@ def CXX17 : DiagGroup<"c++17-extensions">;
// earlier C++ versions.
def CXX20 : DiagGroup<"c++20-extensions", [CXX20Designator]>;
+// A warning group for warnings about using C++2b features as extensions in
+// earlier C++ versions.
+def CXX2b : DiagGroup<"c++2b-extensions">;
+
def : DiagGroup<"c++0x-extensions", [CXX11]>;
def : DiagGroup<"c++1y-extensions", [CXX14]>;
def : DiagGroup<"c++1z-extensions", [CXX17]>;
@@ -1043,6 +1085,9 @@ def GNU : DiagGroup<"gnu", [GNUAlignofExpression, GNUAnonymousStruct,
// A warning group for warnings about code that clang accepts but gcc doesn't.
def GccCompat : DiagGroup<"gcc-compat">;
+// A warning group for warnings about code that may be incompatible on AIX.
+def AIXCompat : DiagGroup<"aix-compat">;
+
// Warnings for Microsoft extensions.
def MicrosoftCharize : DiagGroup<"microsoft-charize">;
def MicrosoftDrectveSection : DiagGroup<"microsoft-drectve-section">;
@@ -1050,6 +1095,7 @@ def MicrosoftInclude : DiagGroup<"microsoft-include">;
def MicrosoftCppMacro : DiagGroup<"microsoft-cpp-macro">;
def MicrosoftFixedEnum : DiagGroup<"microsoft-fixed-enum">;
def MicrosoftSealed : DiagGroup<"microsoft-sealed">;
+def MicrosoftAbstract : DiagGroup<"microsoft-abstract">;
def MicrosoftUnqualifiedFriend : DiagGroup<"microsoft-unqualified-friend">;
def MicrosoftExceptionSpec : DiagGroup<"microsoft-exception-spec">;
def MicrosoftUsingDecl : DiagGroup<"microsoft-using-decl">;
@@ -1078,6 +1124,8 @@ def MicrosoftAnonTag : DiagGroup<"microsoft-anon-tag">;
def MicrosoftCommentPaste : DiagGroup<"microsoft-comment-paste">;
def MicrosoftEndOfFile : DiagGroup<"microsoft-end-of-file">;
def MicrosoftInaccessibleBase : DiagGroup<"microsoft-inaccessible-base">;
+def MicrosoftStaticAssert : DiagGroup<"microsoft-static-assert">;
+
// Aliases.
def : DiagGroup<"msvc-include", [MicrosoftInclude]>;
// -Wmsvc-include = -Wmicrosoft-include
@@ -1085,7 +1133,7 @@ def : DiagGroup<"msvc-include", [MicrosoftInclude]>;
// Warnings group for warnings about Microsoft extensions.
def Microsoft : DiagGroup<"microsoft",
[MicrosoftCharize, MicrosoftDrectveSection, MicrosoftInclude,
- MicrosoftCppMacro, MicrosoftFixedEnum, MicrosoftSealed,
+ MicrosoftCppMacro, MicrosoftFixedEnum, MicrosoftSealed, MicrosoftAbstract,
MicrosoftUnqualifiedFriend, MicrosoftExceptionSpec, MicrosoftUsingDecl,
MicrosoftMutableReference, MicrosoftPureDefinition,
MicrosoftUnionMemberReference, MicrosoftExplicitConstructorCall,
@@ -1093,7 +1141,7 @@ def Microsoft : DiagGroup<"microsoft",
MicrosoftRedeclareStatic, MicrosoftEnumForwardReference, MicrosoftGoto,
MicrosoftFlexibleArray, MicrosoftExtraQualification, MicrosoftCast,
MicrosoftConstInit, MicrosoftVoidPseudoDtor, MicrosoftAnonTag,
- MicrosoftCommentPaste, MicrosoftEndOfFile,
+ MicrosoftCommentPaste, MicrosoftEndOfFile, MicrosoftStaticAssert,
MicrosoftInconsistentDllImport]>;
def ClangClPch : DiagGroup<"clang-cl-pch">;
@@ -1143,13 +1191,19 @@ def OpenMPClauses : DiagGroup<"openmp-clauses">;
def OpenMPLoopForm : DiagGroup<"openmp-loop-form">;
def OpenMPMapping : DiagGroup<"openmp-mapping">;
def OpenMPTarget : DiagGroup<"openmp-target", [OpenMPMapping]>;
+def OpenMPPre51Compat : DiagGroup<"pre-openmp-51-compat">;
+def OpenMP51Ext : DiagGroup<"openmp-51-extensions">;
def OpenMP : DiagGroup<"openmp", [
- SourceUsesOpenMP, OpenMPClauses, OpenMPLoopForm, OpenMPTarget, OpenMPMapping
+ SourceUsesOpenMP, OpenMPClauses, OpenMPLoopForm, OpenMPTarget,
+ OpenMPMapping, OpenMP51Ext
]>;
// Backend warnings.
def BackendInlineAsm : DiagGroup<"inline-asm">;
-def BackendFrameLargerThanEQ : DiagGroup<"frame-larger-than=">;
+def BackendSourceMgr : DiagGroup<"source-mgr">;
+def BackendFrameLargerThan : DiagGroup<"frame-larger-than">;
+// Compatibility flag name from old versions of Clang.
+def : DiagGroup<"frame-larger-than=", [BackendFrameLargerThan]>;
def BackendPlugin : DiagGroup<"backend-plugin">;
def RemarkBackendPlugin : DiagGroup<"remark-backend-plugin">;
def BackendOptimizationRemark : DiagGroup<"pass">;
@@ -1247,3 +1301,5 @@ in addition with the pragmas or -fmax-tokens flag to get any warnings.
def WebAssemblyExceptionSpec : DiagGroup<"wasm-exception-spec">;
def RTTI : DiagGroup<"rtti">;
+
+def OpenCLCoreFeaturesDiagGroup : DiagGroup<"pedantic-core-features">;
diff --git a/clang/include/clang/Basic/DiagnosticIDs.h b/clang/include/clang/Basic/DiagnosticIDs.h
index 7fd107c4add7..288504def5eb 100644
--- a/clang/include/clang/Basic/DiagnosticIDs.h
+++ b/clang/include/clang/Basic/DiagnosticIDs.h
@@ -36,7 +36,7 @@ namespace clang {
DIAG_SIZE_AST = 250,
DIAG_SIZE_COMMENT = 100,
DIAG_SIZE_CROSSTU = 100,
- DIAG_SIZE_SEMA = 4000,
+ DIAG_SIZE_SEMA = 4500,
DIAG_SIZE_ANALYSIS = 100,
DIAG_SIZE_REFACTORING = 1000,
};
diff --git a/clang/include/clang/Basic/DiagnosticLexKinds.td b/clang/include/clang/Basic/DiagnosticLexKinds.td
index 130e7687bad2..ce6d0d0394b4 100644
--- a/clang/include/clang/Basic/DiagnosticLexKinds.td
+++ b/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -179,6 +179,9 @@ def err_invalid_suffix_constant : Error<
def warn_cxx11_compat_digit_separator : Warning<
"digit separators are incompatible with C++ standards before C++14">,
InGroup<CXXPre14Compat>, DefaultIgnore;
+def warn_c2x_compat_digit_separator : Warning<
+ "digit separators are incompatible with C standards before C2x">,
+ InGroup<CPre2xCompat>, DefaultIgnore;
def err_digit_separator_not_between_digits : Error<
"digit separator cannot appear at %select{start|end}0 of digit sequence">;
def warn_extraneous_char_constant : Warning<
@@ -452,9 +455,11 @@ def err_pp_malformed_ident : Error<"invalid #ident directive">;
def err_pp_unterminated_conditional : Error<
"unterminated conditional directive">;
def pp_err_else_after_else : Error<"#else after #else">;
-def pp_err_elif_after_else : Error<"#elif after #else">;
+def pp_err_elif_after_else : Error<
+ "%select{#elif|#elifdef|#elifndef}0 after #else">;
def pp_err_else_without_if : Error<"#else without #if">;
-def pp_err_elif_without_if : Error<"#elif without #if">;
+def pp_err_elif_without_if : Error<
+ "%select{#elif|#elifdef|#elifndef}0 without #if">;
def err_pp_endif_without_if : Error<"#endif without #if">;
def err_pp_expected_value_in_expr : Error<"expected value in expression">;
def err_pp_expected_rparen : Error<"expected ')' in preprocessor expression">;
diff --git a/clang/include/clang/Basic/DiagnosticOptions.def b/clang/include/clang/Basic/DiagnosticOptions.def
index 927710a0cb9a..7be81f6b6a95 100644
--- a/clang/include/clang/Basic/DiagnosticOptions.def
+++ b/clang/include/clang/Basic/DiagnosticOptions.def
@@ -78,7 +78,6 @@ ENUM_DIAGOPT(VerifyIgnoreUnexpected, DiagnosticLevelMask, 4,
/// -verify.
DIAGOPT(ElideType, 1, 0) /// Elide identical types in template diffing
DIAGOPT(ShowTemplateTree, 1, 0) /// Print a template tree when diffing
-DIAGOPT(CLFallbackMode, 1, 0) /// Format for clang-cl fallback mode
VALUE_DIAGOPT(ErrorLimit, 32, 0) /// Limit # errors emitted.
/// Limit depth of macro expansion backtrace.
diff --git a/clang/include/clang/Basic/DiagnosticParseKinds.td b/clang/include/clang/Basic/DiagnosticParseKinds.td
index 0ed80a481e78..7e4b0841e06b 100644
--- a/clang/include/clang/Basic/DiagnosticParseKinds.td
+++ b/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -21,7 +21,7 @@ let CategoryName = "Inline Assembly Issue" in {
def err_asm_empty : Error<"__asm used with no assembly instructions">;
def err_inline_ms_asm_parsing : Error<"%0">;
def err_msasm_unsupported_arch : Error<
- "Unsupported architecture '%0' for MS-style inline assembly">;
+ "unsupported architecture '%0' for MS-style inline assembly">;
def err_msasm_unable_to_create_target : Error<
"MS-style inline assembly is not available: %0">;
def err_gnu_inline_asm_disabled : Error<
@@ -424,12 +424,21 @@ def err_bool_redeclaration : Error<
def warn_cxx98_compat_static_assert : Warning<
"static_assert declarations are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
-def ext_static_assert_no_message : ExtWarn<
- "static_assert with no message is a C++17 extension">, InGroup<CXX17>;
+def ext_ms_static_assert : ExtWarn<
+ "use of 'static_assert' without inclusion of <assert.h> is a Microsoft "
+ "extension">, InGroup<MicrosoftStaticAssert>;
+def ext_cxx_static_assert_no_message : ExtWarn<
+ "'static_assert' with no message is a C++17 extension">, InGroup<CXX17>;
+def ext_c_static_assert_no_message : ExtWarn<
+ "'_Static_assert' with no message is a C2x extension">, InGroup<C2x>;
def warn_cxx14_compat_static_assert_no_message : Warning<
- "static_assert with no message is incompatible with C++ standards before "
+ "'static_assert' with no message is incompatible with C++ standards before "
"C++17">,
DefaultIgnore, InGroup<CXXPre17Compat>;
+def warn_c17_compat_static_assert_no_message : Warning<
+ "'_Static_assert' with no message is incompatible with C standards before "
+ "C2x">,
+ DefaultIgnore, InGroup<CPre2xCompat>;
def err_function_definition_not_allowed : Error<
"function definition is not allowed here">;
def err_expected_end_of_enumerator : Error<
@@ -571,6 +580,12 @@ def warn_cxx98_compat_noexcept_decl : Warning<
def err_expected_catch : Error<"expected catch">;
def err_using_namespace_in_class : Error<
"'using namespace' is not allowed in classes">;
+def warn_cxx17_compat_using_enum_declaration : Warning<
+ "using enum declaration is incompatible with C++ standards before C++20">,
+ InGroup<CXXPre20Compat>, DefaultIgnore;
+def ext_using_enum_declaration : ExtWarn<
+ "using enum declaration is a C++20 extension">,
+ InGroup<CXX20>;
def err_constructor_bad_name : Error<
"missing return type for function %0; did you mean the constructor name %1?">;
def err_destructor_tilde_identifier : Error<
@@ -675,8 +690,6 @@ def err_attribute_requires_arguments : Error<
"parentheses must be omitted if %0 attribute's argument list is empty">;
def err_cxx11_attribute_forbids_ellipsis : Error<
"attribute %0 cannot be used as an attribute pack">;
-def err_cxx11_attribute_repeated : Error<
- "attribute %0 cannot appear multiple times in an attribute specifier">;
def warn_cxx14_compat_using_attribute_ns : Warning<
"default scope specifier for attributes is incompatible with C++ standards "
"before C++17">, InGroup<CXXPre17Compat>, DefaultIgnore;
@@ -686,6 +699,9 @@ def ext_using_attribute_ns : ExtWarn<
def err_using_attribute_ns_conflict : Error<
"attribute with scope specifier cannot follow default scope specifier">;
def err_attributes_not_allowed : Error<"an attribute list cannot appear here">;
+def ext_cxx11_attr_placement : ExtWarn<
+ "ISO C++ does not allow an attribute list to appear here">,
+ InGroup<DiagGroup<"cxx-attribute-extension">>;
def err_attributes_misplaced : Error<"misplaced attributes; expected attributes here">;
def err_l_square_l_square_not_attribute : Error<
"C++11 only allows consecutive left square brackets when "
@@ -915,10 +931,16 @@ def err_override_control_interface : Error<
def ext_ms_sealed_keyword : ExtWarn<
"'sealed' keyword is a Microsoft extension">,
InGroup<MicrosoftSealed>;
+def ext_ms_abstract_keyword : ExtWarn<
+ "'abstract' keyword is a Microsoft extension">,
+ InGroup<MicrosoftAbstract>;
def err_access_specifier_interface : Error<
"interface types cannot specify '%select{private|protected}0' access">;
+def err_duplicate_class_virt_specifier : Error<
+ "class already marked '%0'">;
+
def err_duplicate_virt_specifier : Error<
"class member already marked '%0'">;
@@ -940,9 +962,6 @@ def err_expected_lambda_body : Error<"expected body of lambda expression">;
def warn_cxx98_compat_lambda : Warning<
"lambda expressions are incompatible with C++98">,
InGroup<CXX98Compat>, DefaultIgnore;
-def err_lambda_missing_parens : Error<
- "lambda requires '()' before %select{'mutable'|return type|"
- "attribute specifier|'constexpr'|'consteval'|'requires' clause}0">;
def err_lambda_decl_specifier_repeated : Error<
"%select{'mutable'|'constexpr'|'consteval'}0 cannot appear multiple times in a lambda declarator">;
def err_lambda_capture_misplaced_ellipsis : Error<
@@ -952,6 +971,16 @@ def err_lambda_capture_multiple_ellipses : Error<
"multiple ellipses in pack capture">;
def err_capture_default_first : Error<
"capture default must be first">;
+def ext_decl_attrs_on_lambda : ExtWarn<
+ "an attribute specifier sequence in this position is a C++2b extension">,
+ InGroup<CXX2b>;
+def ext_lambda_missing_parens : ExtWarn<
+ "lambda without a parameter clause is a C++2b extension">,
+ InGroup<CXX2b>;
+def warn_cxx20_compat_decl_attrs_on_lambda : Warning<
+ "an attribute specifier sequence in this position is incompatible with C++ "
+ "standards before C++2b">, InGroup<CXXPre2bCompat>, DefaultIgnore;
+
// C++17 lambda expressions
def err_expected_star_this_capture : Error<
"expected 'this' following '*' in lambda capture list">;
@@ -1220,22 +1249,18 @@ def note_pragma_attribute_namespace_on_attribute : Note<
"omit the namespace to add attributes to the most-recently"
" pushed attribute group">;
-def err_opencl_unroll_hint_on_non_loop : Error<
- "OpenCL only supports 'opencl_unroll_hint' attribute on for, while, and do statements">;
-
// OpenCL EXTENSION pragma (OpenCL 1.1 [9.1])
def warn_pragma_expected_colon : Warning<
"missing ':' after %0 - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_expected_predicate : Warning<
"expected %select{'enable', 'disable', 'begin' or 'end'|'disable'}0 - ignoring">, InGroup<IgnoredPragmas>;
-def warn_pragma_begin_end_mismatch : Warning<
- "OpenCL extension end directive mismatches begin directive - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_unknown_extension : Warning<
"unknown OpenCL extension %0 - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_unsupported_extension : Warning<
"unsupported OpenCL extension %0 - ignoring">, InGroup<IgnoredPragmas>;
def warn_pragma_extension_is_core : Warning<
- "OpenCL extension %0 is core feature or supported optional core feature - ignoring">, InGroup<DiagGroup<"pedantic-core-features">>, DefaultIgnore;
+ "OpenCL extension %0 is core feature or supported optional core feature - ignoring">,
+ InGroup<OpenCLCoreFeaturesDiagGroup>, DefaultIgnore;
// OpenCL errors.
def err_opencl_taking_function_address_parser : Error<
@@ -1302,7 +1327,15 @@ def warn_omp_unknown_assumption_clause_without_args
def note_omp_assumption_clause_continue_here
: Note<"the ignored tokens spans until here">;
def err_omp_declare_target_unexpected_clause: Error<
- "unexpected '%0' clause, only %select{'to' or 'link'|'to', 'link' or 'device_type'}1 clauses expected">;
+ "unexpected '%0' clause, only %select{'device_type'|'to' or 'link'|'to', 'link' or 'device_type'}1 clauses expected">;
+def err_omp_begin_declare_target_unexpected_implicit_to_clause: Error<
+ "unexpected '(', only 'to', 'link' or 'device_type' clauses expected for 'begin declare target' directive">;
+def err_omp_declare_target_unexpected_clause_after_implicit_to: Error<
+ "unexpected clause after an implicit 'to' clause">;
+def err_omp_declare_target_missing_to_or_link_clause: Error<
+ "expected at least one 'to' or 'link' clause">;
+def err_omp_declare_target_multiple : Error<
+ "%0 appears multiple times in clauses on the same declare target directive">;
def err_omp_expected_clause: Error<
"expected at least one clause on '#pragma omp %0' directive">;
def err_omp_mapper_illegal_identifier : Error<
@@ -1387,6 +1420,22 @@ def err_omp_variant_ctx_second_match_extension : Error<
"only a single match extension allowed per OpenMP context selector">;
def err_omp_invalid_dsa: Error<
"data-sharing attribute '%0' in '%1' clause requires OpenMP version %2 or above">;
+def err_omp_expected_punc_after_interop_mod : Error<
+ "expected ',' after interop modifier">;
+def err_omp_expected_interop_type : Error<
+ "expected interop type: 'target' and/or 'targetsync'">;
+def warn_omp_more_one_interop_type
+ : Warning<"interop type '%0' cannot be specified more than once">,
+ InGroup<OpenMPClauses>;
+def err_expected_sequence_or_directive : Error<
+ "expected an OpenMP 'directive' or 'sequence' attribute argument">;
+def ext_omp_attributes : ExtWarn<
+ "specifying OpenMP directives with [[]] is an OpenMP 5.1 extension">,
+ InGroup<OpenMP51Ext>;
+def warn_omp51_compat_attributes : Warning<
+ "specifying OpenMP directives with [[]] is incompatible with OpenMP "
+ "standards before OpenMP 5.1">,
+ InGroup<OpenMPPre51Compat>, DefaultIgnore;
// Pragma loop support.
def err_pragma_loop_missing_argument : Error<
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index 67c59f3ca09a..108f1796415c 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -82,11 +82,11 @@ def err_typecheck_converted_constant_expression_indirect : Error<
"bind reference to a temporary">;
def err_expr_not_cce : Error<
"%select{case value|enumerator value|non-type template argument|"
- "array size|constexpr if condition|explicit specifier argument}0 "
+ "array size|explicit specifier argument}0 "
"is not a constant expression">;
def ext_cce_narrowing : ExtWarn<
"%select{case value|enumerator value|non-type template argument|"
- "array size|constexpr if condition|explicit specifier argument}0 "
+ "array size|explicit specifier argument}0 "
"%select{cannot be narrowed from type %2 to %3|"
"evaluates to %2, which cannot be narrowed to type %3}1">,
InGroup<CXX11Narrowing>, DefaultError, SFINAEFailure;
@@ -118,7 +118,8 @@ def warn_float_underflow : Warning<
"magnitude of floating-point constant too small for type %0; minimum is %1">,
InGroup<LiteralRange>;
def warn_double_const_requires_fp64 : Warning<
- "double precision constant requires cl_khr_fp64, casting to single precision">;
+ "double precision constant requires %select{cl_khr_fp64|cl_khr_fp64 and __opencl_c_fp64}0, "
+ "casting to single precision">;
def err_half_const_requires_fp16 : Error<
"half precision constant requires cl_khr_fp16">;
@@ -214,6 +215,9 @@ def ext_designated_init_reordered : ExtWarn<
SFINAEFailure;
def note_previous_field_init : Note<
"previous initialization for field %0 is here">;
+def ext_designated_init_brace_elision : ExtWarn<
+ "brace elision for designated initializer is a C99 extension">,
+ InGroup<C99Designator>, SFINAEFailure;
// Declarations.
def ext_plain_complex : ExtWarn<
@@ -290,6 +294,10 @@ def err_anyx86_interrupt_attribute : Error<
"a pointer as the first parameter|a %2 type as the second parameter}1">;
def err_anyx86_interrupt_called : Error<
"interrupt service routine cannot be called directly">;
+def warn_anyx86_interrupt_regsave : Warning<
+ "interrupt service routine should only call a function"
+ " with attribute 'no_caller_saved_registers'">,
+ InGroup<DiagGroup<"interrupt-service-routine">>;
def warn_arm_interrupt_calling_convention : Warning<
"call to function without interrupt attribute could clobber interruptee's VFP registers">,
InGroup<Extra>;
@@ -303,8 +311,12 @@ def note_riscv_repeated_interrupt_attribute : Note<
"repeated RISC-V 'interrupt' attribute is here">;
def warn_unused_parameter : Warning<"unused parameter %0">,
InGroup<UnusedParameter>, DefaultIgnore;
+def warn_unused_but_set_parameter : Warning<"parameter %0 set but not used">,
+ InGroup<UnusedButSetParameter>, DefaultIgnore;
def warn_unused_variable : Warning<"unused variable %0">,
InGroup<UnusedVariable>, DefaultIgnore;
+def warn_unused_but_set_variable : Warning<"variable %0 set but not used">,
+ InGroup<UnusedButSetVariable>, DefaultIgnore;
def warn_unused_local_typedef : Warning<
"unused %select{typedef|type alias}0 %1">,
InGroup<UnusedLocalTypedef>, DefaultIgnore;
@@ -372,6 +384,15 @@ def warn_unused_lambda_capture: Warning<"lambda capture %0 is not "
"%select{used|required to be captured for this use}1">,
InGroup<UnusedLambdaCapture>, DefaultIgnore;
+def warn_reserved_extern_symbol: Warning<
+ "identifier %0 is reserved because %select{"
+ "<ERROR>|" // ReservedIdentifierStatus::NotReserved
+ "it starts with '_' at global scope|"
+ "it starts with '__'|"
+ "it starts with '_' followed by a capital letter|"
+ "it contains '__'}1">,
+ InGroup<ReservedIdentifier>, DefaultIgnore;
+
def warn_parameter_size: Warning<
"%0 is a large (%1 bytes) pass-by-value argument; "
"pass it by reference instead ?">, InGroup<LargeByValueCopy>;
@@ -422,7 +443,8 @@ def warn_decl_shadow :
"static data member of %2|"
"field of %2|"
"typedef in %2|"
- "type alias in %2}1">,
+ "type alias in %2|"
+ "structured binding}1">,
InGroup<Shadow>, DefaultIgnore;
def warn_decl_shadow_uncaptured_local :
Warning<warn_decl_shadow.Text>,
@@ -514,6 +536,10 @@ def err_using_dependent_value_is_type : Error<
"dependent using declaration resolved to type without 'typename'">;
def err_using_decl_nested_name_specifier_is_not_class : Error<
"using declaration in class refers into '%0', which is not a class">;
+def warn_cxx17_compat_using_decl_non_member_enumerator : Warning<
+ "member using declaration naming non-class '%0' enumerator is "
+ "incompatible with C++ standards before C++20">, InGroup<CXXPre20Compat>,
+ DefaultIgnore;
def err_using_decl_nested_name_specifier_is_current_class : Error<
"using declaration refers to its own class">;
def err_using_decl_nested_name_specifier_is_not_base_class : Error<
@@ -522,6 +548,14 @@ def err_using_decl_constructor_not_in_direct_base : Error<
"%0 is not a direct base of %1, cannot inherit constructors">;
def err_using_decl_can_not_refer_to_class_member : Error<
"using declaration cannot refer to class member">;
+def warn_cxx17_compat_using_decl_class_member_enumerator : Warning<
+ "member using declaration naming a non-member enumerator is incompatible "
+ "with C++ standards before C++20">, InGroup<CXXPre20Compat>, DefaultIgnore;
+def ext_using_decl_class_member_enumerator : ExtWarn<
+ "member using declaration naming a non-member enumerator is "
+ "a C++20 extension">, InGroup<CXX20>;
+def err_using_enum_is_dependent : Error<
+ "using-enum cannot name a dependent type">;
def err_ambiguous_inherited_constructor : Error<
"constructor of %0 inherited from multiple base class subobjects">;
def note_ambiguous_inherited_constructor_using : Note<
@@ -531,8 +565,12 @@ def note_using_decl_class_member_workaround : Note<
"a const variable|a constexpr variable}0 instead">;
def err_using_decl_can_not_refer_to_namespace : Error<
"using declaration cannot refer to a namespace">;
-def err_using_decl_can_not_refer_to_scoped_enum : Error<
- "using declaration cannot refer to a scoped enumerator">;
+def warn_cxx17_compat_using_decl_scoped_enumerator: Warning<
+ "using declaration naming a scoped enumerator is incompatible with "
+ "C++ standards before C++20">, InGroup<CXXPre20Compat>, DefaultIgnore;
+def ext_using_decl_scoped_enumerator : ExtWarn<
+ "using declaration naming a scoped enumerator is a C++20 extension">,
+ InGroup<CXX20>;
def err_using_decl_constructor : Error<
"using declaration cannot refer to a constructor">;
def warn_cxx98_compat_using_decl_constructor : Warning<
@@ -552,6 +590,15 @@ def err_using_decl_conflict_reverse : Error<
def note_using_decl : Note<"%select{|previous }0using declaration">;
def err_using_decl_redeclaration_expansion : Error<
"using declaration pack expansion at block scope produces multiple values">;
+def err_use_of_empty_using_if_exists : Error<
+ "reference to unresolved using declaration">;
+def note_empty_using_if_exists_here : Note<
+ "using declaration annotated with 'using_if_exists' here">;
+def err_using_if_exists_on_ctor : Error<
+ "'using_if_exists' attribute cannot be applied to an inheriting constructor">;
+def err_using_enum_decl_redeclaration : Error<
+ "redeclaration of using-enum declaration">;
+def note_using_enum_decl : Note<"%select{|previous }0using-enum declaration">;
def warn_access_decl_deprecated : Warning<
"access declarations are deprecated; use using declarations instead">,
@@ -559,15 +606,24 @@ def warn_access_decl_deprecated : Warning<
def err_access_decl : Error<
"ISO C++11 does not allow access declarations; "
"use using declarations instead">;
-def warn_deprecated_copy_operation : Warning<
+def warn_deprecated_copy : Warning<
"definition of implicit copy %select{constructor|assignment operator}1 "
"for %0 is deprecated because it has a user-declared copy "
"%select{assignment operator|constructor}1">,
InGroup<DeprecatedCopy>, DefaultIgnore;
-def warn_deprecated_copy_dtor_operation : Warning<
+def warn_deprecated_copy_with_dtor : Warning<
"definition of implicit copy %select{constructor|assignment operator}1 "
"for %0 is deprecated because it has a user-declared destructor">,
- InGroup<DeprecatedCopyDtor>, DefaultIgnore;
+ InGroup<DeprecatedCopyWithDtor>, DefaultIgnore;
+def warn_deprecated_copy_with_user_provided_copy: Warning<
+ "definition of implicit copy %select{constructor|assignment operator}1 "
+ "for %0 is deprecated because it has a user-provided copy "
+ "%select{assignment operator|constructor}1">,
+ InGroup<DeprecatedCopyWithUserProvidedCopy>, DefaultIgnore;
+def warn_deprecated_copy_with_user_provided_dtor : Warning<
+ "definition of implicit copy %select{constructor|assignment operator}1 "
+ "for %0 is deprecated because it has a user-provided destructor">,
+ InGroup<DeprecatedCopyWithUserProvidedDtor>, DefaultIgnore;
def warn_cxx17_compat_exception_spec_in_signature : Warning<
"mangled name of %0 will change in C++17 due to non-throwing exception "
"specification in function signature">, InGroup<CXX17CompatMangling>;
@@ -1431,6 +1487,8 @@ def err_messaging_class_with_direct_method : Error<
// C++ declarations
def err_static_assert_expression_is_not_constant : Error<
"static_assert expression is not an integral constant expression">;
+def err_constexpr_if_condition_expression_is_not_constant : Error<
+ "constexpr if condition is not a constant expression">;
def err_static_assert_failed : Error<"static_assert failed%select{ %1|}0">;
def err_static_assert_requirement_failed : Error<
"static_assert failed due to requirement '%0'%select{ %2|}1">;
@@ -1705,6 +1763,8 @@ def warn_cxx98_compat_sfinae_access_control : Warning<
// C++ name lookup
def err_incomplete_nested_name_spec : Error<
"incomplete type %0 named in nested name specifier">;
+def err_incomplete_enum : Error<
+ "enumeration %0 is incomplete">;
def err_dependent_nested_name_spec : Error<
"nested name specifier for a declaration cannot depend on a template "
"parameter">;
@@ -2816,6 +2876,53 @@ def warn_nomerge_attribute_ignored_in_stmt: Warning<
"%0 attribute is ignored because there exists no call expression inside the "
"statement">,
InGroup<IgnoredAttributes>;
+
+def err_musttail_needs_trivial_args : Error<
+ "tail call requires that the return value, all parameters, and any "
+ "temporaries created by the expression are trivially destructible">;
+def err_musttail_needs_call : Error<
+ "%0 attribute requires that the return value is the result of a function call"
+ >;
+def err_musttail_needs_prototype : Error<
+ "%0 attribute requires that both caller and callee functions have a "
+ "prototype">;
+def note_musttail_fix_non_prototype : Note<
+ "add 'void' to the parameter list to turn an old-style K&R function "
+ "declaration into a prototype">;
+def err_musttail_structors_forbidden : Error<"cannot perform a tail call "
+ "%select{from|to}0 a %select{constructor|destructor}1">;
+def note_musttail_structors_forbidden : Note<"target "
+ "%select{constructor|destructor}0 is declared here">;
+def err_musttail_forbidden_from_this_context : Error<
+ "%0 attribute cannot be used from "
+ "%select{a block|an Objective-C function|this context}1">;
+def err_musttail_member_mismatch : Error<
+ "%select{non-member|static member|non-static member}0 "
+ "function cannot perform a tail call to "
+ "%select{non-member|static member|non-static member|pointer-to-member}1 "
+ "function%select{| %3}2">;
+def note_musttail_callee_defined_here : Note<"%0 declared here">;
+def note_tail_call_required : Note<"tail call required by %0 attribute here">;
+def err_musttail_mismatch : Error<
+ "cannot perform a tail call to function%select{| %1}0 because its signature "
+ "is incompatible with the calling function">;
+def note_musttail_mismatch : Note<
+ "target function "
+ "%select{is a member of different class%diff{ (expected $ but has $)|}1,2"
+ "|has different number of parameters (expected %1 but has %2)"
+ "|has type mismatch at %ordinal3 parameter"
+ "%diff{ (expected $ but has $)|}1,2"
+ "|has different return type%diff{ ($ expected but has $)|}1,2}0">;
+def err_musttail_callconv_mismatch : Error<
+ "cannot perform a tail call to function%select{| %1}0 because it uses an "
+ "incompatible calling convention">;
+def note_musttail_callconv_mismatch : Note<
+ "target function has calling convention %1 (expected %0)">;
+def err_musttail_scope : Error<
+ "cannot perform a tail call from this return statement">;
+def err_musttail_no_variadic : Error<
+ "%0 attribute may not be used with variadic functions">;
+
def err_nsobject_attribute : Error<
"'NSObject' attribute is for pointer types only">;
def err_attributes_are_not_compatible : Error<
@@ -3049,7 +3156,8 @@ def warn_nsdictionary_duplicate_key : Warning<
def note_nsdictionary_duplicate_key_here : Note<
"previous equal key is here">;
def err_swift_param_attr_not_swiftcall : Error<
- "'%0' parameter can only be used with swiftcall calling convention">;
+ "'%0' parameter can only be used with swiftcall%select{ or swiftasynccall|}1 "
+ "calling convention%select{|s}1">;
def err_swift_indirect_result_not_first : Error<
"'swift_indirect_result' parameters must be first parameters of function">;
def err_swift_error_result_not_after_swift_context : Error<
@@ -3078,6 +3186,8 @@ def warn_objc_redundant_literal_use : Warning<
def err_attr_tlsmodel_arg : Error<"tls_model must be \"global-dynamic\", "
"\"local-dynamic\", \"initial-exec\" or \"local-exec\"">;
+def err_aix_attr_unsupported_tls_model : Error<"TLS model '%0' is not yet supported on AIX">;
+
def err_tls_var_aligned_over_maximum : Error<
"alignment (%0) of thread-local variable %1 is greater than the maximum supported "
"alignment (%2) for a thread-local variable on this target">;
@@ -3087,6 +3197,8 @@ def err_only_annotate_after_access_spec : Error<
def err_attribute_section_invalid_for_target : Error<
"argument to %select{'code_seg'|'section'}1 attribute is not valid for this target: %0">;
+def err_pragma_section_invalid_for_target : Error<
+ "argument to #pragma section is not valid for this target: %0">;
def warn_attribute_section_drectve : Warning<
"#pragma %0(\".drectve\") has undefined behavior, "
"use #pragma comment(linker, ...) instead">, InGroup<MicrosoftDrectveSection>;
@@ -3142,6 +3254,10 @@ def warn_assume_aligned_too_great
: Warning<"requested alignment must be %0 bytes or smaller; maximum "
"alignment assumed">,
InGroup<DiagGroup<"builtin-assume-aligned-alignment">>;
+def warn_not_xl_compatible
+ : Warning<"requesting an alignment of 16 bytes or greater for struct"
+ " members is not binary compatible with AIX XL 16.1 and older">,
+ InGroup<AIXCompat>;
def warn_redeclaration_without_attribute_prev_attribute_ignored : Warning<
"%q0 redeclared without %1 attribute: previous %1 ignored">,
InGroup<MicrosoftInconsistentDllImport>;
@@ -3156,6 +3272,9 @@ def warn_attribute_ignored : Warning<"%0 attribute ignored">,
def warn_nothrow_attribute_ignored : Warning<"'nothrow' attribute conflicts with"
" exception specification; attribute ignored">,
InGroup<IgnoredAttributes>;
+def warn_attribute_ignored_on_non_definition :
+ Warning<"%0 attribute ignored on a non-definition declaration">,
+ InGroup<IgnoredAttributes>;
def warn_attribute_ignored_on_inline :
Warning<"%0 attribute ignored on inline function">,
InGroup<IgnoredAttributes>;
@@ -3430,6 +3549,10 @@ def warn_at_available_unchecked_use : Warning<
"use if (%select{@available|__builtin_available}0) instead">,
InGroup<DiagGroup<"unsupported-availability-guard">>;
+def warn_missing_sdksettings_for_availability_checking : Warning<
+ "%0 availability is ignored without a valid 'SDKSettings.json' in the SDK">,
+ InGroup<DiagGroup<"ignored-availability-without-sdk-settings">>;
+
// Thread Safety Attributes
def warn_thread_attribute_ignored : Warning<
"ignoring %0 attribute because its argument is invalid">,
@@ -3554,13 +3677,13 @@ def warn_fun_requires_lock_precise :
def note_found_mutex_near_match : Note<"found near match '%0'">;
// Verbose thread safety warnings
-def warn_thread_safety_verbose : Warning<"Thread safety verbose warning.">,
+def warn_thread_safety_verbose : Warning<"thread safety verbose warning">,
InGroup<ThreadSafetyVerbose>, DefaultIgnore;
-def note_thread_warning_in_fun : Note<"Thread warning in function %0">;
-def note_guarded_by_declared_here : Note<"Guarded_by declared here.">;
+def note_thread_warning_in_fun : Note<"thread warning in function %0">;
+def note_guarded_by_declared_here : Note<"guarded_by declared here">;
// Dummy warning that will trigger "beta" warnings from the analysis if enabled.
-def warn_thread_safety_beta : Warning<"Thread safety beta warning.">,
+def warn_thread_safety_beta : Warning<"thread safety beta warning">,
InGroup<ThreadSafetyBeta>, DefaultIgnore;
// Consumed warnings
@@ -3876,6 +3999,9 @@ def warn_attribute_sentinel_named_arguments : Warning<
def warn_attribute_sentinel_not_variadic : Warning<
"'sentinel' attribute only supported for variadic %select{functions|blocks}0">,
InGroup<IgnoredAttributes>;
+def warn_deprecated_ignored_on_using : Warning<
+ "%0 currently has no effect on a using declaration">,
+ InGroup<IgnoredAttributes>;
def err_attribute_sentinel_less_than_zero : Error<
"'sentinel' parameter 1 less than zero">;
def err_attribute_sentinel_not_zero_or_one : Error<
@@ -3957,6 +4083,8 @@ def note_protocol_decl_undefined : Note<
def err_attribute_preferred_name_arg_invalid : Error<
"argument %0 to 'preferred_name' attribute is not a typedef for "
"a specialization of %1">;
+def err_attribute_builtin_alias : Error<
+ "%0 attribute can only be applied to an ARM or RISC-V builtin">;
// called-once attribute diagnostics.
def err_called_once_attribute_wrong_type : Error<
@@ -4104,6 +4232,17 @@ def err_swift_async_bad_block_type : Error<
"'swift_async' completion handler parameter must have block type returning"
" 'void', type here is %0">;
+def err_swift_async_error_without_swift_async : Error<
+ "%0 attribute must be applied to a %select{function|method}1 annotated "
+ "with non-'none' attribute 'swift_async'">;
+def err_swift_async_error_no_error_parameter : Error<
+ "%0 attribute with 'nonnull_error' convention can only be applied to a "
+ "%select{function|method}1 with a completion handler with an error "
+ "parameter">;
+def err_swift_async_error_non_integral : Error<
+ "%0 attribute with '%1' convention must have an integral-typed parameter "
+ "in completion handler at index %2, type here is %3">;
+
def warn_ignored_objc_externally_retained : Warning<
"'objc_externally_retained' can only be applied to local variables "
"%select{of retainable type|with strong ownership}0">,
@@ -4279,8 +4418,6 @@ def warn_diagnose_if_succeeded : Warning<"%0">, InGroup<UserDefinedWarnings>,
ShowInSystemHeader;
def note_ovl_candidate_disabled_by_function_cond_attr : Note<
"candidate disabled: %0">;
-def note_ovl_candidate_disabled_by_extension : Note<
- "candidate unavailable as it requires OpenCL extension '%0' to be enabled">;
def err_addrof_function_disabled_by_enable_if_attr : Error<
"cannot take address of function %0 because it has one or more "
"non-tautological enable_if conditions">;
@@ -5710,11 +5847,8 @@ def err_anon_bitfield_has_negative_width : Error<
"anonymous bit-field has negative width (%0)">;
def err_bitfield_has_zero_width : Error<"named bit-field %0 has zero width">;
def err_bitfield_width_exceeds_type_width : Error<
- "width of bit-field %0 (%1 bits) exceeds %select{width|size}2 "
- "of its type (%3 bit%s3)">;
-def err_anon_bitfield_width_exceeds_type_width : Error<
- "width of anonymous bit-field (%0 bits) exceeds %select{width|size}1 "
- "of its type (%2 bit%s2)">;
+ "width of%select{ anonymous|}0 bit-field%select{| %1}0 (%2 bits) exceeds the "
+ "%select{width|size}3 of its type (%4 bit%s4)">;
def err_incorrect_number_of_vector_initializers : Error<
"number of elements must be either one or match the size of the vector">;
@@ -5753,6 +5887,9 @@ def note_goto_ms_asm_label : Note<
def warn_unused_label : Warning<"unused label %0">,
InGroup<UnusedLabel>, DefaultIgnore;
+def err_continue_from_cond_var_init : Error<
+ "cannot jump from this continue statement to the loop increment; "
+ "jump bypasses initialization of loop condition variable">;
def err_goto_into_protected_scope : Error<
"cannot jump from this goto statement to its label">;
def ext_goto_into_protected_scope : ExtWarn<
@@ -6257,6 +6394,14 @@ def warn_pointer_arith_null_ptr : Warning<
def warn_gnu_null_ptr_arith : Warning<
"arithmetic on a null pointer treated as a cast from integer to pointer is a GNU extension">,
InGroup<NullPointerArithmetic>, DefaultIgnore;
+def warn_pointer_sub_null_ptr : Warning<
+ "performing pointer subtraction with a null pointer %select{has|may have}0 undefined behavior">,
+ InGroup<NullPointerSubtraction>, DefaultIgnore;
+def err_kernel_invalidates_sycl_unique_stable_name
+ : Error<"kernel instantiation changes the result of an evaluated "
+ "'__builtin_sycl_unique_stable_name'">;
+def note_sycl_unique_stable_name_evaluated_here
+ : Note<"'__builtin_sycl_unique_stable_name' evaluated here">;
def warn_floatingpoint_eq : Warning<
"comparing floating point with == or != is unsafe">,
@@ -6351,19 +6496,6 @@ def warn_pessimizing_move_on_initialization : Warning<
InGroup<PessimizingMove>, DefaultIgnore;
def note_remove_move : Note<"remove std::move call here">;
-def warn_return_std_move : Warning<
- "local variable %0 will be copied despite being %select{returned|thrown}1 by name">,
- InGroup<ReturnStdMove>, DefaultIgnore;
-def note_add_std_move : Note<
- "call 'std::move' explicitly to avoid copying">;
-def warn_return_std_move_in_cxx11 : Warning<
- "prior to the resolution of a defect report against ISO C++11, "
- "local variable %0 would have been copied despite being returned by name, "
- "due to its not matching the function return type%diff{ ($ vs $)|}1,2">,
- InGroup<ReturnStdMoveInCXX11>, DefaultIgnore;
-def note_add_std_move_in_cxx11 : Note<
- "call 'std::move' explicitly to avoid copying on older compilers">;
-
def warn_string_plus_int : Warning<
"adding %0 to a string does not append to the string">,
InGroup<StringPlusInt>;
@@ -6637,11 +6769,14 @@ def warn_pointer_indirection_from_incompatible_type : Warning<
def warn_taking_address_of_packed_member : Warning<
"taking address of packed member %0 of class or structure %q1 may result in an unaligned pointer value">,
InGroup<DiagGroup<"address-of-packed-member">>;
+def warn_param_mismatched_alignment : Warning<
+ "passing %0-byte aligned argument to %1-byte aligned parameter %2 of %3 may result in an unaligned pointer access">,
+ InGroup<DiagGroup<"align-mismatch">>;
def err_objc_object_assignment : Error<
"cannot assign to class object (%0 invalid)">;
def err_typecheck_invalid_operands : Error<
- "invalid operands to binary expression (%0 and %1)">;
+ "invalid operands to binary expression (%0 and %1)">, Deferrable;
def note_typecheck_invalid_operands_converted : Note<
"%select{first|second}0 operand was implicitly converted to type %1">;
def err_typecheck_logical_vector_expr_gnu_cpp_restrict : Error<
@@ -6664,9 +6799,14 @@ def ext_typecheck_compare_complete_incomplete_pointers : Extension<
"%0 is %select{|in}2complete and "
"%1 is %select{|in}3complete">,
InGroup<C11>;
+def warn_typecheck_ordered_comparison_of_function_pointers : Warning<
+ "ordered comparison of function pointers (%0 and %1)">,
+ InGroup<OrderedCompareFunctionPointers>;
def ext_typecheck_ordered_comparison_of_function_pointers : ExtWarn<
"ordered comparison of function pointers (%0 and %1)">,
- InGroup<DiagGroup<"ordered-compare-function-pointers">>;
+ InGroup<OrderedCompareFunctionPointers>;
+def err_typecheck_ordered_comparison_of_function_pointers : Error<
+ "ordered comparison of function pointers (%0 and %1)">;
def ext_typecheck_comparison_of_fptr_to_void : Extension<
"equality comparison between function pointer and void pointer (%0 and %1)">;
def err_typecheck_comparison_of_fptr_to_void : Error<
@@ -6762,6 +6902,10 @@ def warn_unsigned_always_true_comparison : Warning<
"result of comparison of %select{%3|unsigned expression}0 %2 "
"%select{unsigned expression|%3}0 is always %4">,
InGroup<TautologicalUnsignedZeroCompare>, DefaultIgnore;
+def warn_unsigned_char_always_true_comparison : Warning<
+ "result of comparison of %select{%3|char expression}0 %2 "
+ "%select{char expression|%3}0 is always %4, since char is interpreted as "
+ "unsigned">, InGroup<TautologicalUnsignedCharZeroCompare>, DefaultIgnore;
def warn_unsigned_enum_always_true_comparison : Warning<
"result of comparison of %select{%3|unsigned enum expression}0 %2 "
"%select{unsigned enum expression|%3}0 is always %4">,
@@ -7172,6 +7316,9 @@ def err_need_header_before_typeid : Error<
"you need to include <typeinfo> before using the 'typeid' operator">;
def err_need_header_before_ms_uuidof : Error<
"you need to include <guiddef.h> before using the '__uuidof' operator">;
+def err_need_header_before_placement_new : Error<
+ "no matching %0 function for non-allocating placement new expression; "
+ "include <new>">;
def err_ms___leave_not_in___try : Error<
"'__leave' statement not in __try block">;
def err_uuidof_without_guid : Error<
@@ -7299,6 +7446,12 @@ def warn_deprecated_volatile_structured_binding : Warning<
"volatile qualifier in structured binding declaration is deprecated">,
InGroup<DeprecatedVolatile>;
+def warn_deprecated_altivec_src_compat : Warning<
+ "Current handling of vector bool and vector pixel types in this context is "
+ "deprecated. The default behaviour will soon change to that implied by the "
+ "'-altivec-compat=xl' option">,
+ InGroup<DiagGroup<"deprecated-altivec-src-compat">>;
+
def err_catch_incomplete_ptr : Error<
"cannot catch pointer to incomplete type %0">;
def err_catch_incomplete_ref : Error<
@@ -7401,9 +7554,10 @@ def err_conditional_vector_element_size : Error<
def err_conditional_vector_has_void : Error<
"GNU vector conditional operand cannot be %select{void|a throw expression}0">;
def err_conditional_vector_operand_type
- : Error<"%select{enumeration|extended vector}0 type %1 is not allowed in a "
- "vector conditional">;
-def err_conditional_vector_mismatched_vectors
+ : Error<"enumeration type %0 is not allowed in a vector conditional">;
+def err_conditional_vector_cond_result_mismatch
+ : Error<"cannot mix vectors and extended vectors in a vector conditional">;
+def err_conditional_vector_mismatched
: Error<"vector operands to the vector conditional must be the same type "
+ "%diff{($ and $)|}0,1">;
@@ -7439,6 +7593,8 @@ let CategoryName = "Lambda Issue" in {
"duration">;
def err_this_capture : Error<
"'this' cannot be %select{implicitly |}0captured in this context">;
+ def note_lambda_this_capture_fixit : Note<
+ "explicitly capture 'this'">;
def err_lambda_capture_anonymous_var : Error<
"unnamed variable cannot be implicitly captured in a lambda expression">;
def err_lambda_capture_flexarray_type : Error<
@@ -7447,6 +7603,10 @@ let CategoryName = "Lambda Issue" in {
def err_lambda_impcap : Error<
"variable %0 cannot be implicitly captured in a lambda with no "
"capture-default specified">;
+ def note_lambda_variable_capture_fixit : Note<
+ "capture %0 by %select{value|reference}1">;
+ def note_lambda_default_capture_fixit : Note<
+ "default capture by %select{value|reference}0">;
def note_lambda_decl : Note<"lambda expression begins here">;
def err_lambda_unevaluated_operand : Error<
"lambda expression in an unevaluated operand">;
@@ -7624,7 +7784,7 @@ def warn_condition_is_assignment : Warning<"using the result of an "
"assignment as a condition without parentheses">,
InGroup<Parentheses>;
def warn_free_nonheap_object
- : Warning<"attempt to call %0 on non-heap object %1">,
+ : Warning<"attempt to call %0 on non-heap %select{object %2|object: block expression|object: lambda-to-function-pointer conversion}1">,
InGroup<FreeNonHeapObject>;
// Completely identical except off by default.
@@ -8156,6 +8316,9 @@ def err_atomic_op_needs_non_const_pointer : Error<
def err_atomic_op_needs_trivial_copy : Error<
"address argument to atomic operation must be a pointer to a "
"trivially-copyable type (%0 invalid)">;
+def err_atomic_op_needs_atomic_int_ptr_or_fp : Error<
+ "address argument to atomic operation must be a pointer to %select{|atomic }0"
+ "integer, pointer or supported floating point type (%1 invalid)">;
def err_atomic_op_needs_atomic_int_or_ptr : Error<
"address argument to atomic operation must be a pointer to %select{|atomic }0"
"integer or pointer (%1 invalid)">;
@@ -8208,6 +8371,10 @@ def err_global_call_not_config : Error<
def err_ref_bad_target : Error<
"reference to %select{__device__|__global__|__host__|__host__ __device__}0 "
"%select{function|variable}1 %2 in %select{__device__|__global__|__host__|__host__ __device__}3 function">;
+def note_cuda_const_var_unpromoted : Note<
+ "const variable cannot be emitted on device side due to dynamic initialization">;
+def note_cuda_host_var : Note<
+ "host variable declared here">;
def err_ref_bad_target_global_initializer : Error<
"reference to %select{__device__|__global__|__host__|__host__ __device__}0 "
"function %1 in global initializer">;
@@ -8271,6 +8438,9 @@ def note_cuda_device_builtin_surftex_should_be_template_class : Note<
"%0 needs to be instantiated from a class template with proper "
"template arguments">;
+def err_hip_invalid_args_builtin_mangled_name : Error<
+ "invalid argument: symbol must be a device-side function or global variable">;
+
def warn_non_pod_vararg_with_format_string : Warning<
"cannot pass %select{non-POD|non-trivial}0 object of type %1 to variadic "
"%select{function|block|method|constructor}2; expected type from format "
@@ -8354,6 +8524,9 @@ def note_change_calling_conv_fixit : Note<
def warn_bad_function_cast : Warning<
"cast from function call of type %0 to non-matching type %1">,
InGroup<BadFunctionCast>, DefaultIgnore;
+def warn_cast_function_type : Warning<
+ "cast %diff{from $ to $ |}0,1converts to incompatible function type">,
+ InGroup<CastFunctionType>, DefaultIgnore;
def err_cast_pointer_to_non_pointer_int : Error<
"pointer cannot be cast to type %0">;
def err_cast_to_bfloat16 : Error<"cannot type-cast to __bf16">;
@@ -8362,6 +8535,9 @@ def err_typecheck_expect_scalar_operand : Error<
"operand of type %0 where arithmetic or pointer type is required">;
def err_typecheck_cond_incompatible_operands : Error<
"incompatible operand types%diff{ ($ and $)|}0,1">;
+def err_typecheck_expect_flt_or_vector : Error<
+ "invalid operand of type %0 where floating, complex or "
+ "a vector of such types is required">;
def err_cast_selector_expr : Error<
"cannot type cast @selector expression">;
def ext_typecheck_cond_incompatible_pointers : ExtWarn<
@@ -8492,6 +8668,7 @@ let CategoryName = "Inline Assembly Issue" in {
"asm constraint has an unexpected number of alternatives: %0 vs %1">;
def err_asm_incomplete_type : Error<"asm operand has incomplete type %0">;
def err_asm_unknown_register_name : Error<"unknown register name '%0' in asm">;
+ def err_asm_unwind_and_goto : Error<"unwind clobber can't be used with asm goto">;
def err_asm_invalid_global_var_reg : Error<"register '%0' unsuitable for "
"global register variables on this target">;
def err_asm_register_size_mismatch : Error<"size of register '%0' does not "
@@ -8539,6 +8716,12 @@ let CategoryName = "Inline Assembly Issue" in {
let CategoryName = "Semantic Issue" in {
+def err_invalid_conversion_between_matrixes : Error<
+ "conversion between matrix types%diff{ $ and $|}0,1 of different size is not allowed">;
+
+def err_invalid_conversion_between_matrix_and_type : Error<
+ "conversion between matrix type %0 and incompatible type %1 is not allowed">;
+
def err_invalid_conversion_between_vectors : Error<
"invalid conversion between vector type%diff{ $ and $|}0,1 of different "
"size">;
@@ -8547,7 +8730,7 @@ def err_invalid_conversion_between_vector_and_integer : Error<
"of different size">;
def err_opencl_function_pointer : Error<
- "pointers to functions are not allowed">;
+ "%select{pointers|references}0 to functions are not allowed">;
def err_opencl_taking_address_capture : Error<
"taking address of a capture is not allowed">;
@@ -8574,6 +8757,15 @@ def warn_initializer_out_of_order : Warning<
"%select{field|base class}0 %1 will be initialized after "
"%select{field|base}2 %3">,
InGroup<ReorderCtor>, DefaultIgnore;
+
+def warn_some_initializers_out_of_order : Warning<
+ "initializer order does not match the declaration order">,
+ InGroup<ReorderCtor>, DefaultIgnore;
+
+def note_initializer_out_of_order : Note<
+ "%select{field|base class}0 %1 will be initialized after "
+ "%select{field|base}2 %3">;
+
def warn_abstract_vbase_init_ignored : Warning<
"initializer for virtual base class %0 of abstract class %1 "
"will never be used">,
@@ -8950,8 +9142,8 @@ def note_defaulted_comparison_calls_deleted : Note<
"defaulted %0 is implicitly deleted because it would invoke a deleted "
"comparison function%select{| for member %2| for base class %2}1">;
def note_defaulted_comparison_no_viable_function : Note<
- "defaulted %0 is implicitly deleted because there is no viable comparison "
- "function%select{| for member %2| for base class %2}1">;
+ "defaulted %0 is implicitly deleted because there is no viable three-way "
+ "comparison function for%select{| member| base class}1 %2">;
def note_defaulted_comparison_no_viable_function_synthesized : Note<
"three-way comparison cannot be synthesized because there is no viable "
"function for %select{'=='|'<'}0 comparison">;
@@ -9003,6 +9195,14 @@ def warn_array_index_precedes_bounds : Warning<
def warn_array_index_exceeds_bounds : Warning<
"array index %0 is past the end of the array (which contains %1 "
"element%s2)">, InGroup<ArrayBounds>;
+def warn_ptr_arith_exceeds_max_addressable_bounds : Warning<
+ "the pointer incremented by %0 refers past the last possible element for an array in %1-bit "
+ "address space containing %2-bit (%3-byte) elements (max possible %4 element%s5)">,
+ InGroup<ArrayBounds>;
+def warn_array_index_exceeds_max_addressable_bounds : Warning<
+ "array index %0 refers past the last possible element for an array in %1-bit "
+ "address space containing %2-bit (%3-byte) elements (max possible %4 element%s5)">,
+ InGroup<ArrayBounds>;
def note_array_declared_here : Note<
"array %0 declared here">;
@@ -9537,6 +9737,8 @@ def err_argument_not_shifted_byte : Error<
"argument should be an 8-bit value shifted by a multiple of 8 bits">;
def err_argument_not_shifted_byte_or_xxff : Error<
"argument should be an 8-bit value shifted by a multiple of 8 bits, or in the form 0x??FF">;
+def err_argument_not_contiguous_bit_field : Error<
+ "argument %0 value should represent a contiguous bit field">;
def err_rotation_argument_to_cadd
: Error<"argument should be the value 90 or 270">;
def err_rotation_argument_to_cmla
@@ -9565,8 +9767,8 @@ def err_mips_builtin_requires_dspr2 : Error<
"this builtin requires 'dsp r2' ASE, please use -mdspr2">;
def err_mips_builtin_requires_msa : Error<
"this builtin requires 'msa' ASE, please use -mmsa">;
-def err_ppc_builtin_only_on_pwr7 : Error<
- "this builtin is only valid on POWER7 or later CPUs">;
+def err_ppc_builtin_only_on_arch : Error<
+ "this builtin is only valid on POWER%0 or later CPUs">;
def err_ppc_invalid_use_mma_type : Error<
"invalid use of PPC MMA type">;
def err_x86_builtin_invalid_rounding : Error<
@@ -9618,7 +9820,7 @@ def warn_duplicate_attribute_exact : Warning<
"attribute %0 is already applied">, InGroup<IgnoredAttributes>;
def warn_duplicate_attribute : Warning<
- "attribute %0 is already applied with different parameters">,
+ "attribute %0 is already applied with different arguments">,
InGroup<IgnoredAttributes>;
def warn_sync_fetch_and_nand_semantics_change : Warning<
@@ -9886,13 +10088,20 @@ def err_opencl_pointer_to_type : Error<
"pointer to type %0 is invalid in OpenCL">;
def err_opencl_type_can_only_be_used_as_function_parameter : Error <
"type %0 can only be used as a function parameter in OpenCL">;
+def err_opencl_type_not_found : Error<
+ "%0 type %1 not found; include the base header with -finclude-default-header">;
def warn_opencl_attr_deprecated_ignored : Warning <
"%0 attribute is deprecated and ignored in OpenCL version %1">,
InGroup<IgnoredAttributes>;
def err_opencl_variadic_function : Error<
"invalid prototype, variadic arguments are not allowed in OpenCL">;
def err_opencl_requires_extension : Error<
- "use of %select{type|declaration}0 %1 requires %2 extension to be enabled">;
+ "use of %select{type|declaration}0 %1 requires %2 support">;
+def ext_opencl_double_without_pragma : Extension<
+ "Clang permits use of type 'double' regardless of pragma if 'cl_khr_fp64' is"
+ " supported">;
+def err_opencl_double_requires_extension : Error<
+ "use of type 'double' requires %select{cl_khr_fp64|cl_khr_fp64 and __opencl_c_fp64}0 support">;
def warn_opencl_generic_address_space_arg : Warning<
"passing non-generic address space pointer to %0"
" may cause dynamic conversion affecting performance">,
@@ -9912,7 +10121,8 @@ def err_opencl_builtin_pipe_invalid_access_modifier : Error<
def err_opencl_invalid_access_qualifier : Error<
"access qualifier can only be used for pipe and image type">;
def err_opencl_invalid_read_write : Error<
- "access qualifier %0 can not be used for %1 %select{|prior to OpenCL version 2.0}2">;
+ "access qualifier %0 can not be used for %1 %select{|prior to OpenCL C version 2.0 or in version 3.0 "
+ "and without __opencl_c_read_write_images feature}2">;
def err_opencl_multiple_access_qualifiers : Error<
"multiple access qualifiers">;
def note_opencl_typedef_access_qualifier : Note<
@@ -9947,9 +10157,9 @@ def err_opencl_enqueue_kernel_blocks_no_args : Error<
def err_opencl_builtin_expected_type : Error<
"illegal call to %0, expected %1 argument type">;
-// OpenCL v2.2 s2.1.2.3 - Vector Component Access
+// OpenCL v3.0 s6.3.7 - Vector Components
def ext_opencl_ext_vector_type_rgba_selector: ExtWarn<
- "vector component name '%0' is an OpenCL version 2.2 feature">,
+ "vector component name '%0' is an OpenCL C version 3.0 feature">,
InGroup<OpenCLUnsupportedRGBA>;
def err_openclcxx_placement_new : Error<
@@ -10101,8 +10311,6 @@ def warn_omp_alignment_not_power_of_two : Warning<
InGroup<OpenMPClauses>;
def err_omp_invalid_target_decl : Error<
"%0 used in declare target directive is not a variable or a function name">;
-def err_omp_declare_target_multiple : Error<
- "%0 appears multiple times in clauses on the same declare target directive">;
def err_omp_declare_target_to_and_link : Error<
"%0 must not appear in both clauses 'to' and 'link'">;
def warn_omp_not_in_target_context : Warning<
@@ -10465,6 +10673,8 @@ def err_omp_expected_private_copy_for_allocate : Error<
"the referenced item is not found in any private clause on the same directive">;
def err_omp_stmt_depends_on_loop_counter : Error<
"the loop %select{initializer|condition}0 expression depends on the current loop control variable">;
+def err_omp_invariant_dependency : Error<
+ "expected loop invariant expression">;
def err_omp_invariant_or_linear_dependency : Error<
"expected loop invariant expression or '<invariant1> * %0 + <invariant2>' kind of expression">;
def err_omp_wrong_dependency_iterator_type : Error<
@@ -10554,6 +10764,23 @@ def note_omp_protected_structured_block
: Note<"jump bypasses OpenMP structured block">;
def note_omp_exits_structured_block
: Note<"jump exits scope of OpenMP structured block">;
+def err_omp_interop_variable_expected : Error<
+ "expected%select{| non-const}0 variable of type 'omp_interop_t'">;
+def err_omp_interop_variable_wrong_type : Error<
+ "interop variable must be of type 'omp_interop_t'">;
+def err_omp_interop_prefer_type : Error<
+ "prefer_list item must be a string literal or constant integral "
+ "expression">;
+def err_omp_interop_bad_depend_clause : Error<
+ "'depend' clause requires the 'targetsync' interop type">;
+def err_omp_interop_var_multiple_actions : Error<
+ "interop variable %0 used in multiple action clauses">;
+def err_omp_dispatch_statement_call
+ : Error<"statement after '#pragma omp dispatch' must be a direct call"
+ " to a target function or an assignment to one">;
+def err_omp_unroll_full_variable_trip_count : Error<
+ "loop to be fully unrolled must have a constant trip count">;
+def note_omp_directive_here : Note<"'%0' directive found here">;
} // end of OpenMP category
let CategoryName = "Related Result Type Issue" in {
@@ -11119,4 +11346,10 @@ def err_tcb_conflicting_attributes : Error<
def warn_tcb_enforcement_violation : Warning<
"calling %0 is a violation of trusted computing base '%1'">,
InGroup<DiagGroup<"tcb-enforcement">>;
+
+// RISC-V builtin required extension warning
+def err_riscv_builtin_requires_extension : Error<
+ "builtin requires '%0' extension support to be enabled">;
+def err_riscv_builtin_invalid_lmul : Error<
+ "LMUL argument must be in the range [0,3] or [5,7]">;
} // end of sema component.
diff --git a/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/clang/include/clang/Basic/DiagnosticSerializationKinds.td
index ce48833a8703..bf3221be004d 100644
--- a/clang/include/clang/Basic/DiagnosticSerializationKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -69,6 +69,9 @@ def err_module_file_not_module : Error<
"AST file '%0' was not built as a module">, DefaultFatal;
def err_module_file_missing_top_level_submodule : Error<
"module file '%0' is missing its top-level submodule">, DefaultFatal;
+def note_module_file_conflict : Note<
+ "this is generally caused by modules with the same name found in multiple "
+ "paths">;
def remark_module_import : Remark<
"importing module '%0'%select{| into '%3'}2 from '%1'">,
diff --git a/clang/include/clang/Basic/DirectoryEntry.h b/clang/include/clang/Basic/DirectoryEntry.h
index e0f4ae28321a..edb8031a20b8 100644
--- a/clang/include/clang/Basic/DirectoryEntry.h
+++ b/clang/include/clang/Basic/DirectoryEntry.h
@@ -120,8 +120,7 @@ public:
MapEntryOptionalStorage() : MaybeRef(optional_none_tag()) {}
template <class... ArgTypes>
- explicit MapEntryOptionalStorage(llvm::optional_detail::in_place_t,
- ArgTypes &&...Args)
+ explicit MapEntryOptionalStorage(llvm::in_place_t, ArgTypes &&...Args)
: MaybeRef(std::forward<ArgTypes>(Args)...) {}
void reset() { MaybeRef = optional_none_tag(); }
diff --git a/clang/include/clang/Basic/Features.def b/clang/include/clang/Basic/Features.def
index 5424da67b62d..6ca0e646b865 100644
--- a/clang/include/clang/Basic/Features.def
+++ b/clang/include/clang/Basic/Features.def
@@ -49,6 +49,7 @@ FEATURE(memtag_sanitizer, LangOpts.Sanitize.has(SanitizerKind::MemTag))
FEATURE(xray_instrument, LangOpts.XRayInstrument)
FEATURE(undefined_behavior_sanitizer,
LangOpts.Sanitize.hasOneOf(SanitizerKind::Undefined))
+FEATURE(coverage_sanitizer, LangOpts.SanitizeCoverage)
FEATURE(assume_nonnull, true)
FEATURE(attribute_analyzer_noreturn, true)
FEATURE(attribute_availability, true)
@@ -92,6 +93,9 @@ FEATURE(memory_sanitizer,
FEATURE(thread_sanitizer, LangOpts.Sanitize.has(SanitizerKind::Thread))
FEATURE(dataflow_sanitizer, LangOpts.Sanitize.has(SanitizerKind::DataFlow))
FEATURE(scudo, LangOpts.Sanitize.hasOneOf(SanitizerKind::Scudo))
+FEATURE(swiftasynccc,
+ PP.getTargetInfo().checkCallingConvention(CC_SwiftAsync) ==
+ clang::TargetInfo::CCCR_OK)
// Objective-C features
FEATURE(objc_arr, LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
FEATURE(objc_arc, LangOpts.ObjCAutoRefCount)
@@ -253,9 +257,12 @@ EXTENSION(cxx_variable_templates, LangOpts.CPlusPlus)
EXTENSION(overloadable_unmarked, true)
EXTENSION(pragma_clang_attribute_namespaces, true)
EXTENSION(pragma_clang_attribute_external_declaration, true)
+EXTENSION(statement_attributes_with_gnu_syntax, true)
EXTENSION(gnu_asm, LangOpts.GNUAsm)
EXTENSION(gnu_asm_goto_with_outputs, LangOpts.GNUAsm)
EXTENSION(matrix_types, LangOpts.MatrixTypes)
+EXTENSION(matrix_types_scalar_division, true)
+EXTENSION(cxx_attributes_on_using_declarations, LangOpts.CPlusPlus11)
FEATURE(cxx_abi_relative_vtable, LangOpts.CPlusPlus && LangOpts.RelativeCXXABIVTables)
diff --git a/clang/include/clang/Basic/IdentifierTable.h b/clang/include/clang/Basic/IdentifierTable.h
index 204a0f0cc0a5..f2379c7ddfbd 100644
--- a/clang/include/clang/Basic/IdentifierTable.h
+++ b/clang/include/clang/Basic/IdentifierTable.h
@@ -40,6 +40,14 @@ class LangOptions;
class MultiKeywordSelector;
class SourceLocation;
+enum class ReservedIdentifierStatus {
+ NotReserved = 0,
+ StartsWithUnderscoreAtGlobalScope,
+ StartsWithDoubleUnderscore,
+ StartsWithUnderscoreFollowedByCapitalLetter,
+ ContainsDoubleUnderscore,
+};
+
/// A simple pair of identifier info and location.
using IdentifierLocPair = std::pair<IdentifierInfo *, SourceLocation>;
@@ -48,7 +56,7 @@ using IdentifierLocPair = std::pair<IdentifierInfo *, SourceLocation>;
/// of a pointer to one of these classes.
enum { IdentifierInfoAlignment = 8 };
-static constexpr int ObjCOrBuiltinIDBits = 15;
+static constexpr int ObjCOrBuiltinIDBits = 16;
/// One of these records is kept for each identifier that
/// is lexed. This contains information about whether the token was \#define'd,
@@ -385,14 +393,7 @@ public:
/// Determine whether \p this is a name reserved for the implementation (C99
/// 7.1.3, C++ [lib.global.names]).
- bool isReservedName(bool doubleUnderscoreOnly = false) const {
- if (getLength() < 2)
- return false;
- const char *Name = getNameStart();
- return Name[0] == '_' &&
- (Name[1] == '_' ||
- (Name[1] >= 'A' && Name[1] <= 'Z' && !doubleUnderscoreOnly));
- }
+ ReservedIdentifierStatus isReserved(const LangOptions &LangOpts) const;
/// Provide less than operator for lexicographical sorting.
bool operator<(const IdentifierInfo &RHS) const {
diff --git a/clang/include/clang/Basic/LLVM.h b/clang/include/clang/Basic/LLVM.h
index 02e422051071..4ac2d744af3c 100644
--- a/clang/include/clang/Basic/LLVM.h
+++ b/clang/include/clang/Basic/LLVM.h
@@ -22,6 +22,9 @@
// None.h includes an enumerator that is desired & cannot be forward declared
// without a definition of NoneType.
#include "llvm/ADT/None.h"
+// Add this header as a workaround to prevent a `too few template arguments
+// for class template 'SmallVector'` build error with compilers such as XL.
+#include "llvm/ADT/SmallVector.h"
namespace llvm {
// ADT's.
diff --git a/clang/include/clang/Basic/LangOptions.def b/clang/include/clang/Basic/LangOptions.def
index c01f0cca9c9c..08b8d8851afa 100644
--- a/clang/include/clang/Basic/LangOptions.def
+++ b/clang/include/clang/Basic/LangOptions.def
@@ -98,6 +98,8 @@ BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
"Objective-C auto-synthesized properties")
BENIGN_LANGOPT(EncodeExtendedBlockSig , 1, 0,
"Encoding extended block type signature")
+BENIGN_LANGOPT(EncodeCXXClassTemplateSpec , 1, 0,
+ "Fully encode c++ class template specialization")
BENIGN_LANGOPT(ObjCInferRelatedResultType , 1, 1,
"Objective-C related result type inference")
LANGOPT(AppExt , 1, 0, "Objective-C App Extension")
@@ -107,6 +109,7 @@ LANGOPT(Bool , 1, 0, "bool, true, and false keywords")
LANGOPT(Half , 1, 0, "half keyword")
LANGOPT(WChar , 1, CPlusPlus, "wchar_t keyword")
LANGOPT(Char8 , 1, 0, "char8_t keyword")
+LANGOPT(IEEE128 , 1, 0, "__ieee128 keyword")
LANGOPT(DeclSpecKeyword , 1, 0, "__declspec keyword")
BENIGN_LANGOPT(DollarIdents , 1, 1, "'$' in identifiers")
BENIGN_LANGOPT(AsmPreprocessor, 1, 0, "preprocessor in asm mode")
@@ -123,12 +126,15 @@ LANGOPT(WritableStrings , 1, 0, "writable string support")
LANGOPT(ConstStrings , 1, 0, "const-qualified string support")
ENUM_LANGOPT(LaxVectorConversions, LaxVectorConversionKind, 2,
LaxVectorConversionKind::All, "lax vector conversions")
+ENUM_LANGOPT(AltivecSrcCompat, AltivecSrcCompatKind, 2,
+ AltivecSrcCompatKind::Default, "Altivec source compatibility")
LANGOPT(ConvergentFunctions, 1, 1, "Assume convergent functions")
LANGOPT(AltiVec , 1, 0, "AltiVec-style vector initializers")
LANGOPT(ZVector , 1, 0, "System z vector extensions")
LANGOPT(Exceptions , 1, 0, "exception handling")
LANGOPT(ObjCExceptions , 1, 0, "Objective-C exceptions")
LANGOPT(CXXExceptions , 1, 0, "C++ exceptions")
+LANGOPT(EHAsynch , 1, 0, "C/C++ EH Asynch exceptions")
ENUM_LANGOPT(ExceptionHandling, ExceptionHandlingKind, 3,
ExceptionHandlingKind::None, "exception handling")
LANGOPT(IgnoreExceptions , 1, 0, "ignore exceptions")
@@ -193,6 +199,8 @@ COMPATIBLE_LANGOPT(Deprecated , 1, 0, "__DEPRECATED predefined macro")
COMPATIBLE_LANGOPT(FastMath , 1, 0, "fast FP math optimizations, and __FAST_MATH__ predefined macro")
COMPATIBLE_LANGOPT(FiniteMathOnly , 1, 0, "__FINITE_MATH_ONLY__ predefined macro")
COMPATIBLE_LANGOPT(UnsafeFPMath , 1, 0, "Unsafe Floating Point Math")
+COMPATIBLE_LANGOPT(ProtectParens , 1, 0, "optimizer honors parentheses "
+ "when floating-point expressions are evaluated")
BENIGN_LANGOPT(AllowFPReassoc , 1, 0, "Permit Floating Point reassociation")
BENIGN_LANGOPT(NoHonorNaNs , 1, 0, "Permit Floating Point optimization without regard to NaN")
BENIGN_LANGOPT(NoHonorInfs , 1, 0, "Permit Floating Point optimization without regard to infinities")
@@ -215,6 +223,8 @@ LANGOPT(OpenCL , 1, 0, "OpenCL")
LANGOPT(OpenCLVersion , 32, 0, "OpenCL C version")
LANGOPT(OpenCLCPlusPlus , 1, 0, "C++ for OpenCL")
LANGOPT(OpenCLCPlusPlusVersion , 32, 0, "C++ for OpenCL version")
+LANGOPT(OpenCLGenericAddressSpace, 1, 0, "OpenCL generic keyword")
+LANGOPT(OpenCLPipe , 1, 0, "OpenCL pipe keyword")
LANGOPT(NativeHalfType , 1, 0, "Native half type support")
LANGOPT(NativeHalfArgsAndReturns, 1, 0, "Native half args and returns")
LANGOPT(HalfArgsAndReturns, 1, 0, "half args and returns")
@@ -230,8 +240,8 @@ LANGOPT(OpenMPCUDAForceFullRuntime , 1, 0, "Force to use full runtime in all con
LANGOPT(OpenMPCUDANumSMs , 32, 0, "Number of SMs for CUDA devices.")
LANGOPT(OpenMPCUDABlocksPerSM , 32, 0, "Number of blocks per SM for CUDA devices.")
LANGOPT(OpenMPCUDAReductionBufNum , 32, 1024, "Number of the reduction records in the intermediate reduction buffer used for the teams reductions.")
+LANGOPT(OpenMPTargetNewRuntime , 1, 0, "Use the new bitcode library for OpenMP offloading")
LANGOPT(OpenMPOptimisticCollapse , 1, 0, "Use at most 32 bits to represent the collapsed loop nest counter.")
-LANGOPT(OpenMPCUDATargetParallel, 1, 0, "Support parallel execution of target region on Cuda-based devices.")
LANGOPT(RenderScript , 1, 0, "RenderScript")
LANGOPT(CUDAIsDevice , 1, 0, "compiling for CUDA device")
@@ -240,13 +250,13 @@ LANGOPT(CUDAHostDeviceConstexpr, 1, 1, "treating unattributed constexpr function
LANGOPT(CUDADeviceApproxTranscendentals, 1, 0, "using approximate transcendental functions")
LANGOPT(GPURelocatableDeviceCode, 1, 0, "generate relocatable device code")
LANGOPT(GPUAllowDeviceInit, 1, 0, "allowing device side global init functions for HIP")
-LANGOPT(GPUMaxThreadsPerBlock, 32, 256, "default max threads per block for kernel launch bounds for HIP")
+LANGOPT(GPUMaxThreadsPerBlock, 32, 1024, "default max threads per block for kernel launch bounds for HIP")
LANGOPT(GPUDeferDiag, 1, 0, "defer host/device related diagnostic messages for CUDA/HIP")
LANGOPT(GPUExcludeWrongSideOverloads, 1, 0, "always exclude wrong side overloads in overloading resolution for CUDA/HIP")
-LANGOPT(SYCL , 1, 0, "SYCL")
LANGOPT(SYCLIsDevice , 1, 0, "Generate code for SYCL device")
-ENUM_LANGOPT(SYCLVersion , SYCLMajorVersion, 1, SYCL_None, "Version of the SYCL standard used")
+LANGOPT(SYCLIsHost , 1, 0, "SYCL host compilation")
+ENUM_LANGOPT(SYCLVersion , SYCLMajorVersion, 2, SYCL_None, "Version of the SYCL standard used")
LANGOPT(HIPUseNewLaunchAPI, 1, 0, "Use new kernel launching API for HIP")
@@ -260,9 +270,12 @@ BENIGN_LANGOPT(ModulesDebugInfo , 1, 0, "Modules debug info")
BENIGN_LANGOPT(ElideConstructors , 1, 1, "C++ copy constructor elision")
BENIGN_LANGOPT(DumpRecordLayouts , 1, 0, "dumping the layout of IRgen'd records")
BENIGN_LANGOPT(DumpRecordLayoutsSimple , 1, 0, "dumping the layout of IRgen'd records in a simple form")
+BENIGN_LANGOPT(DumpRecordLayoutsCanonical , 1, 0, "dumping the AST layout of records using canonical field types")
+BENIGN_LANGOPT(DumpRecordLayoutsComplete , 1, 0, "dumping the AST layout of all complete records")
BENIGN_LANGOPT(DumpVTableLayouts , 1, 0, "dumping the layouts of emitted vtables")
LANGOPT(NoConstantCFStrings , 1, 0, "no constant CoreFoundation strings")
BENIGN_LANGOPT(InlineVisibilityHidden , 1, 0, "hidden visibility for inline C++ methods")
+BENIGN_LANGOPT(IgnoreXCOFFVisibility, 1, 0, "All the visibility attributes that are specified in the source code are ignored in aix XCOFF.")
BENIGN_LANGOPT(VisibilityInlinesHiddenStaticLocalVar, 1, 0,
"hidden visibility for static local variables in inline C++ "
"methods when -fvisibility-inlines hidden is enabled")
@@ -292,6 +305,8 @@ LANGOPT(ObjCSubscriptingLegacyRuntime , 1, 0, "Subscripting support in l
BENIGN_LANGOPT(CompatibilityQualifiedIdBlockParamTypeChecking, 1, 0,
"compatibility mode for type checking block parameters "
"involving qualified id types")
+LANGOPT(ObjCDisableDirectMethodsForTesting, 1, 0,
+ "Disable recognition of objc_direct methods")
LANGOPT(CFProtectionBranch , 1, 0, "Control-Flow Branch Protection enabled")
LANGOPT(FakeAddressSpaceMap , 1, 0, "OpenCL fake address space map")
ENUM_LANGOPT(AddressSpaceMapMangling , AddrSpaceMapMangling, 2, ASMM_Target, "OpenCL address space map mangling mode")
@@ -406,6 +421,10 @@ LANGOPT(RelativeCXXABIVTables, 1, 0,
LANGOPT(ArmSveVectorBits, 32, 0, "SVE vector size in bits")
+ENUM_LANGOPT(ExtendIntArgs, ExtendArgsKind, 1, ExtendArgsKind::ExtendTo32,
+ "Controls how scalar integer arguments are extended in calls "
+ "to unprototyped and varargs functions")
+
#undef LANGOPT
#undef COMPATIBLE_LANGOPT
#undef BENIGN_LANGOPT
diff --git a/clang/include/clang/Basic/LangOptions.h b/clang/include/clang/Basic/LangOptions.h
index d8bd2a8b52fc..71cf0c65e692 100644
--- a/clang/include/clang/Basic/LangOptions.h
+++ b/clang/include/clang/Basic/LangOptions.h
@@ -19,11 +19,11 @@
#include "clang/Basic/LangStandard.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/TargetCXXABI.h"
#include "clang/Basic/Visibility.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCTargetOptions.h"
#include <string>
#include <vector>
@@ -124,11 +124,16 @@ public:
MSVC2017_5 = 1912,
MSVC2017_7 = 1914,
MSVC2019 = 1920,
+ MSVC2019_8 = 1928,
};
enum SYCLMajorVersion {
SYCL_None,
SYCL_2017,
+ SYCL_2020,
+ // The "default" SYCL version to be used when none is specified on the
+ // frontend command line.
+ SYCL_Default = SYCL_2020
};
/// Clang versions with different platform ABI conformance.
@@ -163,13 +168,18 @@ public:
Ver9,
/// Attempt to be ABI-compatible with code generated by Clang 11.0.x
- /// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit
+ /// (git 2e10b7a39b93). This causes clang to pass unions with a 256-bit
/// vector member on the stack instead of using registers, to not properly
/// mangle substitutions for template names in some cases, and to mangle
/// declaration template arguments without a cast to the parameter type
/// even when that can lead to mangling collisions.
Ver11,
+ /// Attempt to be ABI-compatible with code generated by Clang 12.0.x
+ /// (git 8e464dd76bef). This causes clang to mangle lambdas within
+ /// global-scope inline variables incorrectly.
+ Ver12,
+
/// Conform to the underlying platform's C and C++ ABIs as closely
/// as we can.
Latest
@@ -221,7 +231,7 @@ public:
};
/// Possible exception handling behavior.
- using ExceptionHandlingKind = llvm::ExceptionHandling;
+ enum class ExceptionHandlingKind { None, SjLj, WinEH, DwarfCFI, Wasm };
enum class LaxVectorConversionKind {
/// Permit no implicit vector bitcasts.
@@ -234,6 +244,18 @@ public:
All,
};
+ enum class AltivecSrcCompatKind {
+ // All vector compares produce scalars except vector pixel and vector bool.
+ // The types vector pixel and vector bool return vector results.
+ Mixed,
+ // All vector compares produce vector results as in GCC.
+ GCC,
+ // All vector compares produce scalars as in XL.
+ XL,
+ // Default clang behavior.
+ Default = Mixed,
+ };
+
enum class SignReturnAddressScopeKind {
/// No signing for any function.
None,
@@ -257,16 +279,25 @@ public:
Single
};
+ enum class ExtendArgsKind {
+ /// Integer arguments are sign or zero extended to 32/64 bits
+ /// during default argument promotions.
+ ExtendTo32,
+ ExtendTo64
+ };
+
public:
/// The used language standard.
LangStandard::Kind LangStd;
/// Set of enabled sanitizers.
SanitizerSet Sanitize;
+ /// Is at least one coverage instrumentation type enabled.
+ bool SanitizeCoverage = false;
- /// Paths to blacklist files specifying which objects
+ /// Paths to files specifying which objects
/// (files, functions, variables) should not be instrumented.
- std::vector<std::string> SanitizerBlacklistFiles;
+ std::vector<std::string> NoSanitizeFiles;
/// Paths to the XRay "always instrument" files specifying which
/// objects (files, functions, variables) should be imbued with the XRay
@@ -331,6 +362,16 @@ public:
/// host code generation.
std::string OMPHostIRFile;
+ /// The user provided compilation unit ID, if non-empty. This is used to
+ /// externalize static variables which is needed to support accessing static
+ /// device variables in host code for single source offloading languages
+ /// like CUDA/HIP.
+ std::string CUID;
+
+ /// C++ ABI to compile with, if specified by the frontend through -fc++-abi=.
+ /// This overrides the default ABI used by the target.
+ llvm::Optional<TargetCXXABI::Kind> CXXABI;
+
/// Indicates whether the front-end is explicitly told that the
/// input is a header file (i.e. -x c-header).
bool IsHeaderFile = false;
@@ -403,20 +444,22 @@ public:
}
bool hasSjLjExceptions() const {
- return getExceptionHandling() == llvm::ExceptionHandling::SjLj;
+ return getExceptionHandling() == ExceptionHandlingKind::SjLj;
}
bool hasSEHExceptions() const {
- return getExceptionHandling() == llvm::ExceptionHandling::WinEH;
+ return getExceptionHandling() == ExceptionHandlingKind::WinEH;
}
bool hasDWARFExceptions() const {
- return getExceptionHandling() == llvm::ExceptionHandling::DwarfCFI;
+ return getExceptionHandling() == ExceptionHandlingKind::DwarfCFI;
}
bool hasWasmExceptions() const {
- return getExceptionHandling() == llvm::ExceptionHandling::Wasm;
+ return getExceptionHandling() == ExceptionHandlingKind::Wasm;
}
+
+ bool isSYCL() const { return SYCLIsDevice || SYCLIsHost; }
};
/// Floating point control options
@@ -654,7 +697,11 @@ enum TranslationUnitKind {
TU_Prefix,
/// The translation unit is a module.
- TU_Module
+ TU_Module,
+
+ /// The translation unit is a complete translation unit that we might
+ /// incrementally extend later.
+ TU_Incremental
};
} // namespace clang
diff --git a/clang/include/clang/Basic/LangStandard.h b/clang/include/clang/Basic/LangStandard.h
index f82ce05a6369..b0785409628c 100644
--- a/clang/include/clang/Basic/LangStandard.h
+++ b/clang/include/clang/Basic/LangStandard.h
@@ -32,6 +32,7 @@ enum class Language : uint8_t {
ObjC,
ObjCXX,
OpenCL,
+ OpenCLCXX,
CUDA,
RenderScript,
HIP,
diff --git a/clang/include/clang/Basic/LangStandards.def b/clang/include/clang/Basic/LangStandards.def
index f086d8a43ccb..2cfeb68e56d6 100644
--- a/clang/include/clang/Basic/LangStandards.def
+++ b/clang/include/clang/Basic/LangStandards.def
@@ -194,11 +194,11 @@ LANGSTANDARD_ALIAS_DEPR(openclcpp, "CLC++")
// CUDA
LANGSTANDARD(cuda, "cuda", CUDA, "NVIDIA CUDA(tm)",
- LineComment | CPlusPlus | Digraphs)
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | Digraphs)
// HIP
LANGSTANDARD(hip, "hip", HIP, "HIP",
- LineComment | CPlusPlus | Digraphs)
+ LineComment | CPlusPlus | CPlusPlus11 | CPlusPlus14 | Digraphs)
#undef LANGSTANDARD
#undef LANGSTANDARD_ALIAS
diff --git a/clang/include/clang/Basic/Module.h b/clang/include/clang/Basic/Module.h
index f5b176f8d20b..3476b05d2e92 100644
--- a/clang/include/clang/Basic/Module.h
+++ b/clang/include/clang/Basic/Module.h
@@ -133,9 +133,7 @@ public:
std::string PresumedModuleMapFile;
/// The umbrella header or directory.
- llvm::PointerUnion<const FileEntryRef::MapEntry *,
- const DirectoryEntryRef::MapEntry *>
- Umbrella;
+ llvm::PointerUnion<const FileEntry *, const DirectoryEntry *> Umbrella;
/// The module signature.
ASTFileSignature Signature;
@@ -143,6 +141,9 @@ public:
/// The name of the umbrella entry, as written in the module map.
std::string UmbrellaAsWritten;
+ // The path to the umbrella entry relative to the root module's \c Directory.
+ std::string UmbrellaRelativeToRootModuleDirectory;
+
/// The module through which entities defined in this module will
/// eventually be exposed, for use in "private" modules.
std::string ExportAsModule;
@@ -190,18 +191,20 @@ public:
/// file.
struct Header {
std::string NameAsWritten;
- OptionalFileEntryRefDegradesToFileEntryPtr Entry;
+ std::string PathRelativeToRootModuleDirectory;
+ const FileEntry *Entry;
- explicit operator bool() { return Entry != None; }
+ explicit operator bool() { return Entry; }
};
/// Information about a directory name as found in the module map
/// file.
struct DirectoryName {
std::string NameAsWritten;
- OptionalDirectoryEntryRefDegradesToDirectoryEntryPtr Entry;
+ std::string PathRelativeToRootModuleDirectory;
+ const DirectoryEntry *Entry;
- explicit operator bool() { return Entry != None; }
+ explicit operator bool() { return Entry; }
};
/// The headers that are part of this module.
@@ -546,15 +549,16 @@ public:
/// Retrieve the header that serves as the umbrella header for this
/// module.
Header getUmbrellaHeader() const {
- if (auto *ME = Umbrella.dyn_cast<const FileEntryRef::MapEntry *>())
- return Header{UmbrellaAsWritten, FileEntryRef(*ME)};
+ if (auto *FE = Umbrella.dyn_cast<const FileEntry *>())
+ return Header{UmbrellaAsWritten, UmbrellaRelativeToRootModuleDirectory,
+ FE};
return Header{};
}
/// Determine whether this module has an umbrella directory that is
/// not based on an umbrella header.
bool hasUmbrellaDir() const {
- return Umbrella && Umbrella.is<const DirectoryEntryRef::MapEntry *>();
+ return Umbrella && Umbrella.is<const DirectoryEntry *>();
}
/// Add a top-level header associated with this module.
@@ -639,7 +643,7 @@ public:
}
/// Print the module map for this module to the given stream.
- void print(raw_ostream &OS, unsigned Indent = 0) const;
+ void print(raw_ostream &OS, unsigned Indent = 0, bool Dump = false) const;
/// Dump the contents of this module to the given output stream.
void dump() const;
diff --git a/clang/include/clang/Basic/NoSanitizeList.h b/clang/include/clang/Basic/NoSanitizeList.h
new file mode 100644
index 000000000000..3f80e0fdedda
--- /dev/null
+++ b/clang/include/clang/Basic/NoSanitizeList.h
@@ -0,0 +1,50 @@
+//===--- NoSanitizeList.h - List of ignored entities for sanitizers -*- C++ -*-===//
+//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// User-provided list of ignored entities used to disable/alter
+// instrumentation done in sanitizers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_NOSANITIZELIST_H
+#define LLVM_CLANG_BASIC_NOSANITIZELIST_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+#include <vector>
+
+namespace clang {
+
+class SanitizerMask;
+class SourceManager;
+class SanitizerSpecialCaseList;
+
+class NoSanitizeList {
+ std::unique_ptr<SanitizerSpecialCaseList> SSCL;
+ SourceManager &SM;
+
+public:
+ NoSanitizeList(const std::vector<std::string> &NoSanitizeListPaths,
+ SourceManager &SM);
+ ~NoSanitizeList();
+ bool containsGlobal(SanitizerMask Mask, StringRef GlobalName,
+ StringRef Category = StringRef()) const;
+ bool containsType(SanitizerMask Mask, StringRef MangledTypeName,
+ StringRef Category = StringRef()) const;
+ bool containsFunction(SanitizerMask Mask, StringRef FunctionName) const;
+ bool containsFile(SanitizerMask Mask, StringRef FileName,
+ StringRef Category = StringRef()) const;
+ bool containsLocation(SanitizerMask Mask, SourceLocation Loc,
+ StringRef Category = StringRef()) const;
+};
+
+} // end namespace clang
+
+#endif
diff --git a/clang/include/clang/Basic/OpenCLExtensions.def b/clang/include/clang/Basic/OpenCLExtensions.def
index 801916c3ab94..a053a0e9adb5 100644
--- a/clang/include/clang/Basic/OpenCLExtensions.def
+++ b/clang/include/clang/Basic/OpenCLExtensions.def
@@ -16,8 +16,12 @@
// If extensions are to be enumerated with information about whether
// an extension is core or optional core and minimum OpenCL version
// when an extension becomes available,
-// define OPENCL_GENERIC_EXTENSION(ext, avail, core, opt) where
+// define OPENCL_GENERIC_EXTENSION(ext, pragma, avail, core, opt) where
// ext - name of the extension or optional core feature.
+// pragma - true if extension needs pragmas or false otherwise.
+// NOTE: extension pragma without any documentation detailing
+// its behavior explicitly is deprecated. Therefore the default
+// value is false.
// avail - minimum OpenCL version supporting it.
// core - OpenCL versions mask when the extension becomes core feature.
// 0U indicates not a core feature.
@@ -50,55 +54,69 @@
#endif // OPENCL_GENERIC_EXTENSION
// Declaration helpers
-#define OPENCL_EXTENSION(ext, avail) OPENCL_GENERIC_EXTENSION(ext, avail, 0U, 0U)
-#define OPENCL_COREFEATURE(ext, avail, core) OPENCL_GENERIC_EXTENSION(ext, avail, core, 0U)
-#define OPENCL_OPTIONALCOREFEATURE(ext, avail, opt) OPENCL_GENERIC_EXTENSION(ext, avail, 0U, opt)
+#define OPENCL_EXTENSION(ext, pragma, avail) OPENCL_GENERIC_EXTENSION(ext, pragma, avail, 0U, 0U)
+#define OPENCL_COREFEATURE(ext, pragma, avail, core) OPENCL_GENERIC_EXTENSION(ext, pragma, avail, core, 0U)
+#define OPENCL_OPTIONALCOREFEATURE(ext, pragma, avail, opt) OPENCL_GENERIC_EXTENSION(ext, pragma, avail, 0U, opt)
// OpenCL 1.0.
-OPENCL_COREFEATURE(cl_khr_byte_addressable_store, 100, OCL_C_11P)
-OPENCL_COREFEATURE(cl_khr_global_int32_base_atomics, 100, OCL_C_11P)
-OPENCL_COREFEATURE(cl_khr_global_int32_extended_atomics, 100, OCL_C_11P)
-OPENCL_COREFEATURE(cl_khr_local_int32_base_atomics, 100, OCL_C_11P)
-OPENCL_COREFEATURE(cl_khr_local_int32_extended_atomics, 100, OCL_C_11P)
-OPENCL_OPTIONALCOREFEATURE(cl_khr_fp64, 100, OCL_C_12P)
-OPENCL_EXTENSION(cl_khr_fp16, 100)
-OPENCL_EXTENSION(cl_khr_int64_base_atomics, 100)
-OPENCL_EXTENSION(cl_khr_int64_extended_atomics, 100)
-OPENCL_GENERIC_EXTENSION(cl_khr_3d_image_writes, 100, OCL_C_20, OCL_C_30)
+OPENCL_COREFEATURE(cl_khr_byte_addressable_store, true, 100, OCL_C_11P)
+OPENCL_COREFEATURE(cl_khr_global_int32_base_atomics, true, 100, OCL_C_11P)
+OPENCL_COREFEATURE(cl_khr_global_int32_extended_atomics, true, 100, OCL_C_11P)
+OPENCL_COREFEATURE(cl_khr_local_int32_base_atomics, true, 100, OCL_C_11P)
+OPENCL_COREFEATURE(cl_khr_local_int32_extended_atomics, true, 100, OCL_C_11P)
+OPENCL_OPTIONALCOREFEATURE(cl_khr_fp64, true, 100, OCL_C_12P)
+OPENCL_EXTENSION(cl_khr_fp16, true, 100)
+OPENCL_EXTENSION(cl_khr_int64_base_atomics, true, 100)
+OPENCL_EXTENSION(cl_khr_int64_extended_atomics, true, 100)
+OPENCL_GENERIC_EXTENSION(cl_khr_3d_image_writes, true, 100, OCL_C_20, OCL_C_30)
// EMBEDDED_PROFILE
-OPENCL_EXTENSION(cles_khr_int64, 110)
+OPENCL_EXTENSION(cles_khr_int64, true, 110)
// OpenCL 1.2.
-OPENCL_EXTENSION(cl_khr_depth_images, 120)
-OPENCL_EXTENSION(cl_khr_gl_msaa_sharing, 120)
+OPENCL_EXTENSION(cl_khr_depth_images, true, 120)
+OPENCL_EXTENSION(cl_khr_gl_msaa_sharing, true, 120)
// OpenCL 2.0.
-OPENCL_EXTENSION(cl_khr_mipmap_image, 200)
-OPENCL_EXTENSION(cl_khr_mipmap_image_writes, 200)
-OPENCL_EXTENSION(cl_khr_srgb_image_writes, 200)
-OPENCL_EXTENSION(cl_khr_subgroups, 200)
+OPENCL_EXTENSION(cl_khr_mipmap_image, true, 200)
+OPENCL_EXTENSION(cl_khr_mipmap_image_writes, true, 200)
+OPENCL_EXTENSION(cl_khr_srgb_image_writes, true, 200)
+OPENCL_EXTENSION(cl_khr_subgroups, true, 200)
// Clang Extensions.
-OPENCL_EXTENSION(cl_clang_storage_class_specifiers, 100)
-OPENCL_EXTENSION(__cl_clang_function_pointers, 100)
-OPENCL_EXTENSION(__cl_clang_variadic_functions, 100)
+OPENCL_EXTENSION(cl_clang_storage_class_specifiers, true, 100)
+OPENCL_EXTENSION(__cl_clang_function_pointers, true, 100)
+OPENCL_EXTENSION(__cl_clang_variadic_functions, true, 100)
+OPENCL_EXTENSION(__cl_clang_non_portable_kernel_param_types, true, 100)
+OPENCL_EXTENSION(__cl_clang_bitfields, true, 100)
// AMD OpenCL extensions
-OPENCL_EXTENSION(cl_amd_media_ops, 100)
-OPENCL_EXTENSION(cl_amd_media_ops2, 100)
+OPENCL_EXTENSION(cl_amd_media_ops, true, 100)
+OPENCL_EXTENSION(cl_amd_media_ops2, true, 100)
// ARM OpenCL extensions
-OPENCL_EXTENSION(cl_arm_integer_dot_product_int8, 120)
-OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_int8, 120)
-OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_int16, 120)
-OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_saturate_int8, 120)
+OPENCL_EXTENSION(cl_arm_integer_dot_product_int8, true, 120)
+OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_int8, true, 120)
+OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_int16, true, 120)
+OPENCL_EXTENSION(cl_arm_integer_dot_product_accumulate_saturate_int8, true, 120)
// Intel OpenCL extensions
-OPENCL_EXTENSION(cl_intel_subgroups, 120)
-OPENCL_EXTENSION(cl_intel_subgroups_short, 120)
-OPENCL_EXTENSION(cl_intel_device_side_avc_motion_estimation, 120)
+OPENCL_EXTENSION(cl_intel_subgroups, true, 120)
+OPENCL_EXTENSION(cl_intel_subgroups_short, true, 120)
+OPENCL_EXTENSION(cl_intel_device_side_avc_motion_estimation, true, 120)
+// OpenCL C 3.0 features (6.2.1. Features)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_pipes, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_generic_address_space, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_atomic_order_acq_rel, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_atomic_order_seq_cst, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_subgroups, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_3d_image_writes, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_device_enqueue, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_read_write_images, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_program_scope_global_variables, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_fp64, false, 300, OCL_C_30)
+OPENCL_OPTIONALCOREFEATURE(__opencl_c_images, false, 300, OCL_C_30)
#undef OPENCL_OPTIONALCOREFEATURE
#undef OPENCL_COREFEATURE
diff --git a/clang/include/clang/Basic/OpenCLImageTypes.def b/clang/include/clang/Basic/OpenCLImageTypes.def
index cfb018a661ae..ada5892c06b3 100644
--- a/clang/include/clang/Basic/OpenCLImageTypes.def
+++ b/clang/include/clang/Basic/OpenCLImageTypes.def
@@ -65,7 +65,7 @@ IMAGE_WRITE_TYPE(image2d_msaa, OCLImage2dMSAA, "cl_khr_gl_msaa_sharing")
IMAGE_WRITE_TYPE(image2d_array_msaa, OCLImage2dArrayMSAA, "cl_khr_gl_msaa_sharing")
IMAGE_WRITE_TYPE(image2d_msaa_depth, OCLImage2dMSAADepth, "cl_khr_gl_msaa_sharing")
IMAGE_WRITE_TYPE(image2d_array_msaa_depth, OCLImage2dArrayMSAADepth, "cl_khr_gl_msaa_sharing")
-IMAGE_WRITE_TYPE(image3d, OCLImage3d, "cl_khr_3d_image_writes")
+IMAGE_WRITE_TYPE(image3d, OCLImage3d, "")
IMAGE_READ_WRITE_TYPE(image1d, OCLImage1d, "")
IMAGE_READ_WRITE_TYPE(image1d_array, OCLImage1dArray, "")
diff --git a/clang/include/clang/Basic/OpenCLOptions.h b/clang/include/clang/Basic/OpenCLOptions.h
index fe27ef19d4d5..1a035626fade 100644
--- a/clang/include/clang/Basic/OpenCLOptions.h
+++ b/clang/include/clang/Basic/OpenCLOptions.h
@@ -19,6 +19,9 @@
namespace clang {
+class DiagnosticsEngine;
+class TargetInfo;
+
namespace {
// This enum maps OpenCL version(s) into value. These values are used as
// a mask to indicate in which OpenCL version(s) extension is a core or
@@ -51,28 +54,48 @@ static inline OpenCLVersionID encodeOpenCLVersion(unsigned OpenCLVersion) {
}
}
-// Simple helper to check if OpenCL C version is contained in a given encoded
-// OpenCL C version mask
-static inline bool isOpenCLVersionIsContainedInMask(const LangOptions &LO,
- unsigned Mask) {
+// Check if OpenCL C version is contained in a given encoded OpenCL C version
+// mask.
+static inline bool isOpenCLVersionContainedInMask(const LangOptions &LO,
+ unsigned Mask) {
auto CLVer = LO.OpenCLCPlusPlus ? 200 : LO.OpenCLVersion;
OpenCLVersionID Code = encodeOpenCLVersion(CLVer);
return Mask & Code;
}
+
} // end anonymous namespace
/// OpenCL supported extensions and optional core features
class OpenCLOptions {
+
public:
+ // OpenCL C v1.2 s6.5 - All program scope variables must be declared in the
+ // __constant address space.
+ // OpenCL C v2.0 s6.5.1 - Variables defined at program scope and static
+ // variables inside a function can also be declared in the global
+ // address space.
+ // OpenCL C v3.0 s6.7.1 - Variables at program scope or static or extern
+ // variables inside functions can be declared in global address space if
+ // the __opencl_c_program_scope_global_variables feature is supported
+ // C++ for OpenCL inherits rule from OpenCL C v2.0.
+ bool areProgramScopeVariablesSupported(const LangOptions &Opts) const {
+ return Opts.OpenCLCPlusPlus || Opts.OpenCLVersion == 200 ||
+ (Opts.OpenCLVersion == 300 &&
+ isSupported("__opencl_c_program_scope_global_variables", Opts));
+ }
+
struct OpenCLOptionInfo {
+ // Does this option have a pragma.
+ bool WithPragma = false;
+
// Option starts to be available in this OpenCL version
- unsigned Avail;
+ unsigned Avail = 100U;
// Option becomes core feature in this OpenCL versions
- unsigned Core;
+ unsigned Core = 0U;
// Option becomes optional core feature in this OpenCL versions
- unsigned Opt;
+ unsigned Opt = 0U;
// Is this option supported
bool Supported = false;
@@ -80,8 +103,10 @@ public:
// Is this option enabled
bool Enabled = false;
- OpenCLOptionInfo(unsigned A = 100, unsigned C = 0U, unsigned O = 0U)
- : Avail(A), Core(C), Opt(O) {}
+ OpenCLOptionInfo() = default;
+ OpenCLOptionInfo(bool Pragma, unsigned AvailV, unsigned CoreV,
+ unsigned OptV)
+ : WithPragma(Pragma), Avail(AvailV), Core(CoreV), Opt(OptV) {}
bool isCore() const { return Core != 0U; }
@@ -96,18 +121,23 @@ public:
// Is core option in OpenCL version \p LO.
bool isCoreIn(const LangOptions &LO) const {
- return isAvailableIn(LO) && isOpenCLVersionIsContainedInMask(LO, Core);
+ return isAvailableIn(LO) && isOpenCLVersionContainedInMask(LO, Core);
}
// Is optional core option in OpenCL version \p LO.
bool isOptionalCoreIn(const LangOptions &LO) const {
- return isAvailableIn(LO) && isOpenCLVersionIsContainedInMask(LO, Opt);
+ return isAvailableIn(LO) && isOpenCLVersionContainedInMask(LO, Opt);
}
};
bool isKnown(llvm::StringRef Ext) const;
- bool isEnabled(llvm::StringRef Ext) const;
+ // For a core or optional core feature, check that it is supported
+ // by the target; for any other option (extension), check that it is
+ // enabled via pragma.
+ bool isAvailableOption(llvm::StringRef Ext, const LangOptions &LO) const;
+
+ bool isWithPragma(llvm::StringRef Ext) const;
// Is supported as either an extension or an (optional) core feature for
// OpenCL version \p LO.
@@ -131,6 +161,11 @@ public:
// For supported core or optional core feature, return false.
bool isSupportedExtension(llvm::StringRef Ext, const LangOptions &LO) const;
+ // FIXME: Whether extension should accept pragma should not
+ // be reset dynamically. But it is currently required when
+ // registering new extensions via pragmas.
+ void acceptsPragma(llvm::StringRef Ext, bool V = true);
+
void enable(llvm::StringRef Ext, bool V = true);
/// Enable or disable support for OpenCL extensions
@@ -139,7 +174,6 @@ public:
void support(llvm::StringRef Ext, bool V = true);
OpenCLOptions();
- OpenCLOptions(const OpenCLOptions &) = default;
// Set supported options based on target settings and language version
void addSupport(const llvm::StringMap<bool> &FeaturesMap,
@@ -148,15 +182,36 @@ public:
// Disable all extensions
void disableAll();
- // Enable supported core and optional core features
- void enableSupportedCore(const LangOptions &LO);
-
friend class ASTWriter;
friend class ASTReader;
using OpenCLOptionInfoMap = llvm::StringMap<OpenCLOptionInfo>;
+ template <typename... Args>
+ static bool isOpenCLOptionCoreIn(const LangOptions &LO, Args &&... args) {
+ return OpenCLOptionInfo(std::forward<Args>(args)...).isCoreIn(LO);
+ }
+
+ template <typename... Args>
+ static bool isOpenCLOptionAvailableIn(const LangOptions &LO,
+ Args &&... args) {
+ return OpenCLOptionInfo(std::forward<Args>(args)...).isAvailableIn(LO);
+ }
+
+ // Diagnose feature dependencies for OpenCL C 3.0. Return false if target
+ // doesn't follow these requirements.
+ static bool diagnoseUnsupportedFeatureDependencies(const TargetInfo &TI,
+ DiagnosticsEngine &Diags);
+
+ // Diagnose that features and equivalent extension are set to same values.
+ // Return false if target doesn't follow these requirements.
+ static bool diagnoseFeatureExtensionDifferences(const TargetInfo &TI,
+ DiagnosticsEngine &Diags);
+
private:
+ // Option is enabled via pragma
+ bool isEnabled(llvm::StringRef Ext) const;
+
OpenCLOptionInfoMap OptMap;
};
diff --git a/clang/include/clang/Basic/OpenMPKinds.h b/clang/include/clang/Basic/OpenMPKinds.h
index 0f37dc9ad997..c7a2591de26c 100644
--- a/clang/include/clang/Basic/OpenMPKinds.h
+++ b/clang/include/clang/Basic/OpenMPKinds.h
@@ -267,6 +267,11 @@ bool isOpenMPTaskingDirective(OpenMPDirectiveKind Kind);
/// functions
bool isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind);
+/// Checks if the specified directive is a loop transformation directive.
+/// \param DKind Specified directive.
+/// \return True iff the directive is a loop transformation.
+bool isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind);
+
/// Return the captured regions of an OpenMP directive.
void getOpenMPCaptureRegions(
llvm::SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
diff --git a/clang/include/clang/Basic/RISCVVTypes.def b/clang/include/clang/Basic/RISCVVTypes.def
new file mode 100644
index 000000000000..f6ef62a64636
--- /dev/null
+++ b/clang/include/clang/Basic/RISCVVTypes.def
@@ -0,0 +1,147 @@
+//===-- RISCVVTypes.def - Metadata for the RISC-V V types ------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various RISC-V V builtin types. The macros are:
+//
+// - RVV_TYPE(Name, Id, SingletonId)
+// A builtin type that has not been covered by any other #define
+// Defining this macro covers all the builtins.
+//
+// - RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, IsSigned, IsFP)
+// A RISC-V V scalable vector.
+//
+// - RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)
+//   A RISC-V V scalable mask.
+//
+// where:
+//
+// - Name is the name of the builtin type.
+//
+// - Id is the enumerator defining the type.
+//
+// - SingletonId is the global singleton of this type.
+//
+//   - NumEls is the number of elements in the vector.
+//
+// - ElBits is the size of one element in bits (SEW).
+//
+// - NF is the number of fields (NFIELDS) used in the Zvlsseg instructions
+// (TODO).
+//
+//   - IsSigned is true for vectors of signed integer elements and false
+//     for vectors of floating-point elements.
+//
+// - IsFP is true for vectors of floating-point elements.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef RVV_VECTOR_TYPE
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, IsFP)\
+ RVV_TYPE(Name, Id, SingletonId)
+#endif
+
+#ifndef RVV_PREDICATE_TYPE
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls)\
+ RVV_TYPE(Name, Id, SingletonId)
+#endif
+
+#ifndef RVV_VECTOR_TYPE_INT
+#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned) \
+ RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, false)
+#endif
+
+#ifndef RVV_VECTOR_TYPE_FLOAT
+#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
+ RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, false, true)
+#endif
+
+//===- Vector types -------------------------------------------------------===//
+
+RVV_VECTOR_TYPE_INT("__rvv_int8mf8_t", RvvInt8mf8, RvvInt8mf8Ty, 1, 8, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf4_t", RvvInt8mf4, RvvInt8mf4Ty, 2, 8, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8mf2_t", RvvInt8mf2, RvvInt8mf2Ty, 4, 8, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m1_t", RvvInt8m1, RvvInt8m1Ty, 8, 8, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m2_t", RvvInt8m2, RvvInt8m2Ty, 16, 8, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m4_t", RvvInt8m4, RvvInt8m4Ty, 32, 8, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int8m8_t", RvvInt8m8, RvvInt8m8Ty, 64, 8, 1, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf8_t",RvvUint8mf8,RvvUint8mf8Ty,1, 8, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf4_t",RvvUint8mf4,RvvUint8mf4Ty,2, 8, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8mf2_t",RvvUint8mf2,RvvUint8mf2Ty,4, 8, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m1_t", RvvUint8m1, RvvUint8m1Ty, 8, 8, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m2_t", RvvUint8m2, RvvUint8m2Ty, 16, 8, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m4_t", RvvUint8m4, RvvUint8m4Ty, 32, 8, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint8m8_t", RvvUint8m8, RvvUint8m8Ty, 64, 8, 1, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_int16mf4_t",RvvInt16mf4,RvvInt16mf4Ty,1, 16, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16mf2_t",RvvInt16mf2,RvvInt16mf2Ty,2, 16, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m1_t", RvvInt16m1, RvvInt16m1Ty, 4, 16, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m2_t", RvvInt16m2, RvvInt16m2Ty, 8, 16, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m4_t", RvvInt16m4, RvvInt16m4Ty, 16, 16, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int16m8_t", RvvInt16m8, RvvInt16m8Ty, 32, 16, 1, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf4_t",RvvUint16mf4,RvvUint16mf4Ty,1, 16, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16mf2_t",RvvUint16mf2,RvvUint16mf2Ty,2, 16, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m1_t", RvvUint16m1, RvvUint16m1Ty, 4, 16, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m2_t", RvvUint16m2, RvvUint16m2Ty, 8, 16, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m4_t", RvvUint16m4, RvvUint16m4Ty, 16, 16, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint16m8_t", RvvUint16m8, RvvUint16m8Ty, 32, 16, 1, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_int32mf2_t",RvvInt32mf2,RvvInt32mf2Ty,1, 32, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m1_t", RvvInt32m1, RvvInt32m1Ty, 2, 32, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m2_t", RvvInt32m2, RvvInt32m2Ty, 4, 32, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m4_t", RvvInt32m4, RvvInt32m4Ty, 8, 32, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int32m8_t", RvvInt32m8, RvvInt32m8Ty, 16, 32, 1, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint32mf2_t",RvvUint32mf2,RvvUint32mf2Ty,1, 32, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m1_t", RvvUint32m1, RvvUint32m1Ty, 2, 32, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m2_t", RvvUint32m2, RvvUint32m2Ty, 4, 32, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m4_t", RvvUint32m4, RvvUint32m4Ty, 8, 32, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint32m8_t", RvvUint32m8, RvvUint32m8Ty, 16, 32, 1, false)
+
+RVV_VECTOR_TYPE_INT("__rvv_int64m1_t", RvvInt64m1, RvvInt64m1Ty, 1, 64, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m2_t", RvvInt64m2, RvvInt64m2Ty, 2, 64, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m4_t", RvvInt64m4, RvvInt64m4Ty, 4, 64, 1, true)
+RVV_VECTOR_TYPE_INT("__rvv_int64m8_t", RvvInt64m8, RvvInt64m8Ty, 8, 64, 1, true)
+
+RVV_VECTOR_TYPE_INT("__rvv_uint64m1_t",RvvUint64m1,RvvUint64m1Ty,1, 64, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m2_t",RvvUint64m2,RvvUint64m2Ty,2, 64, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m4_t",RvvUint64m4,RvvUint64m4Ty,4, 64, 1, false)
+RVV_VECTOR_TYPE_INT("__rvv_uint64m8_t",RvvUint64m8,RvvUint64m8Ty,8, 64, 1, false)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf4_t",RvvFloat16mf4,RvvFloat16mf4Ty,1, 16, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16mf2_t",RvvFloat16mf2,RvvFloat16mf2Ty,2, 16, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m1_t", RvvFloat16m1, RvvFloat16m1Ty, 4, 16, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m2_t", RvvFloat16m2, RvvFloat16m2Ty, 8, 16, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m4_t", RvvFloat16m4, RvvFloat16m4Ty, 16, 16, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float16m8_t", RvvFloat16m8, RvvFloat16m8Ty, 32, 16, 1)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32mf2_t",RvvFloat32mf2,RvvFloat32mf2Ty,1, 32, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m1_t", RvvFloat32m1, RvvFloat32m1Ty, 2, 32, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m2_t", RvvFloat32m2, RvvFloat32m2Ty, 4, 32, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m4_t", RvvFloat32m4, RvvFloat32m4Ty, 8, 32, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float32m8_t", RvvFloat32m8, RvvFloat32m8Ty, 16, 32, 1)
+
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m1_t", RvvFloat64m1, RvvFloat64m1Ty, 1, 64, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m2_t", RvvFloat64m2, RvvFloat64m2Ty, 2, 64, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m4_t", RvvFloat64m4, RvvFloat64m4Ty, 4, 64, 1)
+RVV_VECTOR_TYPE_FLOAT("__rvv_float64m8_t", RvvFloat64m8, RvvFloat64m8Ty, 8, 64, 1)
+
+RVV_PREDICATE_TYPE("__rvv_bool1_t", RvvBool1, RvvBool1Ty, 64)
+RVV_PREDICATE_TYPE("__rvv_bool2_t", RvvBool2, RvvBool2Ty, 32)
+RVV_PREDICATE_TYPE("__rvv_bool4_t", RvvBool4, RvvBool4Ty, 16)
+RVV_PREDICATE_TYPE("__rvv_bool8_t", RvvBool8, RvvBool8Ty, 8)
+RVV_PREDICATE_TYPE("__rvv_bool16_t", RvvBool16, RvvBool16Ty, 4)
+RVV_PREDICATE_TYPE("__rvv_bool32_t", RvvBool32, RvvBool32Ty, 2)
+RVV_PREDICATE_TYPE("__rvv_bool64_t", RvvBool64, RvvBool64Ty, 1)
+
+#undef RVV_VECTOR_TYPE_FLOAT
+#undef RVV_VECTOR_TYPE_INT
+#undef RVV_VECTOR_TYPE
+#undef RVV_PREDICATE_TYPE
+#undef RVV_TYPE
diff --git a/clang/include/clang/Basic/SanitizerBlacklist.h b/clang/include/clang/Basic/SanitizerBlacklist.h
deleted file mode 100644
index c874ff28aacc..000000000000
--- a/clang/include/clang/Basic/SanitizerBlacklist.h
+++ /dev/null
@@ -1,49 +0,0 @@
-//===--- SanitizerBlacklist.h - Blacklist for sanitizers --------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// User-provided blacklist used to disable/alter instrumentation done in
-// sanitizers.
-//
-//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_BASIC_SANITIZERBLACKLIST_H
-#define LLVM_CLANG_BASIC_SANITIZERBLACKLIST_H
-
-#include "clang/Basic/LLVM.h"
-#include "clang/Basic/SourceLocation.h"
-#include "llvm/ADT/StringRef.h"
-#include <memory>
-#include <vector>
-
-namespace clang {
-
-class SanitizerMask;
-class SourceManager;
-class SanitizerSpecialCaseList;
-
-class SanitizerBlacklist {
- std::unique_ptr<SanitizerSpecialCaseList> SSCL;
- SourceManager &SM;
-
-public:
- SanitizerBlacklist(const std::vector<std::string> &BlacklistPaths,
- SourceManager &SM);
- ~SanitizerBlacklist();
- bool isBlacklistedGlobal(SanitizerMask Mask, StringRef GlobalName,
- StringRef Category = StringRef()) const;
- bool isBlacklistedType(SanitizerMask Mask, StringRef MangledTypeName,
- StringRef Category = StringRef()) const;
- bool isBlacklistedFunction(SanitizerMask Mask, StringRef FunctionName) const;
- bool isBlacklistedFile(SanitizerMask Mask, StringRef FileName,
- StringRef Category = StringRef()) const;
- bool isBlacklistedLocation(SanitizerMask Mask, SourceLocation Loc,
- StringRef Category = StringRef()) const;
-};
-
-} // end namespace clang
-
-#endif
diff --git a/clang/include/clang/Basic/SanitizerSpecialCaseList.h b/clang/include/clang/Basic/SanitizerSpecialCaseList.h
index c84894dae298..d024b7dfc2e8 100644
--- a/clang/include/clang/Basic/SanitizerSpecialCaseList.h
+++ b/clang/include/clang/Basic/SanitizerSpecialCaseList.h
@@ -39,7 +39,7 @@ public:
createOrDie(const std::vector<std::string> &Paths,
llvm::vfs::FileSystem &VFS);
- // Query blacklisted entries if any bit in Mask matches the entry's section.
+ // Query ignorelisted entries if any bit in Mask matches the entry's section.
bool inSection(SanitizerMask Mask, StringRef Prefix, StringRef Query,
StringRef Category = StringRef()) const;
diff --git a/clang/include/clang/Basic/Sanitizers.h b/clang/include/clang/Basic/Sanitizers.h
index c6b0446cea4f..b12a3b7821d7 100644
--- a/clang/include/clang/Basic/Sanitizers.h
+++ b/clang/include/clang/Basic/Sanitizers.h
@@ -16,7 +16,7 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/MathExtras.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <cassert>
#include <cstdint>
@@ -59,12 +59,7 @@ public:
return SanitizerMask(mask1, mask2);
}
- unsigned countPopulation() const {
- unsigned total = 0;
- for (const auto &Val : maskLoToHigh)
- total += llvm::countPopulation(Val);
- return total;
- }
+ unsigned countPopulation() const;
void flipAllBits() {
for (auto &Val : maskLoToHigh)
@@ -178,6 +173,10 @@ struct SanitizerSet {
/// Returns a non-zero SanitizerMask, or \c 0 if \p Value is not known.
SanitizerMask parseSanitizerValue(StringRef Value, bool AllowGroups);
+/// Serialize a SanitizerSet into values for -fsanitize= or -fno-sanitize=.
+void serializeSanitizerSet(SanitizerSet Set,
+ SmallVectorImpl<StringRef> &Values);
+
/// For each sanitizer group bit set in \p Kinds, set the bits for sanitizers
/// this group enables.
SanitizerMask expandSanitizerGroups(SanitizerMask Kinds);
@@ -189,6 +188,16 @@ inline SanitizerMask getPPTransparentSanitizers() {
SanitizerKind::Undefined | SanitizerKind::FloatDivideByZero;
}
+StringRef AsanDtorKindToString(llvm::AsanDtorKind kind);
+
+llvm::AsanDtorKind AsanDtorKindFromString(StringRef kind);
+
+StringRef AsanDetectStackUseAfterReturnModeToString(
+ llvm::AsanDetectStackUseAfterReturnMode mode);
+
+llvm::AsanDetectStackUseAfterReturnMode
+AsanDetectStackUseAfterReturnModeFromString(StringRef modeStr);
+
} // namespace clang
#endif // LLVM_CLANG_BASIC_SANITIZERS_H
diff --git a/clang/include/clang/Basic/SourceLocation.h b/clang/include/clang/Basic/SourceLocation.h
index fc722b1d563d..540de23b9f55 100644
--- a/clang/include/clang/Basic/SourceLocation.h
+++ b/clang/include/clang/Basic/SourceLocation.h
@@ -16,7 +16,6 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <cstdint>
#include <string>
@@ -92,11 +91,14 @@ class SourceLocation {
friend class SourceManager;
friend struct llvm::FoldingSetTrait<SourceLocation>;
- unsigned ID = 0;
+public:
+ using UIntTy = uint32_t;
+ using IntTy = int32_t;
- enum : unsigned {
- MacroIDBit = 1U << 31
- };
+private:
+ UIntTy ID = 0;
+
+ enum : UIntTy { MacroIDBit = 1ULL << (8 * sizeof(UIntTy) - 1) };
public:
bool isFileID() const { return (ID & MacroIDBit) == 0; }
@@ -112,18 +114,16 @@ public:
private:
/// Return the offset into the manager's global input view.
- unsigned getOffset() const {
- return ID & ~MacroIDBit;
- }
+ UIntTy getOffset() const { return ID & ~MacroIDBit; }
- static SourceLocation getFileLoc(unsigned ID) {
+ static SourceLocation getFileLoc(UIntTy ID) {
assert((ID & MacroIDBit) == 0 && "Ran out of source locations!");
SourceLocation L;
L.ID = ID;
return L;
}
- static SourceLocation getMacroLoc(unsigned ID) {
+ static SourceLocation getMacroLoc(UIntTy ID) {
assert((ID & MacroIDBit) == 0 && "Ran out of source locations!");
SourceLocation L;
L.ID = MacroIDBit | ID;
@@ -133,7 +133,7 @@ private:
public:
/// Return a source location with the specified offset from this
/// SourceLocation.
- SourceLocation getLocWithOffset(int Offset) const {
+ SourceLocation getLocWithOffset(IntTy Offset) const {
assert(((getOffset()+Offset) & MacroIDBit) == 0 && "offset overflow");
SourceLocation L;
L.ID = ID+Offset;
@@ -145,13 +145,13 @@ public:
///
/// This should only be passed to SourceLocation::getFromRawEncoding, it
/// should not be inspected directly.
- unsigned getRawEncoding() const { return ID; }
+ UIntTy getRawEncoding() const { return ID; }
/// Turn a raw encoding of a SourceLocation object into
/// a real SourceLocation.
///
/// \see getRawEncoding.
- static SourceLocation getFromRawEncoding(unsigned Encoding) {
+ static SourceLocation getFromRawEncoding(UIntTy Encoding) {
SourceLocation X;
X.ID = Encoding;
return X;
@@ -171,7 +171,7 @@ public:
/// Turn a pointer encoding of a SourceLocation object back
/// into a real SourceLocation.
static SourceLocation getFromPtrEncoding(const void *Encoding) {
- return getFromRawEncoding((unsigned)(uintptr_t)Encoding);
+ return getFromRawEncoding((SourceLocation::UIntTy)(uintptr_t)Encoding);
}
static bool isPairOfFileLocations(SourceLocation Start, SourceLocation End) {
@@ -489,11 +489,13 @@ namespace llvm {
/// DenseMapInfo<unsigned> which uses SourceLocation::ID is used as a key.
template <> struct DenseMapInfo<clang::SourceLocation> {
static clang::SourceLocation getEmptyKey() {
- return clang::SourceLocation::getFromRawEncoding(~0U);
+ constexpr clang::SourceLocation::UIntTy Zero = 0;
+ return clang::SourceLocation::getFromRawEncoding(~Zero);
}
static clang::SourceLocation getTombstoneKey() {
- return clang::SourceLocation::getFromRawEncoding(~0U - 1);
+ constexpr clang::SourceLocation::UIntTy Zero = 0;
+ return clang::SourceLocation::getFromRawEncoding(~Zero - 1);
}
static unsigned getHashValue(clang::SourceLocation Loc) {
@@ -510,20 +512,6 @@ namespace llvm {
static void Profile(const clang::SourceLocation &X, FoldingSetNodeID &ID);
};
- // Teach SmallPtrSet how to handle SourceLocation.
- template<>
- struct PointerLikeTypeTraits<clang::SourceLocation> {
- static constexpr int NumLowBitsAvailable = 0;
-
- static void *getAsVoidPointer(clang::SourceLocation L) {
- return L.getPtrEncoding();
- }
-
- static clang::SourceLocation getFromVoidPointer(void *P) {
- return clang::SourceLocation::getFromRawEncoding((unsigned)(uintptr_t)P);
- }
- };
-
} // namespace llvm
#endif // LLVM_CLANG_BASIC_SOURCELOCATION_H
diff --git a/clang/include/clang/Basic/SourceManager.h b/clang/include/clang/Basic/SourceManager.h
index 8cd37756d8f1..cc29c24f5a35 100644
--- a/clang/include/clang/Basic/SourceManager.h
+++ b/clang/include/clang/Basic/SourceManager.h
@@ -465,8 +465,9 @@ static_assert(sizeof(FileInfo) <= sizeof(ExpansionInfo),
/// SourceManager keeps an array of these objects, and they are uniquely
/// identified by the FileID datatype.
class SLocEntry {
- unsigned Offset : 31;
- unsigned IsExpansion : 1;
+ static constexpr int OffsetBits = 8 * sizeof(SourceLocation::UIntTy) - 1;
+ SourceLocation::UIntTy Offset : OffsetBits;
+ SourceLocation::UIntTy IsExpansion : 1;
union {
FileInfo File;
ExpansionInfo Expansion;
@@ -475,7 +476,7 @@ class SLocEntry {
public:
SLocEntry() : Offset(), IsExpansion(), File() {}
- unsigned getOffset() const { return Offset; }
+ SourceLocation::UIntTy getOffset() const { return Offset; }
bool isExpansion() const { return IsExpansion; }
bool isFile() const { return !isExpansion(); }
@@ -490,8 +491,8 @@ public:
return Expansion;
}
- static SLocEntry get(unsigned Offset, const FileInfo &FI) {
- assert(!(Offset & (1u << 31)) && "Offset is too large");
+ static SLocEntry get(SourceLocation::UIntTy Offset, const FileInfo &FI) {
+ assert(!(Offset & (1ULL << OffsetBits)) && "Offset is too large");
SLocEntry E;
E.Offset = Offset;
E.IsExpansion = false;
@@ -499,8 +500,9 @@ public:
return E;
}
- static SLocEntry get(unsigned Offset, const ExpansionInfo &Expansion) {
- assert(!(Offset & (1u << 31)) && "Offset is too large");
+ static SLocEntry get(SourceLocation::UIntTy Offset,
+ const ExpansionInfo &Expansion) {
+ assert(!(Offset & (1ULL << OffsetBits)) && "Offset is too large");
SLocEntry E;
E.Offset = Offset;
E.IsExpansion = true;
@@ -690,17 +692,18 @@ class SourceManager : public RefCountedBase<SourceManager> {
/// The starting offset of the next local SLocEntry.
///
/// This is LocalSLocEntryTable.back().Offset + the size of that entry.
- unsigned NextLocalOffset;
+ SourceLocation::UIntTy NextLocalOffset;
/// The starting offset of the latest batch of loaded SLocEntries.
///
/// This is LoadedSLocEntryTable.back().Offset, except that that entry might
/// not have been loaded, so that value would be unknown.
- unsigned CurrentLoadedOffset;
+ SourceLocation::UIntTy CurrentLoadedOffset;
- /// The highest possible offset is 2^31-1, so CurrentLoadedOffset
- /// starts at 2^31.
- static const unsigned MaxLoadedOffset = 1U << 31U;
+ /// The highest possible offset is 2^32-1 (2^63-1 for 64-bit source
+ /// locations), so CurrentLoadedOffset starts at 2^31 (2^63 resp.).
+ static const SourceLocation::UIntTy MaxLoadedOffset =
+ 1ULL << (8 * sizeof(SourceLocation::UIntTy) - 1);
/// A bitmap that indicates whether the entries of LoadedSLocEntryTable
/// have already been loaded from the external source.
@@ -865,11 +868,13 @@ public:
/// This translates NULL into standard input.
FileID createFileID(const FileEntry *SourceFile, SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID = 0, unsigned LoadedOffset = 0);
+ int LoadedID = 0,
+ SourceLocation::UIntTy LoadedOffset = 0);
FileID createFileID(FileEntryRef SourceFile, SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID = 0, unsigned LoadedOffset = 0);
+ int LoadedID = 0,
+ SourceLocation::UIntTy LoadedOffset = 0);
/// Create a new FileID that represents the specified memory buffer.
///
@@ -877,7 +882,7 @@ public:
/// MemoryBuffer, so only pass a MemoryBuffer to this once.
FileID createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
SrcMgr::CharacteristicKind FileCharacter = SrcMgr::C_User,
- int LoadedID = 0, unsigned LoadedOffset = 0,
+ int LoadedID = 0, SourceLocation::UIntTy LoadedOffset = 0,
SourceLocation IncludeLoc = SourceLocation());
/// Create a new FileID that represents the specified memory buffer.
@@ -886,7 +891,7 @@ public:
/// outlive the SourceManager.
FileID createFileID(const llvm::MemoryBufferRef &Buffer,
SrcMgr::CharacteristicKind FileCharacter = SrcMgr::C_User,
- int LoadedID = 0, unsigned LoadedOffset = 0,
+ int LoadedID = 0, SourceLocation::UIntTy LoadedOffset = 0,
SourceLocation IncludeLoc = SourceLocation());
/// Get the FileID for \p SourceFile if it exists. Otherwise, create a
@@ -905,13 +910,11 @@ public:
/// Return a new SourceLocation that encodes the fact
/// that a token from SpellingLoc should actually be referenced from
/// ExpansionLoc.
- SourceLocation createExpansionLoc(SourceLocation Loc,
- SourceLocation ExpansionLocStart,
- SourceLocation ExpansionLocEnd,
- unsigned TokLength,
- bool ExpansionIsTokenRange = true,
- int LoadedID = 0,
- unsigned LoadedOffset = 0);
+ SourceLocation
+ createExpansionLoc(SourceLocation Loc, SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd, unsigned TokLength,
+ bool ExpansionIsTokenRange = true, int LoadedID = 0,
+ SourceLocation::UIntTy LoadedOffset = 0);
/// Return a new SourceLocation that encodes that the token starting
/// at \p TokenStart ends prematurely at \p TokenEnd.
@@ -1098,7 +1101,7 @@ public:
/// the entry in SLocEntryTable which contains the specified location.
///
FileID getFileID(SourceLocation SpellingLoc) const {
- unsigned SLocOffset = SpellingLoc.getOffset();
+ SourceLocation::UIntTy SLocOffset = SpellingLoc.getOffset();
// If our one-entry cache covers this offset, just return it.
if (isOffsetInFileID(LastFileIDLookup, SLocOffset))
@@ -1221,7 +1224,7 @@ public:
if (!Entry)
return SourceLocation();
- unsigned GlobalOffset = Entry->getOffset() + Offset;
+ SourceLocation::UIntTy GlobalOffset = Entry->getOffset() + Offset;
return Entry->isFile() ? SourceLocation::getFileLoc(GlobalOffset)
: SourceLocation::getMacroLoc(GlobalOffset);
}
@@ -1326,17 +1329,17 @@ public:
///
/// If it's true and \p RelativeOffset is non-null, it will be set to the
/// relative offset of \p Loc inside the chunk.
- bool isInSLocAddrSpace(SourceLocation Loc,
- SourceLocation Start, unsigned Length,
- unsigned *RelativeOffset = nullptr) const {
+ bool
+ isInSLocAddrSpace(SourceLocation Loc, SourceLocation Start, unsigned Length,
+ SourceLocation::UIntTy *RelativeOffset = nullptr) const {
assert(((Start.getOffset() < NextLocalOffset &&
Start.getOffset()+Length <= NextLocalOffset) ||
(Start.getOffset() >= CurrentLoadedOffset &&
Start.getOffset()+Length < MaxLoadedOffset)) &&
"Chunk is not valid SLoc address space");
- unsigned LocOffs = Loc.getOffset();
- unsigned BeginOffs = Start.getOffset();
- unsigned EndOffs = BeginOffs + Length;
+ SourceLocation::UIntTy LocOffs = Loc.getOffset();
+ SourceLocation::UIntTy BeginOffs = Start.getOffset();
+ SourceLocation::UIntTy EndOffs = BeginOffs + Length;
if (LocOffs >= BeginOffs && LocOffs < EndOffs) {
if (RelativeOffset)
*RelativeOffset = LocOffs - BeginOffs;
@@ -1352,8 +1355,8 @@ public:
/// If it's true and \p RelativeOffset is non-null, it will be set to the
/// offset of \p RHS relative to \p LHS.
bool isInSameSLocAddrSpace(SourceLocation LHS, SourceLocation RHS,
- int *RelativeOffset) const {
- unsigned LHSOffs = LHS.getOffset(), RHSOffs = RHS.getOffset();
+ SourceLocation::IntTy *RelativeOffset) const {
+ SourceLocation::UIntTy LHSOffs = LHS.getOffset(), RHSOffs = RHS.getOffset();
bool LHSLoaded = LHSOffs >= CurrentLoadedOffset;
bool RHSLoaded = RHSOffs >= CurrentLoadedOffset;
@@ -1517,7 +1520,7 @@ public:
/// of FileID) to \p relativeOffset.
bool isInFileID(SourceLocation Loc, FileID FID,
unsigned *RelativeOffset = nullptr) const {
- unsigned Offs = Loc.getOffset();
+ SourceLocation::UIntTy Offs = Loc.getOffset();
if (isOffsetInFileID(FID, Offs)) {
if (RelativeOffset)
*RelativeOffset = Offs - getSLocEntry(FID).getOffset();
@@ -1636,8 +1639,9 @@ public:
/// offset in the "source location address space".
///
/// Note that we always consider source locations loaded from
- bool isBeforeInSLocAddrSpace(SourceLocation LHS, unsigned RHS) const {
- unsigned LHSOffset = LHS.getOffset();
+ bool isBeforeInSLocAddrSpace(SourceLocation LHS,
+ SourceLocation::UIntTy RHS) const {
+ SourceLocation::UIntTy LHSOffset = LHS.getOffset();
bool LHSLoaded = LHSOffset >= CurrentLoadedOffset;
bool RHSLoaded = RHS >= CurrentLoadedOffset;
if (LHSLoaded == RHSLoaded)
@@ -1699,7 +1703,7 @@ public:
return getSLocEntryByID(FID.ID, Invalid);
}
- unsigned getNextLocalOffset() const { return NextLocalOffset; }
+ SourceLocation::UIntTy getNextLocalOffset() const { return NextLocalOffset; }
void setExternalSLocEntrySource(ExternalSLocEntrySource *Source) {
assert(LoadedSLocEntryTable.empty() &&
@@ -1713,8 +1717,9 @@ public:
/// NumSLocEntries will be allocated, which occupy a total of TotalSize space
/// in the global source view. The lowest ID and the base offset of the
/// entries will be returned.
- std::pair<int, unsigned>
- AllocateLoadedSLocEntries(unsigned NumSLocEntries, unsigned TotalSize);
+ std::pair<int, SourceLocation::UIntTy>
+ AllocateLoadedSLocEntries(unsigned NumSLocEntries,
+ SourceLocation::UIntTy TotalSize);
/// Returns true if \p Loc came from a PCH/Module.
bool isLoadedSourceLocation(SourceLocation Loc) const {
@@ -1795,14 +1800,15 @@ private:
/// Implements the common elements of storing an expansion info struct into
/// the SLocEntry table and producing a source location that refers to it.
- SourceLocation createExpansionLocImpl(const SrcMgr::ExpansionInfo &Expansion,
- unsigned TokLength,
- int LoadedID = 0,
- unsigned LoadedOffset = 0);
+ SourceLocation
+ createExpansionLocImpl(const SrcMgr::ExpansionInfo &Expansion,
+ unsigned TokLength, int LoadedID = 0,
+ SourceLocation::UIntTy LoadedOffset = 0);
/// Return true if the specified FileID contains the
/// specified SourceLocation offset. This is a very hot method.
- inline bool isOffsetInFileID(FileID FID, unsigned SLocOffset) const {
+ inline bool isOffsetInFileID(FileID FID,
+ SourceLocation::UIntTy SLocOffset) const {
const SrcMgr::SLocEntry &Entry = getSLocEntry(FID);
// If the entry is after the offset, it can't contain it.
if (SLocOffset < Entry.getOffset()) return false;
@@ -1836,7 +1842,7 @@ private:
FileID createFileIDImpl(SrcMgr::ContentCache &File, StringRef Filename,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind DirCharacter, int LoadedID,
- unsigned LoadedOffset);
+ SourceLocation::UIntTy LoadedOffset);
SrcMgr::ContentCache &getOrCreateContentCache(FileEntryRef SourceFile,
bool isSystemFile = false);
@@ -1845,9 +1851,9 @@ private:
SrcMgr::ContentCache &
createMemBufferContentCache(std::unique_ptr<llvm::MemoryBuffer> Buf);
- FileID getFileIDSlow(unsigned SLocOffset) const;
- FileID getFileIDLocal(unsigned SLocOffset) const;
- FileID getFileIDLoaded(unsigned SLocOffset) const;
+ FileID getFileIDSlow(SourceLocation::UIntTy SLocOffset) const;
+ FileID getFileIDLocal(SourceLocation::UIntTy SLocOffset) const;
+ FileID getFileIDLoaded(SourceLocation::UIntTy SLocOffset) const;
SourceLocation getExpansionLocSlowCase(SourceLocation Loc) const;
SourceLocation getSpellingLocSlowCase(SourceLocation Loc) const;
diff --git a/clang/include/clang/Basic/Specifiers.h b/clang/include/clang/Basic/Specifiers.h
index 07d8177b8ab2..1c38b411e083 100644
--- a/clang/include/clang/Basic/Specifiers.h
+++ b/clang/include/clang/Basic/Specifiers.h
@@ -105,9 +105,9 @@ namespace clang {
/// The categorization of expression values, currently following the
/// C++11 scheme.
enum ExprValueKind {
- /// An r-value expression (a pr-value in the C++11 taxonomy)
+ /// A pr-value expression (in the C++11 taxonomy)
/// produces a temporary value.
- VK_RValue,
+ VK_PRValue,
/// An l-value expression is a reference to an object with
/// independent storage.
@@ -266,6 +266,7 @@ namespace clang {
CC_SpirFunction, // default for OpenCL functions on SPIR target
CC_OpenCLKernel, // inferred for OpenCL kernels
CC_Swift, // __attribute__((swiftcall))
+ CC_SwiftAsync, // __attribute__((swiftasynccall))
CC_PreserveMost, // __attribute__((preserve_most))
CC_PreserveAll, // __attribute__((preserve_all))
CC_AArch64VectorCall, // __attribute__((aarch64_vector_pcs))
@@ -284,6 +285,7 @@ namespace clang {
case CC_SpirFunction:
case CC_OpenCLKernel:
case CC_Swift:
+ case CC_SwiftAsync:
return false;
default:
return true;
@@ -344,7 +346,12 @@ namespace clang {
/// This parameter (which must have pointer type) uses the special
/// Swift context-pointer ABI treatment. There can be at
/// most one parameter on a given function that uses this treatment.
- SwiftContext
+ SwiftContext,
+
+ /// This parameter (which must have pointer type) uses the special
+ /// Swift asynchronous context-pointer ABI treatment. There can be at
+ /// most one parameter on a given function that uses this treatment.
+ SwiftAsyncContext,
};
/// Assigned inheritance model for a class in the MS C++ ABI. Must match order
diff --git a/clang/include/clang/Basic/StmtNodes.td b/clang/include/clang/Basic/StmtNodes.td
index 5965e8b9902a..508f1fddf1b3 100644
--- a/clang/include/clang/Basic/StmtNodes.td
+++ b/clang/include/clang/Basic/StmtNodes.td
@@ -57,6 +57,7 @@ def CoreturnStmt : StmtNode<Stmt>;
// Expressions
def Expr : StmtNode<ValueStmt, 1>;
def PredefinedExpr : StmtNode<Expr>;
+def SYCLUniqueStableNameExpr : StmtNode<Expr>;
def DeclRefExpr : StmtNode<Expr>;
def IntegerLiteral : StmtNode<Expr>;
def FixedPointLiteral : StmtNode<Expr>;
@@ -216,10 +217,14 @@ def MSDependentExistsStmt : StmtNode<Stmt>;
def AsTypeExpr : StmtNode<Expr>;
// OpenMP Directives.
+def OMPCanonicalLoop : StmtNode<Stmt>;
def OMPExecutableDirective : StmtNode<Stmt, 1>;
-def OMPLoopDirective : StmtNode<OMPExecutableDirective, 1>;
+def OMPLoopBasedDirective : StmtNode<OMPExecutableDirective, 1>;
+def OMPLoopDirective : StmtNode<OMPLoopBasedDirective, 1>;
def OMPParallelDirective : StmtNode<OMPExecutableDirective>;
def OMPSimdDirective : StmtNode<OMPLoopDirective>;
+def OMPTileDirective : StmtNode<OMPLoopBasedDirective>;
+def OMPUnrollDirective : StmtNode<OMPLoopBasedDirective>;
def OMPForDirective : StmtNode<OMPLoopDirective>;
def OMPForSimdDirective : StmtNode<OMPLoopDirective>;
def OMPSectionsDirective : StmtNode<OMPExecutableDirective>;
@@ -272,3 +277,6 @@ def OMPTargetTeamsDistributeDirective : StmtNode<OMPLoopDirective>;
def OMPTargetTeamsDistributeParallelForDirective : StmtNode<OMPLoopDirective>;
def OMPTargetTeamsDistributeParallelForSimdDirective : StmtNode<OMPLoopDirective>;
def OMPTargetTeamsDistributeSimdDirective : StmtNode<OMPLoopDirective>;
+def OMPInteropDirective : StmtNode<OMPExecutableDirective>;
+def OMPDispatchDirective : StmtNode<OMPExecutableDirective>;
+def OMPMaskedDirective : StmtNode<OMPExecutableDirective>;
diff --git a/clang/include/clang/Basic/TargetBuiltins.h b/clang/include/clang/Basic/TargetBuiltins.h
index b472547012f0..ed53b10f61ef 100644
--- a/clang/include/clang/Basic/TargetBuiltins.h
+++ b/clang/include/clang/Basic/TargetBuiltins.h
@@ -124,6 +124,16 @@ namespace clang {
enum { LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1, LastTSBuiltin };
}
+ /// RISCV builtins
+ namespace RISCV {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsRISCV.def"
+ LastTSBuiltin
+ };
+ } // namespace RISCV
+
/// Flags to identify the types for overloaded Neon builtins.
///
/// These must be kept in sync with the flags in utils/TableGen/NeonEmitter.h.
@@ -290,16 +300,6 @@ namespace clang {
};
}
- /// Le64 builtins
- namespace Le64 {
- enum {
- LastTIBuiltin = clang::Builtin::FirstTSBuiltin - 1,
- #define BUILTIN(ID, TYPE, ATTRS) BI##ID,
- #include "clang/Basic/BuiltinsLe64.def"
- LastTSBuiltin
- };
- }
-
/// SystemZ builtins
namespace SystemZ {
enum {
@@ -321,12 +321,11 @@ namespace clang {
}
static constexpr uint64_t LargestBuiltinID = std::max<uint64_t>(
- {NEON::FirstTSBuiltin, ARM::LastTSBuiltin, SVE::FirstTSBuiltin,
- AArch64::LastTSBuiltin, BPF::LastTSBuiltin, PPC::LastTSBuiltin,
- NVPTX::LastTSBuiltin, AMDGPU::LastTSBuiltin, X86::LastTSBuiltin,
+ {ARM::LastTSBuiltin, AArch64::LastTSBuiltin, BPF::LastTSBuiltin,
+ PPC::LastTSBuiltin, NVPTX::LastTSBuiltin, AMDGPU::LastTSBuiltin,
+ X86::LastTSBuiltin, VE::LastTSBuiltin, RISCV::LastTSBuiltin,
Hexagon::LastTSBuiltin, Mips::LastTSBuiltin, XCore::LastTSBuiltin,
- Le64::LastTSBuiltin, SystemZ::LastTSBuiltin,
- WebAssembly::LastTSBuiltin});
+ SystemZ::LastTSBuiltin, WebAssembly::LastTSBuiltin});
} // end namespace clang.
diff --git a/clang/include/clang/Basic/TargetCXXABI.def b/clang/include/clang/Basic/TargetCXXABI.def
new file mode 100644
index 000000000000..9501cca76094
--- /dev/null
+++ b/clang/include/clang/Basic/TargetCXXABI.def
@@ -0,0 +1,129 @@
+//===--- TargetCXXABI.def - Target C++ ABI database --------------- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the various C++ ABI kinds used on different platforms.
+// Users of this file must define the CXXABI macro to make use of this
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CXXABI
+#error Define the CXXABI macro to handle C++ ABI kinds.
+#endif
+
+#ifndef ITANIUM_CXXABI
+#define ITANIUM_CXXABI(Name, Str) CXXABI(Name, Str)
+#endif
+
+#ifndef MICROSOFT_CXXABI
+#define MICROSOFT_CXXABI(Name, Str) CXXABI(Name, Str)
+#endif
+
+/// The generic Itanium ABI is the standard ABI of most open-source
+/// and Unix-like platforms. It is the primary ABI targeted by
+/// many compilers, including Clang and GCC.
+///
+/// It is documented here:
+/// http://www.codesourcery.com/public/cxx-abi/
+ITANIUM_CXXABI(GenericItanium, "itanium")
+
+/// The generic ARM ABI is a modified version of the Itanium ABI
+/// proposed by ARM for use on ARM-based platforms.
+///
+/// These changes include:
+/// - the representation of member function pointers is adjusted
+/// to not conflict with the 'thumb' bit of ARM function pointers;
+/// - constructors and destructors return 'this';
+/// - guard variables are smaller;
+/// - inline functions are never key functions;
+/// - array cookies have a slightly different layout;
+/// - additional convenience functions are specified;
+/// - and more!
+///
+/// It is documented here:
+/// http://infocenter.arm.com
+/// /help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+ITANIUM_CXXABI(GenericARM, "arm")
+
+/// The iOS ABI is a partial implementation of the ARM ABI.
+/// Several of the features of the ARM ABI were not fully implemented
+/// in the compilers that iOS was launched with.
+///
+/// Essentially, the iOS ABI includes the ARM changes to:
+/// - member function pointers,
+/// - guard variables,
+/// - array cookies, and
+/// - constructor/destructor signatures.
+ITANIUM_CXXABI(iOS, "ios")
+
+/// The iOS 64-bit and macOS 64-bit ARM ABI follows ARM's published 64-bit
+/// ABI more closely, but we don't guarantee to follow it perfectly.
+///
+/// It is documented here:
+/// http://infocenter.arm.com
+/// /help/topic/com.arm.doc.ihi0059a/IHI0059A_cppabi64.pdf
+ITANIUM_CXXABI(AppleARM64, "applearm64")
+
+/// WatchOS is a modernisation of the iOS ABI, which roughly means it's
+/// the iOS64 ABI ported to 32-bits. The primary difference from iOS64 is
+/// that RTTI objects must still be unique at the moment.
+ITANIUM_CXXABI(WatchOS, "watchos")
+
+/// The generic AArch64 ABI is also a modified version of the Itanium ABI,
+/// but it has fewer divergences than the 32-bit ARM ABI.
+///
+/// The relevant changes from the generic ABI in this case are:
+/// - representation of member function pointers adjusted as in ARM.
+/// - guard variables are smaller.
+ITANIUM_CXXABI(GenericAArch64, "aarch64")
+
+/// The generic Mips ABI is a modified version of the Itanium ABI.
+///
+/// At the moment, only change from the generic ABI in this case is:
+/// - representation of member function pointers adjusted as in ARM.
+ITANIUM_CXXABI(GenericMIPS, "mips")
+
+/// The WebAssembly ABI is a modified version of the Itanium ABI.
+///
+/// The changes from the Itanium ABI are:
+/// - representation of member function pointers is adjusted, as in ARM;
+/// - member functions are not specially aligned;
+/// - constructors and destructors return 'this', as in ARM;
+/// - guard variables are 32-bit on wasm32, as in ARM;
+/// - unused bits of guard variables are reserved, as in ARM;
+/// - inline functions are never key functions, as in ARM;
+/// - C++11 POD rules are used for tail padding, as in iOS64.
+///
+/// TODO: At present the WebAssembly ABI is not considered stable, so none
+/// of these details is necessarily final yet.
+ITANIUM_CXXABI(WebAssembly, "webassembly")
+
+/// The Fuchsia ABI is a modified version of the Itanium ABI.
+///
+/// The relevant changes from the Itanium ABI are:
+/// - constructors and destructors return 'this', as in ARM.
+ITANIUM_CXXABI(Fuchsia, "fuchsia")
+
+/// The XL ABI is the ABI used by IBM xlclang compiler and is a modified
+/// version of the Itanium ABI.
+///
+/// The relevant changes from the Itanium ABI are:
+/// - static initialization is adjusted to use sinit and sterm functions;
+ITANIUM_CXXABI(XL, "xl")
+
+/// The Microsoft ABI is the ABI used by Microsoft Visual Studio (and
+/// compatible compilers).
+///
+/// FIXME: should this be split into Win32 and Win64 variants?
+///
+/// Only scattered and incomplete official documentation exists.
+MICROSOFT_CXXABI(Microsoft, "microsoft")
+
+#undef CXXABI
+#undef ITANIUM_CXXABI
+#undef MICROSOFT_CXXABI
diff --git a/clang/include/clang/Basic/TargetCXXABI.h b/clang/include/clang/Basic/TargetCXXABI.h
index 2d267f43f92b..e727f85edad7 100644
--- a/clang/include/clang/Basic/TargetCXXABI.h
+++ b/clang/include/clang/Basic/TargetCXXABI.h
@@ -15,7 +15,11 @@
#ifndef LLVM_CLANG_BASIC_TARGETCXXABI_H
#define LLVM_CLANG_BASIC_TARGETCXXABI_H
+#include <map>
+
#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
namespace clang {
@@ -25,105 +29,8 @@ class TargetCXXABI {
public:
/// The basic C++ ABI kind.
enum Kind {
- /// The generic Itanium ABI is the standard ABI of most open-source
- /// and Unix-like platforms. It is the primary ABI targeted by
- /// many compilers, including Clang and GCC.
- ///
- /// It is documented here:
- /// http://www.codesourcery.com/public/cxx-abi/
- GenericItanium,
-
- /// The generic ARM ABI is a modified version of the Itanium ABI
- /// proposed by ARM for use on ARM-based platforms.
- ///
- /// These changes include:
- /// - the representation of member function pointers is adjusted
- /// to not conflict with the 'thumb' bit of ARM function pointers;
- /// - constructors and destructors return 'this';
- /// - guard variables are smaller;
- /// - inline functions are never key functions;
- /// - array cookies have a slightly different layout;
- /// - additional convenience functions are specified;
- /// - and more!
- ///
- /// It is documented here:
- /// http://infocenter.arm.com
- /// /help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
- GenericARM,
-
- /// The iOS ABI is a partial implementation of the ARM ABI.
- /// Several of the features of the ARM ABI were not fully implemented
- /// in the compilers that iOS was launched with.
- ///
- /// Essentially, the iOS ABI includes the ARM changes to:
- /// - member function pointers,
- /// - guard variables,
- /// - array cookies, and
- /// - constructor/destructor signatures.
- iOS,
-
- /// The iOS 64-bit and macOS 64-bit ARM ABI follows ARM's published 64-bit
- /// ABI more closely, but we don't guarantee to follow it perfectly.
- ///
- /// It is documented here:
- /// http://infocenter.arm.com
- /// /help/topic/com.arm.doc.ihi0059a/IHI0059A_cppabi64.pdf
- AppleARM64,
-
- /// WatchOS is a modernisation of the iOS ABI, which roughly means it's
- /// the AppleARM64 ABI ported to 32-bits. The primary difference from
- /// AppleARM64 is that RTTI objects must still be unique at the moment.
- WatchOS,
-
- /// The generic AArch64 ABI is also a modified version of the Itanium ABI,
- /// but it has fewer divergences than the 32-bit ARM ABI.
- ///
- /// The relevant changes from the generic ABI in this case are:
- /// - representation of member function pointers adjusted as in ARM.
- /// - guard variables are smaller.
- GenericAArch64,
-
- /// The generic Mips ABI is a modified version of the Itanium ABI.
- ///
- /// At the moment, only change from the generic ABI in this case is:
- /// - representation of member function pointers adjusted as in ARM.
- GenericMIPS,
-
- /// The WebAssembly ABI is a modified version of the Itanium ABI.
- ///
- /// The changes from the Itanium ABI are:
- /// - representation of member function pointers is adjusted, as in ARM;
- /// - member functions are not specially aligned;
- /// - constructors and destructors return 'this', as in ARM;
- /// - guard variables are 32-bit on wasm32, as in ARM;
- /// - unused bits of guard variables are reserved, as in ARM;
- /// - inline functions are never key functions, as in ARM;
- /// - C++11 POD rules are used for tail padding, as in AppleARM64.
- ///
- /// TODO: At present the WebAssembly ABI is not considered stable, so none
- /// of these details is necessarily final yet.
- WebAssembly,
-
- /// The Fuchsia ABI is a modified version of the Itanium ABI.
- ///
- /// The relevant changes from the Itanium ABI are:
- /// - constructors and destructors return 'this', as in ARM.
- Fuchsia,
-
- /// The XL ABI is the ABI used by IBM xlclang compiler and is a modified
- /// version of the Itanium ABI.
- ///
- /// The relevant changes from the Itanium ABI are:
- /// - static initialization is adjusted to use sinit and sterm functions;
- XL,
-
- /// The Microsoft ABI is the ABI used by Microsoft Visual Studio (and
- /// compatible compilers).
- ///
- /// FIXME: should this be split into Win32 and Win64 variants?
- ///
- /// Only scattered and incomplete official documentation exists.
- Microsoft
+#define CXXABI(Name, Str) Name,
+#include "TargetCXXABI.def"
};
private:
@@ -132,7 +39,37 @@ private:
// audit the users to pass it by reference instead.
Kind TheKind;
+ static const auto &getABIMap() {
+ static llvm::StringMap<Kind> ABIMap = {
+#define CXXABI(Name, Str) {Str, Name},
+#include "TargetCXXABI.def"
+ };
+ return ABIMap;
+ }
+
+ static const auto &getSpellingMap() {
+ static std::map<Kind, std::string> SpellingMap = {
+#define CXXABI(Name, Str) {Name, Str},
+#include "TargetCXXABI.def"
+ };
+ return SpellingMap;
+ }
+
public:
+ static Kind getKind(StringRef Name) { return getABIMap().lookup(Name); }
+ static const auto &getSpelling(Kind ABIKind) {
+ return getSpellingMap().find(ABIKind)->second;
+ }
+ static bool isABI(StringRef Name) {
+ return getABIMap().find(Name) != getABIMap().end();
+ }
+
+ // Return true if this target should use the relative vtables C++ ABI by
+ // default.
+ static bool usesRelativeVTables(const llvm::Triple &T) {
+ return T.isOSFuchsia();
+ }
+
/// A bogus initialization of the platform ABI.
TargetCXXABI() : TheKind(GenericItanium) {}
@@ -144,22 +81,54 @@ public:
Kind getKind() const { return TheKind; }
- /// Does this ABI generally fall into the Itanium family of ABIs?
- bool isItaniumFamily() const {
- switch (getKind()) {
- case AppleARM64:
- case Fuchsia:
- case GenericAArch64:
- case GenericItanium:
+ // Check that the kind provided by the fc++-abi flag is supported on this
+ // target. Users who want to experiment using different ABIs on specific
+ // platforms can change this freely, but this function should be conservative
+ // enough such that not all ABIs are allowed on all platforms. For example, we
+ // probably don't want to allow usage of an ARM ABI on an x86 architecture.
+ static bool isSupportedCXXABI(const llvm::Triple &T, Kind Kind) {
+ switch (Kind) {
case GenericARM:
+ return T.isARM() || T.isAArch64();
+
case iOS:
case WatchOS:
+ case AppleARM64:
+ return T.isOSDarwin();
+
+ case Fuchsia:
+ return T.isOSFuchsia();
+
+ case GenericAArch64:
+ return T.isAArch64();
+
case GenericMIPS:
+ return T.isMIPS();
+
case WebAssembly:
+ return T.isWasm();
+
case XL:
+ return T.isOSAIX();
+
+ case GenericItanium:
return true;
case Microsoft:
+ return T.isKnownWindowsMSVCEnvironment();
+ }
+ llvm_unreachable("invalid CXXABI kind");
+ };
+
+ /// Does this ABI generally fall into the Itanium family of ABIs?
+ bool isItaniumFamily() const {
+ switch (getKind()) {
+#define CXXABI(Name, Str)
+#define ITANIUM_CXXABI(Name, Str) case Name:
+#include "TargetCXXABI.def"
+ return true;
+
+ default:
return false;
}
llvm_unreachable("bad ABI kind");
@@ -168,20 +137,13 @@ public:
/// Is this ABI an MSVC-compatible ABI?
bool isMicrosoft() const {
switch (getKind()) {
- case AppleARM64:
- case Fuchsia:
- case GenericAArch64:
- case GenericItanium:
- case GenericARM:
- case iOS:
- case WatchOS:
- case GenericMIPS:
- case WebAssembly:
- case XL:
- return false;
-
- case Microsoft:
+#define CXXABI(Name, Str)
+#define MICROSOFT_CXXABI(Name, Str) case Name:
+#include "TargetCXXABI.def"
return true;
+
+ default:
+ return false;
}
llvm_unreachable("bad ABI kind");
}
diff --git a/clang/include/clang/Basic/TargetInfo.h b/clang/include/clang/Basic/TargetInfo.h
index b782172d93a3..4f0cbf986b31 100644
--- a/clang/include/clang/Basic/TargetInfo.h
+++ b/clang/include/clang/Basic/TargetInfo.h
@@ -32,6 +32,7 @@
#include "llvm/ADT/Triple.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/VersionTuple.h"
#include <cassert>
#include <string>
@@ -39,7 +40,6 @@
namespace llvm {
struct fltSemantics;
-class DataLayout;
}
namespace clang {
@@ -129,9 +129,9 @@ struct TransferrableTargetInfo {
Float128
};
protected:
- IntType SizeType, IntMaxType, PtrDiffType, IntPtrType, WCharType,
- WIntType, Char16Type, Char32Type, Int64Type, SigAtomicType,
- ProcessIDType;
+ IntType SizeType, IntMaxType, PtrDiffType, IntPtrType, WCharType, WIntType,
+ Char16Type, Char32Type, Int64Type, Int16Type, SigAtomicType,
+ ProcessIDType;
/// Whether Objective-C's built-in boolean type should be signed char.
///
@@ -154,12 +154,20 @@ protected:
/// zero-length bitfield.
unsigned UseZeroLengthBitfieldAlignment : 1;
+ /// Whether zero length bitfield alignment is respected if they are the
+ /// leading members.
+ unsigned UseLeadingZeroLengthBitfield : 1;
+
/// Whether explicit bit field alignment attributes are honored.
unsigned UseExplicitBitFieldAlignment : 1;
/// If non-zero, specifies a fixed alignment value for bitfields that follow
/// zero length bitfield, regardless of the zero length bitfield type.
unsigned ZeroLengthBitfieldBoundary;
+
+ /// If non-zero, specifies a maximum alignment to truncate alignment
+ /// specified in the aligned attribute of a static variable to this value.
+ unsigned MaxAlignedAttribute;
};
/// OpenCL type kinds.
@@ -196,7 +204,8 @@ protected:
unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
unsigned short SimdDefaultAlign;
- std::unique_ptr<llvm::DataLayout> DataLayout;
+ std::string DataLayoutString;
+ const char *UserLabelPrefix;
const char *MCountName;
unsigned char RegParmMax, SSERegParmMax;
TargetCXXABI TheCXXABI;
@@ -218,6 +227,8 @@ protected:
unsigned HasAArch64SVETypes : 1;
+ unsigned HasRISCVVTypes : 1;
+
unsigned AllowAMDGPUUnsafeFPAtomics : 1;
unsigned ARMCDECoprocMask : 8;
@@ -227,7 +238,9 @@ protected:
// TargetInfo Constructor. Default initializes all fields.
TargetInfo(const llvm::Triple &T);
- void resetDataLayout(StringRef DL);
+ // UserLabelPrefix must match DL's getGlobalPrefix() when interpreted
+ // as a DataLayout object.
+ void resetDataLayout(StringRef DL, const char *UserLabelPrefix = "");
public:
/// Construct a target for the given options.
@@ -338,6 +351,10 @@ public:
IntType getUInt64Type() const {
return getCorrespondingUnsignedType(Int64Type);
}
+ IntType getInt16Type() const { return Int16Type; }
+ IntType getUInt16Type() const {
+ return getCorrespondingUnsignedType(Int16Type);
+ }
IntType getSigAtomicType() const { return SigAtomicType; }
IntType getProcessIDType() const { return ProcessIDType; }
@@ -601,8 +618,8 @@ public:
}
/// Return the largest alignment for which a suitably-sized allocation with
- /// '::operator new(size_t)' is guaranteed to produce a correctly-aligned
- /// pointer.
+ /// '::operator new(size_t)' or 'malloc' is guaranteed to produce a
+ /// correctly-aligned pointer.
unsigned getNewAlign() const {
return NewAlign ? NewAlign : std::max(LongDoubleAlign, LongLongAlign);
}
@@ -736,6 +753,12 @@ public:
return PointerWidth;
}
+ /// \brief Returns the default value of the __USER_LABEL_PREFIX__ macro,
+ /// which is the prefix given to user symbols by default.
+ ///
+ /// On most platforms this is "", but it is "_" on some.
+ const char *getUserLabelPrefix() const { return UserLabelPrefix; }
+
/// Returns the name of the mcount instrumentation function.
const char *getMCountName() const {
return MCountName;
@@ -765,12 +788,22 @@ public:
return UseZeroLengthBitfieldAlignment;
}
+ /// Check whether zero length bitfield alignment is respected if they are
+ /// leading members.
+ bool useLeadingZeroLengthBitfield() const {
+ return UseLeadingZeroLengthBitfield;
+ }
+
/// Get the fixed alignment value in bits for a member that follows
/// a zero length bitfield.
unsigned getZeroLengthBitfieldBoundary() const {
return ZeroLengthBitfieldBoundary;
}
+ /// Get the maximum alignment in bits for a static variable with
+ /// aligned attribute.
+ unsigned getMaxAlignedAttribute() const { return MaxAlignedAttribute; }
+
/// Check whether explicit bitfield alignment attributes should be
// honored, as in "__attribute__((aligned(2))) int b : 1;".
bool useExplicitBitFieldAlignment() const {
@@ -859,6 +892,10 @@ public:
/// available on this target.
bool hasAArch64SVETypes() const { return HasAArch64SVETypes; }
+ /// Returns whether or not the RISC-V V built-in types are
+ /// available on this target.
+ bool hasRISCVVTypes() const { return HasRISCVVTypes; }
+
/// Returns whether or not the AMDGPU unsafe floating point atomics are
/// allowed.
bool allowAMDGPUUnsafeFPAtomics() const { return AllowAMDGPUUnsafeFPAtomics; }
@@ -1054,6 +1091,12 @@ public:
return std::string(1, *Constraint);
}
+ /// Replace some escaped characters with another string based on
+ /// target-specific rules
+ virtual llvm::Optional<std::string> handleAsmEscapedChar(char C) const {
+ return llvm::None;
+ }
+
/// Returns a string of target-specific clobbers, in LLVM format.
virtual const char *getClobbers() const = 0;
@@ -1071,9 +1114,9 @@ public:
/// Returns the target ID if supported.
virtual llvm::Optional<std::string> getTargetID() const { return llvm::None; }
- const llvm::DataLayout &getDataLayout() const {
- assert(DataLayout && "Uninitialized DataLayout!");
- return *DataLayout;
+ const char *getDataLayoutString() const {
+ assert(!DataLayoutString.empty() && "Uninitialized DataLayout!");
+ return DataLayoutString.c_str();
}
struct GCCRegAlias {
@@ -1105,19 +1148,13 @@ public:
getTriple().isWindowsItaniumEnvironment() || getTriple().isPS4CPU();
}
- /// An optional hook that targets can implement to perform semantic
- /// checking on attribute((section("foo"))) specifiers.
- ///
- /// In this case, "foo" is passed in to be checked. If the section
- /// specifier is invalid, the backend should return a non-empty string
- /// that indicates the problem.
- ///
- /// This hook is a simple quality of implementation feature to catch errors
- /// and give good diagnostics in cases when the assembler or code generator
- /// would otherwise reject the section specifier.
- ///
- virtual std::string isValidSectionSpecifier(StringRef SR) const {
- return "";
+ // Does this target have PS4 specific dllimport/export handling?
+ virtual bool hasPS4DLLImportExport() const {
+ return getTriple().isPS4CPU() ||
+ // Windows Itanium support allows for testing the SCEI flavour of
+ // dllimport/export handling on a Windows system.
+ (getTriple().isWindowsItaniumEnvironment() &&
+ getTriple().getVendor() == llvm::Triple::SCEI);
}
/// Set forced language options.
@@ -1125,7 +1162,7 @@ public:
/// Apply changes to the target information with respect to certain
/// language options which change the target configuration and adjust
/// the language based on the target options where applicable.
- virtual void adjust(LangOptions &Opts);
+ virtual void adjust(DiagnosticsEngine &Diags, LangOptions &Opts);
/// Adjust target options based on codegen options.
virtual void adjustTargetOptions(const CodeGenOptions &CGOpts,
@@ -1192,6 +1229,12 @@ public:
return false;
}
+ /// Check if target has a given feature enabled
+ virtual bool hasFeatureEnabled(const llvm::StringMap<bool> &Features,
+ StringRef Name) const {
+ return Features.lookup(Name);
+ }
+
/// Enable or disable a specific target feature;
/// the feature name must be valid.
virtual void setFeatureEnabled(llvm::StringMap<bool> &Features,
@@ -1378,6 +1421,12 @@ public:
bool isBigEndian() const { return BigEndian; }
bool isLittleEndian() const { return !BigEndian; }
+ /// Whether the option -fextend-arguments={32,64} is supported on the target.
+ virtual bool supportsExtendIntArgs() const { return false; }
+
+ /// Controls if __arithmetic_fence is supported in the targeted backend.
+ virtual bool checkArithmeticFenceSupported() const { return false; }
+
/// Gets the default calling convention for the given target and
/// declaration context.
virtual CallingConv getDefaultCallingConv() const {
@@ -1439,7 +1488,8 @@ public:
virtual void setSupportedOpenCLOpts() {}
virtual void supportAllOpenCLOpts(bool V = true) {
-#define OPENCLEXTNAME(Ext) getTargetOpts().OpenCLFeaturesMap[#Ext] = V;
+#define OPENCLEXTNAME(Ext) \
+ setFeatureEnabled(getTargetOpts().OpenCLFeaturesMap, #Ext, V);
#include "clang/Basic/OpenCLExtensions.def"
}
@@ -1459,10 +1509,6 @@ public:
}
}
- /// Define OpenCL macros based on target settings and language version
- void getOpenCLFeatureDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const;
-
/// Get supported OpenCL extensions and optional core features.
llvm::StringMap<bool> &getSupportedOpenCLOpts() {
return getTargetOpts().OpenCLFeaturesMap;
@@ -1502,10 +1548,15 @@ public:
return true;
}
+ /// Check that OpenCL target has valid options setting based on OpenCL
+ /// version.
+ virtual bool validateOpenCLTarget(const LangOptions &Opts,
+ DiagnosticsEngine &Diags) const;
+
virtual void setAuxTarget(const TargetInfo *Aux) {}
- /// Whether target allows debuginfo types for decl only variables.
- virtual bool allowDebugInfoForExternalVar() const { return false; }
+ /// Whether target allows debuginfo types for decl only variables/functions.
+ virtual bool allowDebugInfoForExternalRef() const { return false; }
protected:
/// Copy type and layout related info.
diff --git a/clang/include/clang/Basic/Thunk.h b/clang/include/clang/Basic/Thunk.h
new file mode 100644
index 000000000000..91088be6ae73
--- /dev/null
+++ b/clang/include/clang/Basic/Thunk.h
@@ -0,0 +1,188 @@
+//===----- Thunk.h - Declarations related to VTable Thunks ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Enums/classes describing THUNK related information about constructors,
+/// destructors and thunks.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_THUNK_H
+#define LLVM_CLANG_BASIC_THUNK_H
+
+#include <cstdint>
+#include <cstring>
+
+namespace clang {
+
+class CXXMethodDecl;
+
+/// A return adjustment.
+struct ReturnAdjustment {
+ /// The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// Holds the ABI-specific information about the virtual return
+ /// adjustment, if needed.
+ union VirtualAdjustment {
+ // Itanium ABI
+ struct {
+ /// The offset (in bytes), relative to the address point
+ /// of the virtual base class offset.
+ int64_t VBaseOffsetOffset;
+ } Itanium;
+
+ // Microsoft ABI
+ struct {
+ /// The offset (in bytes) of the vbptr, relative to the beginning
+ /// of the derived class.
+ uint32_t VBPtrOffset;
+
+ /// Index of the virtual base in the vbtable.
+ uint32_t VBIndex;
+ } Microsoft;
+
+ VirtualAdjustment() { memset(this, 0, sizeof(*this)); }
+
+ bool Equals(const VirtualAdjustment &Other) const {
+ return memcmp(this, &Other, sizeof(Other)) == 0;
+ }
+
+ bool isEmpty() const {
+ VirtualAdjustment Zero;
+ return Equals(Zero);
+ }
+
+ bool Less(const VirtualAdjustment &RHS) const {
+ return memcmp(this, &RHS, sizeof(RHS)) < 0;
+ }
+ } Virtual;
+
+ ReturnAdjustment() : NonVirtual(0) {}
+
+ bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
+
+ friend bool operator==(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Equals(RHS.Virtual);
+ }
+
+ friend bool operator!=(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return !(LHS == RHS);
+ }
+
+ friend bool operator<(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Less(RHS.Virtual);
+ }
+};
+
+/// A \c this pointer adjustment.
+struct ThisAdjustment {
+ /// The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// Holds the ABI-specific information about the virtual this
+ /// adjustment, if needed.
+ union VirtualAdjustment {
+ // Itanium ABI
+ struct {
+ /// The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
+ } Itanium;
+
+ struct {
+ /// The offset of the vtordisp (in bytes), relative to the ECX.
+ int32_t VtordispOffset;
+
+ /// The offset of the vbptr of the derived class (in bytes),
+ /// relative to the ECX after vtordisp adjustment.
+ int32_t VBPtrOffset;
+
+ /// The offset (in bytes) of the vbase offset in the vbtable.
+ int32_t VBOffsetOffset;
+ } Microsoft;
+
+ VirtualAdjustment() { memset(this, 0, sizeof(*this)); }
+
+ bool Equals(const VirtualAdjustment &Other) const {
+ return memcmp(this, &Other, sizeof(Other)) == 0;
+ }
+
+ bool isEmpty() const {
+ VirtualAdjustment Zero;
+ return Equals(Zero);
+ }
+
+ bool Less(const VirtualAdjustment &RHS) const {
+ return memcmp(this, &RHS, sizeof(RHS)) < 0;
+ }
+ } Virtual;
+
+ ThisAdjustment() : NonVirtual(0) {}
+
+ bool isEmpty() const { return !NonVirtual && Virtual.isEmpty(); }
+
+ friend bool operator==(const ThisAdjustment &LHS, const ThisAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Equals(RHS.Virtual);
+ }
+
+ friend bool operator!=(const ThisAdjustment &LHS, const ThisAdjustment &RHS) {
+ return !(LHS == RHS);
+ }
+
+ friend bool operator<(const ThisAdjustment &LHS, const ThisAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual && LHS.Virtual.Less(RHS.Virtual);
+ }
+};
+
+/// The \c this pointer adjustment as well as an optional return
+/// adjustment for a thunk.
+struct ThunkInfo {
+ /// The \c this pointer adjustment.
+ ThisAdjustment This;
+
+ /// The return adjustment.
+ ReturnAdjustment Return;
+
+ /// Holds a pointer to the overridden method this thunk is for,
+ /// if needed by the ABI to distinguish different thunks with equal
+ /// adjustments. Otherwise, null.
+ /// CAUTION: In the unlikely event you need to sort ThunkInfos, consider using
+ /// an ABI-specific comparator.
+ const CXXMethodDecl *Method;
+
+ ThunkInfo() : Method(nullptr) {}
+
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return,
+ const CXXMethodDecl *Method = nullptr)
+ : This(This), Return(Return), Method(Method) {}
+
+ friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ return LHS.This == RHS.This && LHS.Return == RHS.Return &&
+ LHS.Method == RHS.Method;
+ }
+
+ bool isEmpty() const {
+ return This.isEmpty() && Return.isEmpty() && Method == nullptr;
+ }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/clang/include/clang/Basic/TokenKinds.def b/clang/include/clang/Basic/TokenKinds.def
index 572ebae6618d..48a664e3494e 100644
--- a/clang/include/clang/Basic/TokenKinds.def
+++ b/clang/include/clang/Basic/TokenKinds.def
@@ -98,6 +98,8 @@ PPKEYWORD(if)
PPKEYWORD(ifdef)
PPKEYWORD(ifndef)
PPKEYWORD(elif)
+PPKEYWORD(elifdef)
+PPKEYWORD(elifndef)
PPKEYWORD(else)
PPKEYWORD(endif)
PPKEYWORD(defined)
@@ -694,11 +696,12 @@ ALIAS("_declspec" , __declspec , KEYMS)
ALIAS("_pascal" , __pascal , KEYBORLAND)
// Clang Extensions.
-KEYWORD(__builtin_convertvector , KEYALL)
-ALIAS("__char16_t" , char16_t , KEYCXX)
-ALIAS("__char32_t" , char32_t , KEYCXX)
-KEYWORD(__builtin_bit_cast , KEYALL)
-KEYWORD(__builtin_available , KEYALL)
+KEYWORD(__builtin_convertvector , KEYALL)
+ALIAS("__char16_t" , char16_t , KEYCXX)
+ALIAS("__char32_t" , char32_t , KEYCXX)
+KEYWORD(__builtin_bit_cast , KEYALL)
+KEYWORD(__builtin_available , KEYALL)
+KEYWORD(__builtin_sycl_unique_stable_name, KEYSYCL)
// Clang-specific keywords enabled only in testing.
TESTING_KEYWORD(__unknown_anytype , KEYALL)
@@ -860,6 +863,13 @@ PRAGMA_ANNOTATION(pragma_ms_pragma)
PRAGMA_ANNOTATION(pragma_opencl_extension)
// Annotations for OpenMP pragma directives - #pragma omp ...
+// The parser produces this annotation token when it parses an [[omp::*]]
+// attribute. The tokens from the attribute argument list are replayed to the
+// token stream with this leading token (and a trailing pragma_openmp_end) so
+// that the parser can reuse the OpenMP parsing logic but still be able to
+// distinguish between a real pragma and a converted pragma. It is not marked
+// as a PRAGMA_ANNOTATION because it doesn't get generated from a #pragma.
+ANNOTATION(attr_openmp)
// The lexer produces these so that they only take effect when the parser
// handles #pragma omp ... directives.
PRAGMA_ANNOTATION(pragma_openmp)
diff --git a/clang/include/clang/Basic/XRayInstr.h b/clang/include/clang/Basic/XRayInstr.h
index 42ca7773fcce..23ca2c75fc99 100644
--- a/clang/include/clang/Basic/XRayInstr.h
+++ b/clang/include/clang/Basic/XRayInstr.h
@@ -65,8 +65,13 @@ struct XRayInstrSet {
XRayInstrMask Mask = 0;
};
+/// Parses a command line argument into a mask.
XRayInstrMask parseXRayInstrValue(StringRef Value);
+/// Serializes a set into a list of command line arguments.
+void serializeXRayInstrValue(XRayInstrSet Set,
+ SmallVectorImpl<StringRef> &Values);
+
} // namespace clang
#endif // LLVM_CLANG_BASIC_XRAYINSTR_H
diff --git a/clang/include/clang/Basic/arm_mve.td b/clang/include/clang/Basic/arm_mve.td
index 8106f9a5a9de..52185ca07da4 100644
--- a/clang/include/clang/Basic/arm_mve.td
+++ b/clang/include/clang/Basic/arm_mve.td
@@ -575,7 +575,7 @@ defm vmaxnmavq: Reduction<Scalar, "maxnmav", [Scalar, Vector]>;
}
foreach half = [ "b", "t" ] in {
- defvar halfconst = !if(!eq(half, "b"), 0, 1);
+ defvar halfconst = !ne(half, "b");
let params = [f32], pnt = PNT_None in {
def vcvt#half#q_f16: Intrinsic<
@@ -1153,8 +1153,7 @@ defm vshlltq : vshll_imm<1>;
multiclass DyadicImmShift<Type outtype, Immediate imm, string intname = NAME,
dag extraargs = (?)> {
- defvar intparams = !if(!eq(!cast<string>(outtype), !cast<string>(Vector)),
- [Vector], [outtype, Vector]);
+ defvar intparams = !if(!eq(outtype, Vector), [Vector], [outtype, Vector]);
def q_n: Intrinsic<
outtype, (args outtype:$a, Vector:$b, imm:$sh),
@@ -1529,12 +1528,7 @@ let params = T.Usual in {
foreach desttype = T.All in {
// We want a vreinterpretq between every pair of supported vector types
// _except_ that there shouldn't be one from a type to itself.
- //
- // So this foldl expression implements what you'd write in Python as
- // [srctype for srctype in T.All if srctype != desttype]
- let params = !foldl([]<Type>, T.All, tlist, srctype, !listconcat(tlist,
- !if(!eq(!cast<string>(desttype),!cast<string>(srctype)),[],[srctype])))
- in {
+ let params = !filter(srctype, T.All, !ne(srctype, desttype)) in {
def "vreinterpretq_" # desttype: Intrinsic<
VecOf<desttype>, (args Vector:$x), (vreinterpret $x, VecOf<desttype>)>;
}
@@ -1543,7 +1537,7 @@ foreach desttype = T.All in {
let params = T.All in {
let pnt = PNT_None in {
def vcreateq: Intrinsic<Vector, (args u64:$a, u64:$b),
- (bitcast (ielt_const (ielt_const (undef VecOf<u64>), $a, 0),
+ (vreinterpret (ielt_const (ielt_const (undef VecOf<u64>), $a, 0),
$b, 1), Vector)>;
def vuninitializedq: Intrinsic<Vector, (args), (undef Vector)>;
}
@@ -1576,8 +1570,9 @@ foreach desttype = !listconcat(T.Int16, T.Int32, T.Float) in {
defvar is_dest_float = !eq(desttype.kind, "f");
defvar is_dest_unsigned = !eq(desttype.kind, "u");
// First immediate operand of the LLVM intrinsic
- defvar unsigned_flag = !if(is_dest_float, (unsignedflag Scalar),
- !if(is_dest_unsigned, V.True, V.False));
+ defvar unsigned_flag = !cond(is_dest_float: (unsignedflag Scalar),
+ is_dest_unsigned: V.True,
+ true: V.False);
// For float->int conversions _n and _x_n intrinsics are not polymorphic
// because the signedness of the destination type cannot be inferred.
defvar pnt_nx = !if(is_dest_float, PNT_2Type, PNT_None);
diff --git a/clang/include/clang/Basic/arm_neon.td b/clang/include/clang/Basic/arm_neon.td
index 3b2a578f796e..173003d171ee 100644
--- a/clang/include/clang/Basic/arm_neon.td
+++ b/clang/include/clang/Basic/arm_neon.td
@@ -646,7 +646,7 @@ def VABS : SInst<"vabs", "..", "csifQcQsQiQf">;
def VQABS : SInst<"vqabs", "..", "csiQcQsQi">;
def VNEG : SOpInst<"vneg", "..", "csifQcQsQiQf", OP_NEG>;
def VQNEG : SInst<"vqneg", "..", "csiQcQsQi">;
-def VCLS : SInst<"vcls", "..", "csiQcQsQi">;
+def VCLS : SInst<"vcls", "S.", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VCLZ : IInst<"vclz", "..", "csiUcUsUiQcQsQiQUcQUsQUi">;
def VCNT : WInst<"vcnt", "..", "UccPcQUcQcQPc">;
def VRECPE : SInst<"vrecpe", "..", "fUiQfQUi">;
@@ -709,6 +709,11 @@ def SCALAR_HALF_GET_LANEQ : IOpInst<"vget_lane", "1.I", "Qh", OP_SCALAR_HALF_GET
def SCALAR_HALF_SET_LANEQ : IOpInst<"vset_lane", ".1.I", "Qh", OP_SCALAR_HALF_SET_LNQ>;
////////////////////////////////////////////////////////////////////////////////
+// Non poly128_t vaddp for Arm and AArch64
+// TODO: poly128_t not implemented on arm32
+def VADDP : WInst<"vadd", "...", "PcPsPlQPcQPsQPl">;
+
+////////////////////////////////////////////////////////////////////////////////
// AArch64 Intrinsics
let ArchGuard = "defined(__aarch64__)" in {
@@ -1117,12 +1122,14 @@ def VEXT_A64 : WInst<"vext", "...I", "dQdPlQPl">;
////////////////////////////////////////////////////////////////////////////////
// Crypto
-let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_CRYPTO)" in {
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_AES)" in {
def AESE : SInst<"vaese", "...", "QUc">;
def AESD : SInst<"vaesd", "...", "QUc">;
def AESMC : SInst<"vaesmc", "..", "QUc">;
def AESIMC : SInst<"vaesimc", "..", "QUc">;
+}
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA2)" in {
def SHA1H : SInst<"vsha1h", "11", "Ui">;
def SHA1SU1 : SInst<"vsha1su1", "...", "QUi">;
def SHA256SU0 : SInst<"vsha256su0", "...", "QUi">;
@@ -1136,6 +1143,43 @@ def SHA256H2 : SInst<"vsha256h2", "....", "QUi">;
def SHA256SU1 : SInst<"vsha256su1", "....", "QUi">;
}
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA3) && defined(__aarch64__)" in {
+def BCAX : SInst<"vbcax", "....", "QUcQUsQUiQUlQcQsQiQl">;
+def EOR3 : SInst<"veor3", "....", "QUcQUsQUiQUlQcQsQiQl">;
+def RAX1 : SInst<"vrax1", "...", "QUl">;
+
+let isVXAR = 1 in {
+def XAR : SInst<"vxar", "...I", "QUl">;
+}
+}
+
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SHA512) && defined(__aarch64__)" in {
+
+def SHA512SU0 : SInst<"vsha512su0", "...", "QUl">;
+def SHA512su1 : SInst<"vsha512su1", "....", "QUl">;
+def SHA512H : SInst<"vsha512h", "....", "QUl">;
+def SHA512H2 : SInst<"vsha512h2", "....", "QUl">;
+}
+
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM3) && defined(__aarch64__)" in {
+def SM3SS1 : SInst<"vsm3ss1", "....", "QUi">;
+def SM3TT1A : SInst<"vsm3tt1a", "....I", "QUi">;
+def SM3TT1B : SInst<"vsm3tt1b", "....I", "QUi">;
+def SM3TT2A : SInst<"vsm3tt2a", "....I", "QUi">;
+def SM3TT2B : SInst<"vsm3tt2b", "....I", "QUi">;
+def SM3PARTW1 : SInst<"vsm3partw1", "....", "QUi">;
+def SM3PARTW2 : SInst<"vsm3partw2", "....", "QUi">;
+}
+
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__ARM_FEATURE_SM4) && defined(__aarch64__)" in {
+def SM4E : SInst<"vsm4e", "...", "QUi">;
+def SM4EKEY : SInst<"vsm4ekey", "...", "QUi">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// poly128_t vadd for AArch64 only; see VADDP for the rest
+def VADDP_Q : WInst<"vadd", "...", "QPk">;
+
////////////////////////////////////////////////////////////////////////////////
// Float -> Int conversions with explicit rounding mode
@@ -1184,6 +1228,13 @@ def FRINTZ_S64 : SInst<"vrnd", "..", "dQd">;
def FRINTI_S64 : SInst<"vrndi", "..", "dQd">;
}
+let ArchGuard = "__ARM_ARCH >= 8 && defined(__aarch64__) && defined(__ARM_FEATURE_FRINT)" in {
+def FRINT32X_S32 : SInst<"vrnd32x", "..", "fQf">;
+def FRINT32Z_S32 : SInst<"vrnd32z", "..", "fQf">;
+def FRINT64X_S32 : SInst<"vrnd64x", "..", "fQf">;
+def FRINT64Z_S32 : SInst<"vrnd64z", "..", "fQf">;
+}
+
////////////////////////////////////////////////////////////////////////////////
// MaxNum/MinNum Floating Point
@@ -1838,7 +1889,7 @@ let ArchGuard = "defined(__ARM_FEATURE_DOTPROD) && defined(__aarch64__)" in {
}
// v8.2-A FP16 fused multiply-add long instructions.
-let ArchGuard = "defined(__ARM_FEATURE_FP16FML) && defined(__aarch64__)" in {
+let ArchGuard = "defined(__ARM_FEATURE_FP16_FML) && defined(__aarch64__)" in {
def VFMLAL_LOW : SInst<"vfmlal_low", ">>..", "hQh">;
def VFMLSL_LOW : SInst<"vfmlsl_low", ">>..", "hQh">;
def VFMLAL_HIGH : SInst<"vfmlal_high", ">>..", "hQh">;
diff --git a/clang/include/clang/Basic/arm_neon_incl.td b/clang/include/clang/Basic/arm_neon_incl.td
index dd20b70433ef..60dbea627d58 100644
--- a/clang/include/clang/Basic/arm_neon_incl.td
+++ b/clang/include/clang/Basic/arm_neon_incl.td
@@ -272,6 +272,7 @@ class Inst <string n, string p, string t, Operation o> {
bit isScalarShift = 0;
bit isScalarNarrowShift = 0;
bit isVCVT_N = 0;
+ bit isVXAR = 0;
// For immediate checks: the immediate will be assumed to specify the lane of
// a Q register. Only used for intrinsics which end up calling polymorphic
// builtins.
diff --git a/clang/include/clang/Basic/arm_sve.td b/clang/include/clang/Basic/arm_sve.td
index 19a42e79c36a..5e9d1c96558b 100644
--- a/clang/include/clang/Basic/arm_sve.td
+++ b/clang/include/clang/Basic/arm_sve.td
@@ -668,16 +668,16 @@ let ArchGuard = "defined(__ARM_FEATURE_SVE_BF16)" in {
// Prefetches
// Prefetch (Scalar base)
-def SVPRFB : MInst<"svprfb", "vPcJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
-def SVPRFH : MInst<"svprfh", "vPcJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
-def SVPRFW : MInst<"svprfw", "vPcJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
-def SVPRFD : MInst<"svprfd", "vPcJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
+def SVPRFB : MInst<"svprfb", "vPQJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
+def SVPRFH : MInst<"svprfh", "vPQJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
+def SVPRFW : MInst<"svprfw", "vPQJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
+def SVPRFD : MInst<"svprfd", "vPQJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
// Prefetch (Scalar base, VL displacement)
-def SVPRFB_VNUM : MInst<"svprfb_vnum", "vPclJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
-def SVPRFH_VNUM : MInst<"svprfh_vnum", "vPclJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
-def SVPRFW_VNUM : MInst<"svprfw_vnum", "vPclJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
-def SVPRFD_VNUM : MInst<"svprfd_vnum", "vPclJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
+def SVPRFB_VNUM : MInst<"svprfb_vnum", "vPQlJ", "c", [IsPrefetch], MemEltTyInt8, "aarch64_sve_prf">;
+def SVPRFH_VNUM : MInst<"svprfh_vnum", "vPQlJ", "s", [IsPrefetch], MemEltTyInt16, "aarch64_sve_prf">;
+def SVPRFW_VNUM : MInst<"svprfw_vnum", "vPQlJ", "i", [IsPrefetch], MemEltTyInt32, "aarch64_sve_prf">;
+def SVPRFD_VNUM : MInst<"svprfd_vnum", "vPQlJ", "l", [IsPrefetch], MemEltTyInt64, "aarch64_sve_prf">;
// Prefetch (Vector bases)
def SVPRFB_GATHER_BASES : MInst<"svprfb_gather[_{2}base]", "vPdJ", "UiUl", [IsGatherPrefetch], MemEltTyInt8, "aarch64_sve_prfb_gather_scalar_offset">;
@@ -2073,7 +2073,7 @@ def SVSM4E : SInst<"svsm4e[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm
def SVSM4EKEY : SInst<"svsm4ekey[_{d}]", "ddd", "Ui", MergeNone, "aarch64_sve_sm4ekey", [IsOverloadNone]>;
}
-let ArchGuard = "__ARM_FEATURE_SVE2_BITPERM" in {
+let ArchGuard = "defined (__ARM_FEATURE_SVE2_BITPERM)" in {
def SVBDEP : SInst<"svbdep[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x">;
def SVBDEP_N : SInst<"svbdep[_n_{d}]", "dda", "UcUsUiUl", MergeNone, "aarch64_sve_bdep_x">;
def SVBEXT : SInst<"svbext[_{d}]", "ddd", "UcUsUiUl", MergeNone, "aarch64_sve_bext_x">;
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
new file mode 100644
index 000000000000..48c032dd1422
--- /dev/null
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -0,0 +1,2112 @@
+//==--- riscv_vector.td - RISC-V V-ext Builtin function list --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the builtins for RISC-V V-extension. See:
+//
+// https://github.com/riscv/rvv-intrinsic-doc
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction definitions
+//===----------------------------------------------------------------------===//
+// Each record of the class RVVBuiltin defines a collection of builtins (i.e.
+// "def vadd : RVVBuiltin" will be used to define things like "vadd_vv_i32m1",
+// "vadd_vv_i32m2", etc).
+//
+// The elements of this collection are defined by an instantiation process the
+// range of which is specified by the cross product of the LMUL attribute and
+// every element in the attribute TypeRange. By default builtins have LMUL = [1,
+// 2, 4, 8, 1/2, 1/4, 1/8] so the process is repeated 7 times. In tablegen we
+// use the Log2LMUL [0, 1, 2, 3, -1, -2, -3] to represent the LMUL.
+//
+// LMUL represents the fact that the types of values used by that builtin are
+// values generated by instructions that are executed under that LMUL. However,
+// this does not mean the builtin is necessarily lowered into an instruction
+// that executes under the specified LMUL. An example where this happens are
+// loads and stores of masks. A mask like `vbool8_t` can be generated, for
+// instance, by comparing two `__rvv_int8m1_t` (this is LMUL=1) or comparing two
+// `__rvv_int16m2_t` (this is LMUL=2). The actual load or store, however, will
+// be performed under LMUL=1 because mask registers are not grouped.
+//
+// TypeRange is a non-empty sequence of basic types:
+//
+// c: int8_t (i8)
+// s: int16_t (i16)
+// i: int32_t (i32)
+// l: int64_t (i64)
+// x: float16_t (half)
+// f: float32_t (float)
+// d: float64_t (double)
+//
+// This way, given an LMUL, a record with a TypeRange "sil" will cause the
+// definition of 3 builtins. Each type "t" in the TypeRange (in this example
+// they are int16_t, int32_t, int64_t) is used as a parameter that drives the
+// definition of that particular builtin (for the given LMUL).
+//
+// During the instantiation, types can be transformed or modified using type
+// transformers. Given a type "t" the following primitive type transformers can
+// be applied to it to yield another type.
+//
+// e: type of "t" as is (identity)
+// v: computes a vector type whose element type is "t" for the current LMUL
+// w: computes a vector type identical to what 'v' computes except for the
+// element type which is twice as wide as the element type of 'v'
+// q: computes a vector type identical to what 'v' computes except for the
+// element type which is four times as wide as the element type of 'v'
+// o: computes a vector type identical to what 'v' computes except for the
+// element type which is eight times as wide as the element type of 'v'
+// m: computes a vector type identical to what 'v' computes except for the
+// element type which is bool
+// 0: void type, ignores "t"
+// z: size_t, ignores "t"
+// t: ptrdiff_t, ignores "t"
+// u: unsigned long, ignores "t"
+// l: long, ignores "t"
+//
+// So for instance if t is "i", i.e. int, then "e" will yield int again. "v"
+// will yield an RVV vector type (assume LMUL=1), so __rvv_int32m1_t.
+// Accordingly "w" would yield __rvv_int64m2_t.
+//
+// A type transformer can be prefixed by other non-primitive type transformers.
+//
+// P: constructs a pointer to the current type
+// C: adds const to the type
+// K: requires the integer type to be a constant expression
+// U: given an integer type or vector type, computes its unsigned variant
+// I: given a vector type, compute the vector type with integer type
+// elements of the same width
+// F: given a vector type, compute the vector type with floating-point type
+// elements of the same width
+// S: given a vector type, computes its equivalent one for LMUL=1. This is a
+// no-op if the vector was already LMUL=1
+// (Log2EEW:Value): Log2EEW value could be 3/4/5/6 (8/16/32/64), given a
+// vector type (SEW and LMUL) and EEW (8/16/32/64), computes its
+// equivalent integer vector type with EEW and corresponding ELMUL (elmul =
+// (eew/sew) * lmul). For example, vector type is __rvv_float16m4
+// (SEW=16, LMUL=4) and Log2EEW is 3 (EEW=8), and then equivalent vector
+// type is __rvv_uint8m2_t (elmul=(8/16)*4 = 2). Do not define a new
+// builtin if its equivalent type has an illegal lmul.
+// (FixedSEW:Value): Given a vector type (SEW and LMUL), and computes another
+// vector type whose SEW is changed to the given value. Do not define a new
+// builtin if its equivalent type has an illegal lmul or the SEW does not change.
+// (SFixedLog2LMUL:Value): Smaller Fixed Log2LMUL. Given a vector type (SEW
+// and LMUL), and computes another vector type which only changed LMUL as
+// given value. The new LMUL should be smaller than the old one. Do not
+// define a new builtin if its equivalent type has an illegal lmul.
+// (LFixedLog2LMUL:Value): Larger Fixed Log2LMUL. Given a vector type (SEW
+// and LMUL), and computes another vector type which only changed LMUL as
+// given value. The new LMUL should be larger than the old one. Do not
+// define a new builtin if its equivalent type has an illegal lmul.
+//
+// Following with the example above, if t is "i", then "Ue" will yield unsigned
+// int and "Fv" will yield __rvv_float32m1_t (again assuming LMUL=1), Fw would
+// yield __rvv_float64m2_t, etc.
+//
+// Each builtin is then defined by applying each type in TypeRange against the
+// sequence of type transformers described in Suffix and Prototype.
+//
+// The name of the builtin is defined by the Name attribute (which defaults to
+// the name of the class) appended (separated with an underscore) the Suffix
+// attribute. For instance with Name="foo", Suffix = "v" and TypeRange = "il",
+// the builtin generated will be __builtin_rvv_foo_i32m1 and
+// __builtin_rvv_foo_i64m1 (under LMUL=1). If Suffix contains more than one
+// type transformer (say "vv") each of the types is separated with an
+// underscore as in "__builtin_rvv_foo_i32m1_i32m1".
+//
+// The C/C++ prototype of the builtin is defined by the Prototype attribute.
+// Prototype is a non-empty sequence of type transformers, the first of which
+// is the return type of the builtin and the rest are the parameters of the
+// builtin, in order. For instance if Prototype is "wvv" and TypeRange is "si"
+// a first builtin will have type
+// __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t) and the second builtin
+// will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t) (again
+// under LMUL=1).
+//
+// There are a number of attributes that are used to constrain the number and
+// shape of the builtins generated. Refer to the comments below for them.
+class RVVBuiltin<string suffix, string prototype, string type_range,
+ string mangled_suffix = ""> {
+ // Base name that will be prepended in __builtin_rvv_ and appended the
+ // computed Suffix.
+ string Name = NAME;
+
+ // If not empty, each instantiated builtin will have this appended after an
+ // underscore (_). It is instantiated like Prototype.
+ string Suffix = suffix;
+
+ // If empty, the default MangledName is the substring of `Name` up to the
+ // first '_'. For example, the default mangled name is `vadd` for Name
+ // `vadd_vv`. It is used to describe some special naming cases.
+ string MangledName = "";
+
+ // If not empty, each MangledName will have this appended after an
+ // underscore (_). It is instantiated like Prototype.
+ string MangledSuffix = mangled_suffix;
+
+ // The different variants of the builtin, parameterised with a type.
+ string TypeRange = type_range;
+
+ // We use each type described in TypeRange and LMUL with prototype to
+ // instantiate a specific element of the set of builtins being defined.
+ // Prototype attribute defines the C/C++ prototype of the builtin. It is a
+ // non-empty sequence of type transformers, the first of which is the return
+ // type of the builtin and the rest are the parameters of the builtin, in
+ // order. For instance if Prototype is "wvv", TypeRange is "si" and LMUL=1, a
+ // first builtin will have type
+ // __rvv_int32m2_t (__rvv_int16m1_t, __rvv_int16m1_t), and the second builtin
+ // will have type __rvv_int64m2_t (__rvv_int32m1_t, __rvv_int32m1_t).
+ string Prototype = prototype;
+
+ // This builtin has a masked form.
+ bit HasMask = true;
+
+ // If HasMask, this flag states that this builtin has a maskedoff operand. It
+ // is always the first operand in builtin and IR intrinsic.
+ bit HasMaskedOffOperand = true;
+
+ // This builtin has a granted vector length parameter in the last position.
+ bit HasVL = true;
+
+ // This builtin supports non-masked function overloading api.
+ // All masked operations support overloading api.
+ bit HasNoMaskedOverloaded = true;
+
+ // Reads or writes "memory" or has other side-effects.
+ bit HasSideEffects = false;
+
+ // This builtin is valid for the given Log2LMULs.
+ list<int> Log2LMUL = [0, 1, 2, 3, -1, -2, -3];
+
+ // Manual code in clang codegen riscv_vector_builtin_cg.inc
+ code ManualCodegen = [{}];
+ code ManualCodegenMask = [{}];
+
+ // When emitting the automatic clang codegen, it describes what types we have to use
+ // to obtain the specific LLVM intrinsic. -1 means the return type, otherwise,
+ // k >= 0 meaning the k-th operand (counting from zero) of the codegen'd
+ // parameter of the unmasked version. k can't be the mask operand's position.
+ list<int> IntrinsicTypes = [];
+
+ // If these names are not empty, this is the ID of the LLVM intrinsic
+ // we want to lower to.
+ string IRName = NAME;
+
+ // If HasMask, this is the ID of the LLVM intrinsic we want to lower to.
+ string IRNameMask = NAME #"_mask";
+
+ // If non empty, this is the code emitted in the header, otherwise
+ // an automatic definition in header is emitted.
+ string HeaderCode = "";
+
+ // Sub extension of vector spec. Currently only support Zvamo or Zvlsseg.
+ string RequiredExtension = "";
+
+ // Number of fields for Zvlsseg.
+ int NF = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Basic classes with automatic codegen.
+//===----------------------------------------------------------------------===//
+
+class RVVOutBuiltin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1];
+}
+
+class RVVOp0Builtin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [0];
+}
+
+class RVVOutOp1Builtin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1, 1];
+}
+
+class RVVOutOp0Op1Builtin<string suffix, string prototype, string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1, 0, 1];
+}
+
+multiclass RVVBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes,
+ list<int> intrinsic_types> {
+ let IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask",
+ IntrinsicTypes = intrinsic_types in {
+ foreach s_p = suffixes_prototypes in {
+ let Name = NAME # "_" # s_p[0] in {
+ defvar suffix = s_p[1];
+ defvar prototype = s_p[2];
+ def : RVVBuiltin<suffix, prototype, type_range>;
+ }
+ }
+ }
+}
+
+// IntrinsicTypes is output, op0, op1 [-1, 0, 1]
+multiclass RVVOutOp0Op1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes,
+ [-1, 0, 1]>;
+
+multiclass RVVOutBuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1]>;
+
+multiclass RVVOp0BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0]>;
+
+// IntrinsicTypes is output, op1 [-1, 1]
+multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1]>;
+
+multiclass RVVOp0Op1BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0, 1]>;
+
+multiclass RVVOutOp1Op2BuiltinSet<string intrinsic_name, string type_range,
+ list<list<string>> suffixes_prototypes>
+ : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 1, 2]>;
+
+multiclass RVVSignedBinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvv"],
+ ["vx", "v", "vve"]]>;
+
+multiclass RVVUnsignedBinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvUe"]]>;
+
+multiclass RVVIntBinBuiltinSet
+ : RVVSignedBinBuiltinSet,
+ RVVUnsignedBinBuiltinSet;
+
+multiclass RVVSlideOneBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vx", "v", "vve"],
+ ["vx", "Uv", "UvUve"]]>;
+
+multiclass RVVSignedShiftBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvUv"],
+ ["vx", "v", "vvz"]]>;
+
+multiclass RVVUnsignedShiftBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "Uv", "UvUvUv"],
+ ["vx", "Uv", "UvUvz"]]>;
+
+multiclass RVVShiftBuiltinSet
+ : RVVSignedShiftBuiltinSet,
+ RVVUnsignedShiftBuiltinSet;
+
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ multiclass RVVSignedNShiftBuiltinSet
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "v", "vwUv"],
+ ["wx", "v", "vwz"]]>;
+ multiclass RVVUnsignedNShiftBuiltinSet
+ : RVVOutOp0Op1BuiltinSet<NAME, "csil",
+ [["wv", "Uv", "UvUwUv"],
+ ["wx", "Uv", "UvUwz"]]>;
+}
+
+multiclass RVVCarryinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vvm", "v", "vvvm"],
+ ["vxm", "v", "vvem"],
+ ["vvm", "Uv", "UvUvUvm"],
+ ["vxm", "Uv", "UvUvUem"]]>;
+
+multiclass RVVCarryOutInBuiltinSet<string intrinsic_name>
+ : RVVOp0Op1BuiltinSet<intrinsic_name, "csil",
+ [["vvm", "vm", "mvvm"],
+ ["vxm", "vm", "mvem"],
+ ["vvm", "Uvm", "mUvUvm"],
+ ["vxm", "Uvm", "mUvUem"]]>;
+
+multiclass RVVSignedMaskOutBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "csil",
+ [["vv", "vm", "mvv"],
+ ["vx", "vm", "mve"]]>;
+
+multiclass RVVUnsignedMaskOutBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "csil",
+ [["vv", "Uvm", "mUvUv"],
+ ["vx", "Uvm", "mUvUe"]]>;
+
+multiclass RVVIntMaskOutBuiltinSet
+ : RVVSignedMaskOutBuiltinSet,
+ RVVUnsignedMaskOutBuiltinSet;
+
+class RVVIntExt<string intrinsic_name, string suffix, string prototype,
+ string type_range>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IRName = intrinsic_name;
+ let IRNameMask = intrinsic_name # "_mask";
+ let MangledName = NAME;
+ let IntrinsicTypes = [-1, 0];
+}
+
+let HasMaskedOffOperand = false in {
+ multiclass RVVIntTerBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vv", "v", "vvvv"],
+ ["vx", "v", "vvev"],
+ ["vv", "Uv", "UvUvUvUv"],
+ ["vx", "Uv", "UvUvUeUv"]]>;
+ }
+ multiclass RVVFloatingTerBuiltinSet {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvvv"],
+ ["vf", "v", "vvev"]]>;
+ }
+}
+
+let HasMaskedOffOperand = false, Log2LMUL = [-2, -1, 0, 1, 2] in {
+ multiclass RVVFloatingWidenTerBuiltinSet {
+ defm "" : RVVOutOp1Op2BuiltinSet<NAME, "xf",
+ [["vv", "w", "wwvv"],
+ ["vf", "w", "wwev"]]>;
+ }
+}
+
+multiclass RVVFloatingBinBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vv", "v", "vvv"],
+ ["vf", "v", "vve"]]>;
+
+multiclass RVVFloatingBinVFBuiltinSet
+ : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vf", "v", "vve"]]>;
+
+multiclass RVVFloatingMaskOutBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "xfd",
+ [["vv", "vm", "mvv"],
+ ["vf", "vm", "mve"]]>;
+
+multiclass RVVFloatingMaskOutVFBuiltinSet
+ : RVVOp0Op1BuiltinSet<NAME, "fd",
+ [["vf", "vm", "mve"]]>;
+
+class RVVMaskBinBuiltin : RVVOutBuiltin<"m", "mmm", "c"> {
+ let Name = NAME # "_mm";
+ let HasMask = false;
+}
+
+class RVVMaskUnaryBuiltin : RVVOutBuiltin<"m", "mm", "c"> {
+ let Name = NAME # "_m";
+}
+
+class RVVMaskNullaryBuiltin : RVVOutBuiltin<"m", "m", "c"> {
+ let Name = NAME # "_m";
+ let HasMask = false;
+ let HasNoMaskedOverloaded = false;
+}
+
+class RVVMaskOp0Builtin<string prototype> : RVVOp0Builtin<"m", prototype, "c"> {
+ let Name = NAME # "_m";
+ let HasMaskedOffOperand = false;
+}
+
+let HasMaskedOffOperand = false in {
+ multiclass RVVSlideBuiltinSet {
+ defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
+ [["vx","v", "vvvz"]]>;
+ defm "" : RVVOutBuiltinSet<NAME, "csil",
+ [["vx","Uv", "UvUvUvz"]]>;
+ }
+}
+
+class RVVFloatingUnaryBuiltin<string builtin_suffix, string ir_suffix,
+ string prototype>
+ : RVVOutBuiltin<ir_suffix, prototype, "xfd"> {
+ let Name = NAME # "_" # builtin_suffix;
+}
+
+class RVVFloatingUnaryVVBuiltin : RVVFloatingUnaryBuiltin<"v", "v", "vv">;
+
+class RVVConvBuiltin<string suffix, string prototype, string type_range,
+ string mangled_name>
+ : RVVBuiltin<suffix, prototype, type_range> {
+ let IntrinsicTypes = [-1, 0];
+ let MangledName = mangled_name;
+}
+
+class RVVConvToSignedBuiltin<string mangled_name>
+ : RVVConvBuiltin<"Iv", "Ivv", "xfd", mangled_name>;
+
+class RVVConvToUnsignedBuiltin<string mangled_name>
+ : RVVConvBuiltin<"Uv", "Uvv", "xfd", mangled_name>;
+
+class RVVConvToWidenSignedBuiltin<string mangled_name>
+ : RVVConvBuiltin<"Iw", "Iwv", "xf", mangled_name>;
+
+class RVVConvToWidenUnsignedBuiltin<string mangled_name>
+ : RVVConvBuiltin<"Uw", "Uwv", "xf", mangled_name>;
+
+class RVVConvToNarrowingSignedBuiltin<string mangled_name>
+ : RVVConvBuiltin<"Iv", "IvFw", "csi", mangled_name>;
+
+class RVVConvToNarrowingUnsignedBuiltin<string mangled_name>
+ : RVVConvBuiltin<"Uv", "UvFw", "csi", mangled_name>;
+
+let HasMaskedOffOperand = false in {
+ multiclass RVVSignedReductionBuiltin {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vs", "vSv", "SvSvvSv"]]>;
+ }
+ multiclass RVVUnsignedReductionBuiltin {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
+ [["vs", "UvUSv", "USvUSvUvUSv"]]>;
+ }
+ multiclass RVVFloatingReductionBuiltin {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
+ [["vs", "vSv", "SvSvvSv"]]>;
+ }
+ multiclass RVVFloatingWidenReductionBuiltin {
+ defm "" : RVVOutOp1BuiltinSet<NAME, "xf",
+ [["vs", "vSw", "SwSwvSw"]]>;
+ }
+}
+
+multiclass RVVIntReductionBuiltinSet
+ : RVVSignedReductionBuiltin,
+ RVVUnsignedReductionBuiltin;
+
+// For widen operation which has different mangling name.
+// Note the restricted Log2LMUL list: the widened destination doubles
+// SEW/LMUL, so the largest LMUL (3, i.e. m8) is excluded for the source.
+multiclass RVVWidenBuiltinSet<string intrinsic_name, string type_range,
+                              list<list<string>> suffixes_prototypes> {
+  let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+      IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+    foreach s_p = suffixes_prototypes in {
+      let Name = NAME # "_" # s_p[0],
+          MangledName = NAME # "_" # s_p[0] in {
+        defvar suffix = s_p[1];
+        defvar prototype = s_p[2];
+        def : RVVOutOp0Op1Builtin<suffix, prototype, type_range>;
+      }
+    }
+  }
+}
+
+// For widen operation with widen operand which has different mangling name.
+// Same as above but operand 0 is already widened, so only the result and
+// operand 1 are overloaded (RVVOutOp1Builtin).
+multiclass RVVWidenWOp0BuiltinSet<string intrinsic_name, string type_range,
+                                  list<list<string>> suffixes_prototypes> {
+  let Log2LMUL = [-3, -2, -1, 0, 1, 2],
+      IRName = intrinsic_name, IRNameMask = intrinsic_name # "_mask" in {
+    foreach s_p = suffixes_prototypes in {
+      let Name = NAME # "_" # s_p[0],
+          MangledName = NAME # "_" # s_p[0] in {
+        defvar suffix = s_p[1];
+        defvar prototype = s_p[2];
+        def : RVVOutOp1Builtin<suffix, prototype, type_range>;
+      }
+    }
+  }
+}
+
+// 2*SEW = SEW +/- SEW forms: vector-vector and vector-scalar ("e").
+multiclass RVVSignedWidenBinBuiltinSet
+    : RVVWidenBuiltinSet<NAME, "csi",
+                         [["vv", "w", "wvv"],
+                          ["vx", "w", "wve"]]>;
+
+// 2*SEW = 2*SEW +/- SEW forms ("_w" intrinsics).
+multiclass RVVSignedWidenOp0BinBuiltinSet
+    : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
+                             [["wv", "w", "wwv"],
+                              ["wx", "w", "wwe"]]>;
+
+multiclass RVVUnsignedWidenBinBuiltinSet
+    : RVVWidenBuiltinSet<NAME, "csi",
+                         [["vv", "Uw", "UwUvUv"],
+                          ["vx", "Uw", "UwUvUe"]]>;
+
+multiclass RVVUnsignedWidenOp0BinBuiltinSet
+    : RVVWidenWOp0BuiltinSet<NAME # "_w", "csi",
+                             [["wv", "Uw", "UwUwUv"],
+                              ["wx", "Uw", "UwUwUe"]]>;
+
+multiclass RVVFloatingWidenBinBuiltinSet
+    : RVVWidenBuiltinSet<NAME, "xf",
+                         [["vv", "w", "wvv"],
+                          ["vf", "w", "wve"]]>;
+
+multiclass RVVFloatingWidenOp0BinBuiltinSet
+    : RVVWidenWOp0BuiltinSet<NAME # "_w", "xf",
+                             [["wv", "w", "wwv"],
+                              ["wf", "w", "wwe"]]>;
+
+// All element-type letters used below: c/s/i/l are the integer element types
+// (8/16/32/64-bit) and x/f/d the floating-point ones (see IsFloat).
+defvar TypeList = ["c","s","i","l","x","f","d"];
+// Index EEW suffix paired with its Log2EEW prototype modifier.
+defvar EEWList = [["8", "(Log2EEW:3)"],
+                  ["16", "(Log2EEW:4)"],
+                  ["32", "(Log2EEW:5)"],
+                  ["64", "(Log2EEW:6)"]];
+
+// True for the floating-point type letters (x = half, f = float, d = double).
+class IsFloat<string type> {
+  bit val = !or(!eq(type, "x"), !eq(type, "f"), !eq(type, "d"));
+}
+
+// Unit-stride loads.  The pointer operand is bitcast to a pointer to the
+// result vector type before the call; the masked form first rotates the mask
+// operand to the position right before vl.
+let HasNoMaskedOverloaded = false,
+    ManualCodegen = [{
+      IntrinsicTypes = {ResultType, Ops[1]->getType()};
+      Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+    }],
+    ManualCodegenMask= [{
+      // Move mask to right before vl.
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      IntrinsicTypes = {ResultType, Ops[3]->getType()};
+      Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+    }] in {
+  // vle1: load of a mask register ("m"); it has no masked variant.
+  class RVVVLEMaskBuiltin : RVVBuiltin<"m", "mPCUe", "c"> {
+    let Name = "vle1_v";
+    let IRName = "vle1";
+    let HasMask = false;
+  }
+  multiclass RVVVLEBuiltin<list<string> types> {
+    let Name = NAME # "_v",
+        IRName = "vle",
+        IRNameMask ="vle_mask" in {
+      foreach type = types in {
+        def : RVVBuiltin<"v", "vPCe", type>;
+        // Unsigned overloads only exist for integer element types.
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "UvPCUe", type>;
+        }
+      }
+    }
+  }
+}
+
+// Fault-only-first unit-stride loads (vleNff).  The intrinsic returns a
+// {data, new_vl} pair; codegen strips the builtin's new_vl pointer argument,
+// stores the returned vl through it, and returns the data value.
+multiclass RVVVLEFFBuiltin<list<string> types> {
+  let Name = NAME # "_v",
+      IRName = "vleff",
+      IRNameMask = "vleff_mask",
+      HasNoMaskedOverloaded = false,
+      ManualCodegen = [{
+      {
+        IntrinsicTypes = {ResultType, Ops[2]->getType()};
+        Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+        Value *NewVL = Ops[1];
+        Ops.erase(Ops.begin() + 1);
+        llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+        llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+        llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
+        // Store new_vl.
+        clang::CharUnits Align =
+            CGM.getNaturalTypeAlignment(getContext().getSizeType());
+        Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {1}),
+                            Address(NewVL, Align));
+        return V;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        // Move mask to right before vl.
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        IntrinsicTypes = {ResultType, Ops[4]->getType()};
+        Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+        Value *NewVL = Ops[2];
+        Ops.erase(Ops.begin() + 2);
+        llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+        llvm::Value *LoadValue = Builder.CreateCall(F, Ops, "");
+        llvm::Value *V = Builder.CreateExtractValue(LoadValue, {0});
+        // Store new_vl.
+        clang::CharUnits Align =
+            CGM.getNaturalTypeAlignment(getContext().getSizeType());
+        Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {1}),
+                            Address(NewVL, Align));
+        return V;
+      }
+      }] in {
+    foreach type = types in {
+      def : RVVBuiltin<"v", "vPCePz", type>;
+      // Skip floating types for unsigned versions.
+      if !not(IsFloat<type>.val) then {
+        def : RVVBuiltin<"Uv", "UvPCUePz", type>;
+      }
+    }
+  }
+}
+
+// Strided loads (vlse): builtin prototype adds a byte-stride operand ("t").
+multiclass RVVVLSEBuiltin<list<string> types> {
+  let Name = NAME # "_v",
+      IRName = "vlse",
+      IRNameMask ="vlse_mask",
+      HasNoMaskedOverloaded = false,
+      ManualCodegen = [{
+        IntrinsicTypes = {ResultType, Ops[2]->getType()};
+        Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+      }],
+      ManualCodegenMask= [{
+        // Move mask to right before vl.
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        IntrinsicTypes = {ResultType, Ops[4]->getType()};
+        Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+      }] in {
+    foreach type = types in {
+      def : RVVBuiltin<"v", "vPCet", type>;
+      // Unsigned overloads only for integer element types.
+      if !not(IsFloat<type>.val) then {
+        def : RVVBuiltin<"Uv", "UvPCUet", type>;
+      }
+    }
+  }
+}
+
+// Indexed (gather) loads: one builtin per element type x index EEW, named
+// e.g. vluxei32_v.  The index vector's type is the third intrinsic overload.
+multiclass RVVIndexedLoad<string op> {
+  let ManualCodegen = [{
+        IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[2]->getType()};
+        Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+      }],
+      ManualCodegenMask = [{
+        // Move mask to right before vl.
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops[4]->getType()};
+        Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
+      }] in {
+    foreach type = TypeList in {
+      foreach eew_list = EEWList in {
+        defvar eew = eew_list[0];
+        defvar eew_type = eew_list[1];
+        let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
+          def: RVVBuiltin<"v", "vPCe" # eew_type # "Uv", type>;
+          if !not(IsFloat<type>.val) then {
+            def: RVVBuiltin<"Uv", "UvPCUe" # eew_type # "Uv", type>;
+          }
+        }
+      }
+    }
+  }
+}
+
+// Unit-stride stores.  Stores return void and take the value to store, so
+// codegen reorders the builtin's (ptr, value) into the intrinsic's
+// (value, ptr) order and bitcasts the pointer to the value's type.
+let HasMaskedOffOperand = false,
+    ManualCodegen = [{
+      // Builtin: (ptr, value, vl). Intrinsic: (value, ptr, vl)
+      std::swap(Ops[0], Ops[1]);
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+      IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType()};
+    }],
+    ManualCodegenMask= [{
+      // Builtin: (mask, ptr, value, vl). Intrinsic: (value, ptr, mask, vl)
+      std::swap(Ops[0], Ops[2]);
+      Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+      IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+    }] in {
+  // vse1: store of a mask register; no masked variant.
+  class RVVVSEMaskBuiltin : RVVBuiltin<"m", "0PUem", "c"> {
+    let Name = "vse1_v";
+    let IRName = "vse1";
+    let HasMask = false;
+  }
+  multiclass RVVVSEBuiltin<list<string> types> {
+    let Name = NAME # "_v",
+        IRName = "vse",
+        IRNameMask = "vse_mask" in {
+      foreach type = types in {
+        def : RVVBuiltin<"v", "0Pev", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0PUeUv", type>;
+        }
+      }
+    }
+  }
+}
+
+// Strided stores (vsse): like RVVVSEBuiltin but with a byte-stride operand.
+multiclass RVVVSSEBuiltin<list<string> types> {
+  let Name = NAME # "_v",
+      IRName = "vsse",
+      IRNameMask = "vsse_mask",
+      HasMaskedOffOperand = false,
+      ManualCodegen = [{
+        // Builtin: (ptr, stride, value, vl). Intrinsic: (value, ptr, stride, vl)
+        std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+        IntrinsicTypes = {Ops[0]->getType(), Ops[3]->getType()};
+      }],
+      ManualCodegenMask= [{
+        // Builtin: (mask, ptr, stride, value, vl). Intrinsic: (value, ptr, stride, mask, vl)
+        std::swap(Ops[0], Ops[3]);
+        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+        IntrinsicTypes = {Ops[0]->getType(), Ops[4]->getType()};
+      }] in {
+    foreach type = types in {
+      def : RVVBuiltin<"v", "0Petv", type>;
+      if !not(IsFloat<type>.val) then {
+        def : RVVBuiltin<"Uv", "0PUetUv", type>;
+      }
+    }
+  }
+}
+
+// Indexed (scatter) stores: one builtin per element type x index EEW; the
+// index vector's type is the second intrinsic overload.
+multiclass RVVIndexedStore<string op> {
+  let HasMaskedOffOperand = false,
+      ManualCodegen = [{
+        // Builtin: (ptr, index, value, vl). Intrinsic: (value, ptr, index, vl)
+        std::rotate(Ops.begin(), Ops.begin() + 2, Ops.begin() + 3);
+        Ops[1] = Builder.CreateBitCast(Ops[1],Ops[0]->getType()->getPointerTo());
+        IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[3]->getType()};
+      }],
+      ManualCodegenMask= [{
+        // Builtin: (mask, ptr, index, value, vl). Intrinsic: (value, ptr, index, mask, vl)
+        std::swap(Ops[0], Ops[3]);
+        Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType()->getPointerTo());
+        IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
+      }] in {
+    foreach type = TypeList in {
+      foreach eew_list = EEWList in {
+        defvar eew = eew_list[0];
+        defvar eew_type = eew_list[1];
+        let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
+          def : RVVBuiltin<"v", "0Pe" # eew_type # "Uvv", type>;
+          if !not(IsFloat<type>.val) then {
+            def : RVVBuiltin<"Uv", "0PUe" # eew_type # "UvUv", type>;
+          }
+        }
+      }
+    }
+  }
+}
+
+// Segment counts supported by the load/store segment builtins (NFIELDS).
+defvar NFList = [2, 3, 4, 5, 6, 7, 8];
+
+// Prototype fragment of nf pointer-to-vector output parameters ("Pv" or
+// "PUv" repeated nf times), used by the segment-load builtins.
+class PVString<int nf, bit signed> {
+  string S =
+    !cond(!eq(nf, 2): !if(signed, "PvPv", "PUvPUv"),
+          !eq(nf, 3): !if(signed, "PvPvPv", "PUvPUvPUv"),
+          !eq(nf, 4): !if(signed, "PvPvPvPv", "PUvPUvPUvPUv"),
+          !eq(nf, 5): !if(signed, "PvPvPvPvPv", "PUvPUvPUvPUvPUv"),
+          !eq(nf, 6): !if(signed, "PvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUv"),
+          !eq(nf, 7): !if(signed, "PvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUv"),
+          !eq(nf, 8): !if(signed, "PvPvPvPvPvPvPvPv", "PUvPUvPUvPUvPUvPUvPUvPUv"));
+}
+
+// Unit-stride segment loads (vlsegNeEEW).  The intrinsic returns an aggregate
+// of NF vectors; codegen stores each extracted field through the builtin's
+// corresponding output pointer.
+multiclass RVVUnitStridedSegLoad<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "x") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "_v",
+          IRName = op # nf,
+          IRNameMask = op # nf # "_mask",
+          NF = nf,
+          HasNoMaskedOverloaded = false,
+          ManualCodegen = [{
+    {
+      // builtin: (val0 address, val1 address, ..., ptr, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[NF + 1]->getType()};
+      // intrinsic: (ptr, vl)
+      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1]};
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+    }],
+          ManualCodegenMask = [{
+    {
+      // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, vl)
+      // intrinsic: (maskedoff0, ..., ptr, mask, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[2 * NF + 2]->getType()};
+      SmallVector<llvm::Value*, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(Ops[NF + I + 1]);
+      Operands.push_back(Ops[2 * NF + 1]);
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[2 * NF + 2]);
+      assert(Operands.size() == NF + 3);
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+    }] in {
+        defvar PV = PVString<nf, /*signed=*/true>.S;
+        defvar PUV = PVString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0" # PV # "PCe", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe", type>;
+        }
+      }
+    }
+  }
+}
+
+// Fault-only-first unit-stride segment loads (vlsegNeEEWff).  Like
+// RVVUnitStridedSegLoad, but the intrinsic's last aggregate field is the
+// new vl, which is stored through the builtin's new_vl pointer.
+multiclass RVVUnitStridedSegLoadFF<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "x") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "ff_v",
+          IRName = op # nf # "ff",
+          IRNameMask = op # nf # "ff_mask",
+          NF = nf,
+          HasNoMaskedOverloaded = false,
+          ManualCodegen = [{
+    {
+      // builtin: (val0 address, val1 address, ..., ptr, new_vl, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[NF + 2]->getType()};
+      // intrinsic: (ptr, vl)
+      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 2]};
+      Value *NewVL = Ops[NF + 1];
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      for (unsigned I = 0; I < NF; ++I) {
+        Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                            Address(Ops[I], Align));
+      }
+      // Store new_vl.
+      return Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {NF}),
+                                 Address(NewVL, Align));
+    }
+    }],
+          ManualCodegenMask = [{
+    {
+      // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, new_vl, vl)
+      // intrinsic: (maskedoff0, ..., ptr, mask, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[2 * NF + 3]->getType()};
+      SmallVector<llvm::Value*, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(Ops[NF + I + 1]);
+      Operands.push_back(Ops[2 * NF + 1]);
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[2 * NF + 3]);
+      Value *NewVL = Ops[2 * NF + 2];
+      assert(Operands.size() == NF + 3);
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      for (unsigned I = 0; I < NF; ++I) {
+        Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                            Address(Ops[I], Align));
+      }
+      // Store new_vl.
+      return Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {NF}),
+                                 Address(NewVL, Align));
+    }
+    }] in {
+        defvar PV = PVString<nf, /*signed=*/true>.S;
+        defvar PUV = PVString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0" # PV # "PCe" # "Pz", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "Pz", type>;
+        }
+      }
+    }
+  }
+}
+
+// Strided segment loads (vlssegNeEEW): segment load with an extra byte-stride
+// operand ("t") between the pointer and vl.
+multiclass RVVStridedSegLoad<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "x") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "_v",
+          IRName = op # nf,
+          IRNameMask = op # nf # "_mask",
+          NF = nf,
+          HasNoMaskedOverloaded = false,
+          ManualCodegen = [{
+    {
+      // builtin: (val0 address, val1 address, ..., ptr, stride, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[NF + 2]->getType()};
+      // intrinsic: (ptr, stride, vl)
+      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1], Ops[NF + 2]};
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+    }],
+          ManualCodegenMask = [{
+    {
+      // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, stride, vl)
+      // intrinsic: (maskedoff0, ..., ptr, stride, mask, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[2 * NF + 3]->getType()};
+      SmallVector<llvm::Value*, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(Ops[NF + I + 1]);
+      Operands.push_back(Ops[2 * NF + 1]);
+      Operands.push_back(Ops[2 * NF + 2]);
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[2 * NF + 3]);
+      assert(Operands.size() == NF + 4);
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+    }] in {
+        defvar PV = PVString<nf, /*signed=*/true>.S;
+        defvar PUV = PVString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0" # PV # "PCe" # "t", type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # "t", type>;
+        }
+      }
+    }
+  }
+}
+
+// Indexed segment loads (vluxsegNeiEEW / vloxsegNeiEEW): segment load with an
+// index-vector operand; the index type is the second intrinsic overload.
+multiclass RVVIndexedSegLoad<string op> {
+  foreach type = TypeList in {
+    foreach eew_info = EEWList in {
+      defvar eew = eew_info[0];
+      defvar eew_type = eew_info[1];
+      foreach nf = NFList in {
+        let Name = op # nf # "ei" # eew # "_v",
+            IRName = op # nf,
+            IRNameMask = op # nf # "_mask",
+            NF = nf,
+            ManualCodegen = [{
+    {
+      // builtin: (val0 address, val1 address, ..., ptr, index, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
+      // intrinsic: (ptr, index, vl)
+      llvm::Value *Operands[] = {Ops[NF], Ops[NF + 1], Ops[NF + 2]};
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+    }],
+            ManualCodegenMask = [{
+    {
+      // builtin: (val0 address, ..., mask, maskedoff0, ..., ptr, index, vl)
+      IntrinsicTypes = {Ops[0]->getType()->getPointerElementType(),
+                        Ops[2 * NF + 2]->getType(), Ops[2 * NF + 3]->getType()};
+      // intrinsic: (maskedoff0, ..., ptr, index, mask, vl)
+      SmallVector<llvm::Value*, 12> Operands;
+      for (unsigned I = 0; I < NF; ++I)
+        Operands.push_back(Ops[NF + I + 1]);
+      Operands.push_back(Ops[2 * NF + 1]);
+      Operands.push_back(Ops[2 * NF + 2]);
+      Operands.push_back(Ops[NF]);
+      Operands.push_back(Ops[2 * NF + 3]);
+      assert(Operands.size() == NF + 4);
+      llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+      llvm::Value *LoadValue = Builder.CreateCall(F, Operands, "");
+      clang::CharUnits Align = CharUnits::fromQuantity(
+          IntrinsicTypes[0]->getScalarSizeInBits() / 8);
+      llvm::Value *V;
+      for (unsigned I = 0; I < NF; ++I) {
+        V = Builder.CreateStore(Builder.CreateExtractValue(LoadValue, {I}),
+                                Address(Ops[I], Align));
+      }
+      return V;
+    }
+    }] in {
+          defvar PV = PVString<nf, /*signed=*/true>.S;
+          defvar PUV = PVString<nf, /*signed=*/false>.S;
+          def : RVVBuiltin<"v", "0" # PV # "PCe" # eew_type # "Uv", type>;
+          if !not(IsFloat<type>.val) then {
+            def : RVVBuiltin<"Uv", "0" # PUV # "PCUe" # eew_type # "Uv", type>;
+          }
+        }
+      }
+    }
+  }
+}
+
+// Prototype fragment of nf vector value parameters ("v" or "Uv" repeated nf
+// times), used by the segment-store builtins.
+class VString<int nf, bit signed> {
+  string S = !cond(!eq(nf, 2): !if(signed, "vv", "UvUv"),
+                   !eq(nf, 3): !if(signed, "vvv", "UvUvUv"),
+                   !eq(nf, 4): !if(signed, "vvvv", "UvUvUvUv"),
+                   !eq(nf, 5): !if(signed, "vvvvv", "UvUvUvUvUv"),
+                   !eq(nf, 6): !if(signed, "vvvvvv", "UvUvUvUvUvUv"),
+                   !eq(nf, 7): !if(signed, "vvvvvvv", "UvUvUvUvUvUvUv"),
+                   !eq(nf, 8): !if(signed, "vvvvvvvv", "UvUvUvUvUvUvUvUv"));
+}
+
+// Unit-stride segment stores (vssegNeEEW): codegen only reorders the operand
+// list to the intrinsic's (values..., ptr[, mask], vl) order.
+multiclass RVVUnitStridedSegStore<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "x") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "_v",
+          IRName = op # nf,
+          IRNameMask = op # nf # "_mask",
+          NF = nf,
+          HasMaskedOffOperand = false,
+          ManualCodegen = [{
+    {
+      // Builtin: (ptr, val0, val1, ..., vl)
+      // Intrinsic: (val0, val1, ..., ptr, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+      IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
+      assert(Ops.size() == NF + 2);
+    }
+    }],
+          ManualCodegenMask = [{
+    {
+      // Builtin: (mask, ptr, val0, val1, ..., vl)
+      // Intrinsic: (val0, val1, ..., ptr, mask, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
+      std::swap(Ops[NF], Ops[NF + 1]);
+      IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 2]->getType()};
+      assert(Ops.size() == NF + 3);
+    }
+    }] in {
+        defvar V = VString<nf, /*signed=*/true>.S;
+        defvar UV = VString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0Pe" # V, type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0PUe" # UV, type>;
+        }
+      }
+    }
+  }
+}
+
+// Strided segment stores (vsssegNeEEW): segment store with a byte-stride
+// operand ("t") after the pointer.
+multiclass RVVStridedSegStore<string op> {
+  foreach type = TypeList in {
+    defvar eew = !cond(!eq(type, "c") : "8",
+                       !eq(type, "s") : "16",
+                       !eq(type, "i") : "32",
+                       !eq(type, "l") : "64",
+                       !eq(type, "x") : "16",
+                       !eq(type, "f") : "32",
+                       !eq(type, "d") : "64");
+    foreach nf = NFList in {
+      let Name = op # nf # "e" # eew # "_v",
+          IRName = op # nf,
+          IRNameMask = op # nf # "_mask",
+          NF = nf,
+          HasMaskedOffOperand = false,
+          ManualCodegen = [{
+    {
+      // Builtin: (ptr, stride, val0, val1, ..., vl).
+      // Intrinsic: (val0, val1, ..., ptr, stride, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
+      IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
+      assert(Ops.size() == NF + 3);
+    }
+    }],
+          ManualCodegenMask = [{
+    {
+      // Builtin: (mask, ptr, stride, val0, val1, ..., vl).
+      // Intrinsic: (val0, val1, ..., ptr, stride, mask, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
+      std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
+      IntrinsicTypes = {Ops[0]->getType(), Ops[NF + 1]->getType()};
+      assert(Ops.size() == NF + 4);
+    }
+    }] in {
+        defvar V = VString<nf, /*signed=*/true>.S;
+        defvar UV = VString<nf, /*signed=*/false>.S;
+        def : RVVBuiltin<"v", "0Pet" # V, type>;
+        if !not(IsFloat<type>.val) then {
+          def : RVVBuiltin<"Uv", "0PUet" # UV, type>;
+        }
+      }
+    }
+  }
+}
+
+// Indexed segment stores (vsuxsegNeiEEW / vsoxsegNeiEEW): segment store with
+// an index-vector operand; the index type is the second intrinsic overload.
+multiclass RVVIndexedSegStore<string op> {
+  foreach type = TypeList in {
+    foreach eew_info = EEWList in {
+      defvar eew = eew_info[0];
+      defvar eew_type = eew_info[1];
+      foreach nf = NFList in {
+        let Name = op # nf # "ei" # eew # "_v",
+            IRName = op # nf,
+            IRNameMask = op # nf # "_mask",
+            NF = nf,
+            HasMaskedOffOperand = false,
+            ManualCodegen = [{
+    {
+      // Builtin: (ptr, index, val0, val1, ..., vl)
+      // Intrinsic: (val0, val1, ..., ptr, index, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 2, Ops.end() - 1);
+      IntrinsicTypes = {Ops[0]->getType(),
+                        Ops[NF + 1]->getType(), Ops[NF + 2]->getType()};
+      assert(Ops.size() == NF + 3);
+    }
+    }],
+            ManualCodegenMask = [{
+    {
+      // Builtin: (mask, ptr, index, val0, val1, ..., vl)
+      // Intrinsic: (val0, val1, ..., ptr, index, mask, vl)
+      std::rotate(Ops.begin(), Ops.begin() + 3, Ops.end() - 1);
+      std::rotate(Ops.begin() + NF, Ops.begin() + NF + 1, Ops.begin() + NF + 3);
+      IntrinsicTypes = {Ops[0]->getType(),
+                        Ops[NF + 1]->getType(), Ops[NF + 3]->getType()};
+      assert(Ops.size() == NF + 4);
+    }
+    }] in {
+          defvar V = VString<nf, /*signed=*/true>.S;
+          defvar UV = VString<nf, /*signed=*/false>.S;
+          def : RVVBuiltin<"v", "0Pe" # eew_type # "Uv" # V, type>;
+          if !not(IsFloat<type>.val) then {
+            def : RVVBuiltin<"Uv", "0PUe" # eew_type # "Uv" # UV, type>;
+          }
+        }
+      }
+    }
+  }
+}
+
+// Vector AMO builtins (Zvamo).  FP types are only included when has_fp is
+// set; signed/unsigned variants are controlled independently, and unsigned
+// variants are emitted only for the integer element types.
+multiclass RVVAMOBuiltinSet<bit has_signed = false, bit has_unsigned = false,
+                            bit has_fp = false> {
+  defvar type_list = !if(has_fp, ["i","l","f","d"], ["i","l"]);
+  foreach type = type_list in
+    foreach eew_list = EEWList in {
+      defvar eew = eew_list[0];
+      defvar eew_index = eew_list[1];
+      let Name = NAME # "ei" # eew # "_" # "v",
+          IRName = NAME,
+          IRNameMask = NAME # "_mask",
+          HasMaskedOffOperand = false,
+          ManualCodegen = [{
+            // base, bindex, value, vl
+            IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+            Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+          }],
+          ManualCodegenMask = [{
+            // Move mask to right before vl.
+            std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+            IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[4]->getType()};
+            Ops[0] = Builder.CreateBitCast(Ops[0], ResultType->getPointerTo());
+          }] in {
+        if has_signed then
+          def : RVVBuiltin<"v", "vPe" # eew_index # "Uvv", type>;
+        if !and(!not(IsFloat<type>.val), has_unsigned) then
+          def : RVVBuiltin<"Uv", "UvPUe" # eew_index # "UvUv", type>;
+      }
+    }
+}
+
+// Pseudo unary builtin lowered onto a binary intrinsic by inserting a zero
+// splat as the second operand (e.g. vneg -> vrsub with 0).
+multiclass RVVPseudoUnaryBuiltin<string IR, string type_range> {
+  let Name = NAME,
+      IRName = IR,
+      IRNameMask = IR # "_mask",
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          cast<llvm::VectorType>(ResultType)->getElementType(),
+                          Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[1]));
+        break;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        // maskedoff, op1, mask, vl
+        IntrinsicTypes = {ResultType,
+                          cast<llvm::VectorType>(ResultType)->getElementType(),
+                          Ops[3]->getType()};
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[1]));
+        break;
+      }
+      }] in {
+    def : RVVBuiltin<"v", "vv", type_range>;
+  }
+}
+
+// Pseudo vector-not builtin lowered onto a binary intrinsic by inserting an
+// all-ones splat as the second operand; emits both signed and unsigned forms.
+multiclass RVVPseudoVNotBuiltin<string IR, string type_range> {
+  let Name = NAME,
+      IRName = IR,
+      IRNameMask = IR # "_mask",
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          cast<llvm::VectorType>(ResultType)->getElementType(),
+                          Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1,
+                   llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
+        break;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        // maskedoff, op1, mask, vl
+        IntrinsicTypes = {ResultType,
+                          cast<llvm::VectorType>(ResultType)->getElementType(),
+                          Ops[3]->getType()};
+        Ops.insert(Ops.begin() + 2,
+                   llvm::Constant::getAllOnesValue(IntrinsicTypes[1]));
+        break;
+      }
+      }] in {
+    def : RVVBuiltin<"v", "vv", type_range>;
+    def : RVVBuiltin<"Uv", "UvUv", type_range>;
+  }
+}
+
+// Pseudo mask-register builtin lowered onto a binary mask intrinsic by
+// duplicating the single operand (e.g. vmnot -> op with both inputs equal).
+multiclass RVVPseudoMaskBuiltin<string IR, string type_range> {
+  let Name = NAME,
+      IRName = IR,
+      HasMask = false,
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1, Ops[0]);
+        break;
+      }
+      }] in {
+    def : RVVBuiltin<"m", "mm", type_range>;
+  }
+}
+
+// Pseudo unary floating-point builtin lowered onto a binary intrinsic by
+// duplicating the vector operand.
+multiclass RVVPseudoVFUnaryBuiltin<string IR, string type_range> {
+  let Name = NAME,
+      IRName = IR,
+      IRNameMask = IR # "_mask",
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[0]->getType(), Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1, Ops[0]);
+        break;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        // maskedoff, op1, mask, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(),
+                          Ops[3]->getType()};
+        Ops.insert(Ops.begin() + 2, Ops[1]);
+        break;
+      }
+      }] in {
+    def : RVVBuiltin<"v", "vv", type_range>;
+  }
+}
+
+// Pseudo widening-convert builtin lowered onto a widening binary intrinsic
+// by inserting a zero scalar as the second operand.
+multiclass RVVPseudoVWCVTBuiltin<string IR, string MName, string type_range,
+                                 list<list<string>> suffixes_prototypes> {
+  let Name = NAME,
+      MangledName = MName,
+      IRName = IR,
+      IRNameMask = IR # "_mask",
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[0]->getType(),
+                          cast<llvm::VectorType>(Ops[0]->getType())->getElementType(),
+                          Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+        break;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        // maskedoff, op1, mask, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(),
+                          cast<llvm::VectorType>(Ops[1]->getType())->getElementType(),
+                          Ops[3]->getType()};
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+        break;
+      }
+      }] in {
+    foreach s_p = suffixes_prototypes in {
+      def : RVVBuiltin<s_p[0], s_p[1], type_range>;
+    }
+  }
+}
+
+// Pseudo narrowing-convert builtin lowered onto a narrowing shift intrinsic
+// by inserting a zero shift amount as the second operand.
+multiclass RVVPseudoVNCVTBuiltin<string IR, string MName, string type_range,
+                                 list<list<string>> suffixes_prototypes> {
+  let Name = NAME,
+      MangledName = MName,
+      IRName = IR,
+      IRNameMask = IR # "_mask",
+      ManualCodegen = [{
+      {
+        // op1, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[0]->getType(),
+                          Ops[1]->getType(),
+                          Ops[1]->getType()};
+        Ops.insert(Ops.begin() + 1, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+        break;
+      }
+      }],
+      ManualCodegenMask = [{
+      {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        // maskedoff, op1, mask, vl
+        IntrinsicTypes = {ResultType,
+                          Ops[1]->getType(),
+                          Ops[3]->getType(),
+                          Ops[3]->getType()};
+        Ops.insert(Ops.begin() + 2, llvm::Constant::getNullValue(IntrinsicTypes[2]));
+        break;
+      }
+      }] in {
+    foreach s_p = suffixes_prototypes in {
+      def : RVVBuiltin<s_p[0], s_p[1], type_range>;
+    }
+  }
+}
+
+// 6. Configuration-Setting Instructions
+// 6.1. vsetvli/vsetvl instructions
+let HasVL = false,
+    HasMask = false,
+    HasSideEffects = true,
+    Log2LMUL = [0],
+    ManualCodegen = [{IntrinsicTypes = {ResultType};}] in // Set XLEN type
+{
+  // The vsetvl_* forms are defined as macros because they require constant
+  // integer arguments for SEW and LMUL.
+  let HeaderCode =
+[{
+#define vsetvl_e8mf8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 5)
+#define vsetvl_e8mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 6)
+#define vsetvl_e8mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 7)
+#define vsetvl_e8m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 0)
+#define vsetvl_e8m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 1)
+#define vsetvl_e8m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 2)
+#define vsetvl_e8m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 0, 3)
+
+#define vsetvl_e16mf4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 6)
+#define vsetvl_e16mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 7)
+#define vsetvl_e16m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 0)
+#define vsetvl_e16m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 1)
+#define vsetvl_e16m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 2)
+#define vsetvl_e16m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 1, 3)
+
+#define vsetvl_e32mf2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 7)
+#define vsetvl_e32m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 0)
+#define vsetvl_e32m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 1)
+#define vsetvl_e32m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 2)
+#define vsetvl_e32m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 2, 3)
+
+#define vsetvl_e64m1(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 0)
+#define vsetvl_e64m2(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 1)
+#define vsetvl_e64m4(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 2)
+#define vsetvl_e64m8(avl) __builtin_rvv_vsetvli((size_t)(avl), 3, 3)
+
+}] in
+  def vsetvli : RVVBuiltin<"", "zzKzKz", "i">;
+
+  // vsetvlmax_* likewise wrap the vsetvlimax builtin with constant SEW/LMUL.
+  let HeaderCode =
+[{
+#define vsetvlmax_e8mf8() __builtin_rvv_vsetvlimax(0, 5)
+#define vsetvlmax_e8mf4() __builtin_rvv_vsetvlimax(0, 6)
+#define vsetvlmax_e8mf2() __builtin_rvv_vsetvlimax(0, 7)
+#define vsetvlmax_e8m1() __builtin_rvv_vsetvlimax(0, 0)
+#define vsetvlmax_e8m2() __builtin_rvv_vsetvlimax(0, 1)
+#define vsetvlmax_e8m4() __builtin_rvv_vsetvlimax(0, 2)
+#define vsetvlmax_e8m8() __builtin_rvv_vsetvlimax(0, 3)
+
+#define vsetvlmax_e16mf4() __builtin_rvv_vsetvlimax(1, 6)
+#define vsetvlmax_e16mf2() __builtin_rvv_vsetvlimax(1, 7)
+#define vsetvlmax_e16m1() __builtin_rvv_vsetvlimax(1, 0)
+#define vsetvlmax_e16m2() __builtin_rvv_vsetvlimax(1, 1)
+#define vsetvlmax_e16m4() __builtin_rvv_vsetvlimax(1, 2)
+#define vsetvlmax_e16m8() __builtin_rvv_vsetvlimax(1, 3)
+
+#define vsetvlmax_e32mf2() __builtin_rvv_vsetvlimax(2, 7)
+#define vsetvlmax_e32m1() __builtin_rvv_vsetvlimax(2, 0)
+#define vsetvlmax_e32m2() __builtin_rvv_vsetvlimax(2, 1)
+#define vsetvlmax_e32m4() __builtin_rvv_vsetvlimax(2, 2)
+#define vsetvlmax_e32m8() __builtin_rvv_vsetvlimax(2, 3)
+
+#define vsetvlmax_e64m1() __builtin_rvv_vsetvlimax(3, 0)
+#define vsetvlmax_e64m2() __builtin_rvv_vsetvlimax(3, 1)
+#define vsetvlmax_e64m4() __builtin_rvv_vsetvlimax(3, 2)
+#define vsetvlmax_e64m8() __builtin_rvv_vsetvlimax(3, 3)
+
+}] in
+  def vsetvlimax : RVVBuiltin<"", "zKzKz", "i">;
+}
+
+// 7. Vector Loads and Stores
+// 7.4. Vector Unit-Stride Instructions
+def vle1: RVVVLEMaskBuiltin;
+defm vle8: RVVVLEBuiltin<["c"]>;
+defm vle16: RVVVLEBuiltin<["s","x"]>;
+defm vle32: RVVVLEBuiltin<["i","f"]>;
+defm vle64: RVVVLEBuiltin<["l","d"]>;
+
+def vse1 : RVVVSEMaskBuiltin;
+defm vse8 : RVVVSEBuiltin<["c"]>;
+defm vse16: RVVVSEBuiltin<["s","x"]>;
+defm vse32: RVVVSEBuiltin<["i","f"]>;
+defm vse64: RVVVSEBuiltin<["l","d"]>;
+
+// 7.5. Vector Strided Instructions
+defm vlse8: RVVVLSEBuiltin<["c"]>;
+defm vlse16: RVVVLSEBuiltin<["s","x"]>;
+defm vlse32: RVVVLSEBuiltin<["i","f"]>;
+defm vlse64: RVVVLSEBuiltin<["l","d"]>;
+
+defm vsse8 : RVVVSSEBuiltin<["c"]>;
+defm vsse16: RVVVSSEBuiltin<["s","x"]>;
+defm vsse32: RVVVSSEBuiltin<["i","f"]>;
+defm vsse64: RVVVSSEBuiltin<["l","d"]>;
+
+// 7.6. Vector Indexed Instructions
+defm : RVVIndexedLoad<"vluxei">;
+defm : RVVIndexedLoad<"vloxei">;
+
+defm : RVVIndexedStore<"vsuxei">;
+defm : RVVIndexedStore<"vsoxei">;
+
+// 7.7. Unit-stride Fault-Only-First Loads
+defm vle8ff: RVVVLEFFBuiltin<["c"]>;
+defm vle16ff: RVVVLEFFBuiltin<["s","x"]>;
+defm vle32ff: RVVVLEFFBuiltin<["i", "f"]>;
+defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
+
+// 7.8. Vector Load/Store Segment Instructions
+let RequiredExtension = "Zvlsseg" in {
+defm : RVVUnitStridedSegLoad<"vlseg">;
+defm : RVVUnitStridedSegLoadFF<"vlseg">;
+defm : RVVStridedSegLoad<"vlsseg">;
+defm : RVVIndexedSegLoad<"vluxseg">;
+defm : RVVIndexedSegLoad<"vloxseg">;
+defm : RVVUnitStridedSegStore<"vsseg">;
+defm : RVVStridedSegStore<"vssseg">;
+defm : RVVIndexedSegStore<"vsuxseg">;
+defm : RVVIndexedSegStore<"vsoxseg">;
+}
+
+// 8. Vector AMO Operations
+let RequiredExtension = "Zvamo" in {
+defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>;
+defm vamoadd : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamoxor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamoand : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamoor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
+defm vamomin : RVVAMOBuiltinSet< /* hasSigned */ true>;
+defm vamomax : RVVAMOBuiltinSet< /* hasSigned */ true>;
+defm vamominu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
+defm vamomaxu : RVVAMOBuiltinSet< /* hasSigned */ false, /* hasUnsigned */ true>;
+}
+
+// 12. Vector Integer Arithmetic Instructions
+// 12.1. Vector Single-Width Integer Add and Subtract
+defm vadd : RVVIntBinBuiltinSet;
+defm vsub : RVVIntBinBuiltinSet;
+defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
+ [["vx", "v", "vve"],
+ ["vx", "Uv", "UvUvUe"]]>;
+defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
+
+// 12.2. Vector Widening Integer Add/Subtract
+// Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
+defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
+defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
+// Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
+defm vwadd : RVVSignedWidenBinBuiltinSet;
+defm vwsub : RVVSignedWidenBinBuiltinSet;
+// Widening unsigned integer add/subtract, 2*SEW = 2*SEW +/- SEW
+defm vwaddu : RVVUnsignedWidenOp0BinBuiltinSet;
+defm vwsubu : RVVUnsignedWidenOp0BinBuiltinSet;
+// Widening signed integer add/subtract, 2*SEW = 2*SEW +/- SEW
+defm vwadd : RVVSignedWidenOp0BinBuiltinSet;
+defm vwsub : RVVSignedWidenOp0BinBuiltinSet;
+defm vwcvtu_x_x_v : RVVPseudoVWCVTBuiltin<"vwaddu", "vwcvtu_x", "csi",
+ [["Uw", "UwUv"]]>;
+defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
+ [["w", "wv"]]>;
+
+// 12.3. Vector Integer Extension
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
+ def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
+}
+let Log2LMUL = [-3, -2, -1, 0, 1] in {
+ def vsext_vf4 : RVVIntExt<"vsext", "q", "qv", "cs">;
+ def vzext_vf4 : RVVIntExt<"vzext", "Uq", "UqUv", "cs">;
+}
+let Log2LMUL = [-3, -2, -1, 0] in {
+ def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
+ def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
+}
+
+// 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
+let HasMask = false in {
+ defm vadc : RVVCarryinBuiltinSet;
+ defm vmadc : RVVCarryOutInBuiltinSet<"vmadc_carry_in">;
+ defm vmadc : RVVIntMaskOutBuiltinSet;
+ defm vsbc : RVVCarryinBuiltinSet;
+ defm vmsbc : RVVCarryOutInBuiltinSet<"vmsbc_borrow_in">;
+ defm vmsbc : RVVIntMaskOutBuiltinSet;
+}
+
+// 12.5. Vector Bitwise Logical Instructions
+defm vand : RVVIntBinBuiltinSet;
+defm vxor : RVVIntBinBuiltinSet;
+defm vor : RVVIntBinBuiltinSet;
+defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
+
+// 12.6. Vector Single-Width Bit Shift Instructions
+defm vsll : RVVShiftBuiltinSet;
+defm vsrl : RVVUnsignedShiftBuiltinSet;
+defm vsra : RVVSignedShiftBuiltinSet;
+
+// 12.7. Vector Narrowing Integer Right Shift Instructions
+defm vnsrl : RVVUnsignedNShiftBuiltinSet;
+defm vnsra : RVVSignedNShiftBuiltinSet;
+defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
+ [["v", "vw"],
+ ["Uv", "UvUw"]]>;
+
+// 12.8. Vector Integer Comparison Instructions
+defm vmseq : RVVIntMaskOutBuiltinSet;
+defm vmsne : RVVIntMaskOutBuiltinSet;
+defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
+defm vmslt : RVVSignedMaskOutBuiltinSet;
+defm vmsleu : RVVUnsignedMaskOutBuiltinSet;
+defm vmsle : RVVSignedMaskOutBuiltinSet;
+defm vmsgtu : RVVUnsignedMaskOutBuiltinSet;
+defm vmsgt : RVVSignedMaskOutBuiltinSet;
+defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
+defm vmsge : RVVSignedMaskOutBuiltinSet;
+
+// 12.9. Vector Integer Min/Max Instructions
+defm vminu : RVVUnsignedBinBuiltinSet;
+defm vmin : RVVSignedBinBuiltinSet;
+defm vmaxu : RVVUnsignedBinBuiltinSet;
+defm vmax : RVVSignedBinBuiltinSet;
+
+// 12.10. Vector Single-Width Integer Multiply Instructions
+defm vmul : RVVIntBinBuiltinSet;
+defm vmulh : RVVSignedBinBuiltinSet;
+defm vmulhu : RVVUnsignedBinBuiltinSet;
+defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
+ [["vv", "v", "vvUv"],
+ ["vx", "v", "vvUe"]]>;
+
+// 12.11. Vector Integer Divide Instructions
+defm vdivu : RVVUnsignedBinBuiltinSet;
+defm vdiv : RVVSignedBinBuiltinSet;
+defm vremu : RVVUnsignedBinBuiltinSet;
+defm vrem : RVVSignedBinBuiltinSet;
+
+// 12.12. Vector Widening Integer Multiply Instructions
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
+ [["vv", "w", "wvv"],
+ ["vx", "w", "wve"]]>;
+defm vwmulu : RVVOutOp0Op1BuiltinSet<"vwmulu", "csi",
+ [["vv", "Uw", "UwUvUv"],
+ ["vx", "Uw", "UwUvUe"]]>;
+defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
+ [["vv", "w", "wvUv"],
+ ["vx", "w", "wvUe"]]>;
+}
+
+// 12.13. Vector Single-Width Integer Multiply-Add Instructions
+defm vmacc : RVVIntTerBuiltinSet;
+defm vnmsac : RVVIntTerBuiltinSet;
+defm vmadd : RVVIntTerBuiltinSet;
+defm vnmsub : RVVIntTerBuiltinSet;
+
+// 12.14. Vector Widening Integer Multiply-Add Instructions
+let HasMaskedOffOperand = false,
+ Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+defm vwmaccu : RVVOutOp1Op2BuiltinSet<"vwmaccu", "csi",
+ [["vv", "Uw", "UwUwUvUv"],
+ ["vx", "Uw", "UwUwUeUv"]]>;
+defm vwmacc : RVVOutOp1Op2BuiltinSet<"vwmacc", "csi",
+ [["vv", "w", "wwvv"],
+ ["vx", "w", "wwev"]]>;
+defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
+ [["vv", "w", "wwvUv"],
+ ["vx", "w", "wweUv"]]>;
+defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
+ [["vx", "w", "wwUev"]]>;
+}
+
+// 12.15. Vector Integer Merge Instructions
+// C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
+let HasMask = false,
+ ManualCodegen = [{
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+ }] in {
+ defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
+ [["vvm", "v", "vmvv"],
+ ["vxm", "v", "vmve"],
+ ["vvm", "Uv", "UvmUvUv"],
+ ["vxm", "Uv", "UvmUvUe"]]>;
+}
+
+// 12.16. Vector Integer Move Instructions
+let HasMask = false in {
+ let MangledName = "vmv_v" in {
+ defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
+ [["v", "Uv", "UvUv"]]>;
+ defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csilxfd",
+ [["v", "v", "vv"]]>;
+ }
+ let HasNoMaskedOverloaded = false in
+ defm vmv_v : RVVOutBuiltinSet<"vmv_v_x", "csil",
+ [["x", "v", "ve"],
+ ["x", "Uv", "UvUe"]]>;
+}
+
+// 13. Vector Fixed-Point Arithmetic Instructions
+// 13.1. Vector Single-Width Saturating Add and Subtract
+defm vsaddu : RVVUnsignedBinBuiltinSet;
+defm vsadd : RVVSignedBinBuiltinSet;
+defm vssubu : RVVUnsignedBinBuiltinSet;
+defm vssub : RVVSignedBinBuiltinSet;
+
+// 13.2. Vector Single-Width Averaging Add and Subtract
+defm vaaddu : RVVUnsignedBinBuiltinSet;
+defm vaadd : RVVSignedBinBuiltinSet;
+defm vasubu : RVVUnsignedBinBuiltinSet;
+defm vasub : RVVSignedBinBuiltinSet;
+
+// 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+defm vsmul : RVVSignedBinBuiltinSet;
+
+// 13.4. Vector Single-Width Scaling Shift Instructions
+defm vssrl : RVVUnsignedShiftBuiltinSet;
+defm vssra : RVVSignedShiftBuiltinSet;
+
+// 13.5. Vector Narrowing Fixed-Point Clip Instructions
+defm vnclipu : RVVUnsignedNShiftBuiltinSet;
+defm vnclip : RVVSignedNShiftBuiltinSet;
+
+// 14. Vector Floating-Point Instructions
+// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+defm vfadd : RVVFloatingBinBuiltinSet;
+defm vfsub : RVVFloatingBinBuiltinSet;
+defm vfrsub : RVVFloatingBinVFBuiltinSet;
+
+// 14.3. Vector Widening Floating-Point Add/Subtract Instructions
+// Widening FP add/subtract, 2*SEW = SEW +/- SEW
+defm vfwadd : RVVFloatingWidenBinBuiltinSet;
+defm vfwsub : RVVFloatingWidenBinBuiltinSet;
+// Widening FP add/subtract, 2*SEW = 2*SEW +/- SEW
+defm vfwadd : RVVFloatingWidenOp0BinBuiltinSet;
+defm vfwsub : RVVFloatingWidenOp0BinBuiltinSet;
+
+// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+defm vfmul : RVVFloatingBinBuiltinSet;
+defm vfdiv : RVVFloatingBinBuiltinSet;
+defm vfrdiv : RVVFloatingBinVFBuiltinSet;
+
+// 14.5. Vector Widening Floating-Point Multiply
+let Log2LMUL = [-2, -1, 0, 1, 2] in {
+ defm vfwmul : RVVOutOp0Op1BuiltinSet<"vfwmul", "xf",
+ [["vv", "w", "wvv"],
+ ["vf", "w", "wve"]]>;
+}
+
+// 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+defm vfmacc : RVVFloatingTerBuiltinSet;
+defm vfnmacc : RVVFloatingTerBuiltinSet;
+defm vfmsac : RVVFloatingTerBuiltinSet;
+defm vfnmsac : RVVFloatingTerBuiltinSet;
+defm vfmadd : RVVFloatingTerBuiltinSet;
+defm vfnmadd : RVVFloatingTerBuiltinSet;
+defm vfmsub : RVVFloatingTerBuiltinSet;
+defm vfnmsub : RVVFloatingTerBuiltinSet;
+
+// 14.7. Vector Widening Floating-Point Fused Multiply-Add Instructions
+defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
+defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
+defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
+defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
+
+// 14.8. Vector Floating-Point Square-Root Instruction
+def vfsqrt : RVVFloatingUnaryVVBuiltin;
+
+// 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
+def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
+
+// 14.10. Vector Floating-Point Reciprocal Estimate Instruction
+def vfrec7 : RVVFloatingUnaryVVBuiltin;
+
+// 14.11. Vector Floating-Point MIN/MAX Instructions
+defm vfmin : RVVFloatingBinBuiltinSet;
+defm vfmax : RVVFloatingBinBuiltinSet;
+
+// 14.12. Vector Floating-Point Sign-Injection Instructions
+defm vfsgnj : RVVFloatingBinBuiltinSet;
+defm vfsgnjn : RVVFloatingBinBuiltinSet;
+defm vfsgnjx : RVVFloatingBinBuiltinSet;
+defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
+defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
+
+// 14.13. Vector Floating-Point Compare Instructions
+defm vmfeq : RVVFloatingMaskOutBuiltinSet;
+defm vmfne : RVVFloatingMaskOutBuiltinSet;
+defm vmflt : RVVFloatingMaskOutBuiltinSet;
+defm vmfle : RVVFloatingMaskOutBuiltinSet;
+defm vmfgt : RVVFloatingMaskOutBuiltinSet;
+defm vmfge : RVVFloatingMaskOutBuiltinSet;
+
+// 14.14. Vector Floating-Point Classify Instruction
+let Name = "vfclass_v" in
+ def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
+
+// 14.15. Vector Floating-Point Merge Instruction
+// C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
+let HasMask = false,
+ ManualCodegen = [{
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
+ IntrinsicTypes = {ResultType, Ops[1]->getType(), Ops[3]->getType()};
+ }] in {
+ defm vmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
+ [["vvm", "v", "vmvv"]]>;
+ defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
+ [["vfm", "v", "vmve"]]>;
+}
+
+// 14.16. Vector Floating-Point Move Instruction
+let HasMask = false, HasNoMaskedOverloaded = false in
+ defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
+ [["f", "v", "ve"]]>;
+
+// 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
+def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
+def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
+def vfcvt_rtz_x_f_v : RVVConvToSignedBuiltin<"vfcvt_rtz_x">;
+def vfcvt_f_xu_v : RVVConvBuiltin<"Fv", "FvUv", "sil", "vfcvt_f">;
+def vfcvt_f_x_v : RVVConvBuiltin<"Fv", "Fvv", "sil", "vfcvt_f">;
+
+// 14.18. Widening Floating-Point/Integer Type-Convert Instructions
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ def vfwcvt_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_xu">;
+ def vfwcvt_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_x">;
+ def vfwcvt_rtz_xu_f_v : RVVConvToWidenUnsignedBuiltin<"vfwcvt_rtz_xu">;
+ def vfwcvt_rtz_x_f_v : RVVConvToWidenSignedBuiltin<"vfwcvt_rtz_x">;
+ def vfwcvt_f_xu_v : RVVConvBuiltin<"Fw", "FwUv", "csi", "vfwcvt_f">;
+ def vfwcvt_f_x_v : RVVConvBuiltin<"Fw", "Fwv", "csi", "vfwcvt_f">;
+ def vfwcvt_f_f_v : RVVConvBuiltin<"w", "wv", "xf", "vfwcvt_f">;
+}
+
+// 14.19. Narrowing Floating-Point/Integer Type-Convert Instructions
+let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
+ def vfncvt_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_xu">;
+ def vfncvt_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_x">;
+ def vfncvt_rtz_xu_f_w : RVVConvToNarrowingUnsignedBuiltin<"vfncvt_rtz_xu">;
+ def vfncvt_rtz_x_f_w : RVVConvToNarrowingSignedBuiltin<"vfncvt_rtz_x">;
+ def vfncvt_f_xu_w : RVVConvBuiltin<"Fv", "FvUw", "csi", "vfncvt_f">;
+ def vfncvt_f_x_w : RVVConvBuiltin<"Fv", "Fvw", "csi", "vfncvt_f">;
+ def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
+ def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
+}
+
+// 15. Vector Reduction Operations
+// 15.1. Vector Single-Width Integer Reduction Instructions
+defm vredsum : RVVIntReductionBuiltinSet;
+defm vredmaxu : RVVUnsignedReductionBuiltin;
+defm vredmax : RVVSignedReductionBuiltin;
+defm vredminu : RVVUnsignedReductionBuiltin;
+defm vredmin : RVVSignedReductionBuiltin;
+defm vredand : RVVIntReductionBuiltinSet;
+defm vredor : RVVIntReductionBuiltinSet;
+defm vredxor : RVVIntReductionBuiltinSet;
+
+// 15.2. Vector Widening Integer Reduction Instructions
+// Vector Widening Integer Reduction Operations
+let HasMaskedOffOperand = false in {
+ defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
+ [["vs", "vSw", "SwSwvSw"]]>;
+ defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
+ [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+}
+
+// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+defm vfredmax : RVVFloatingReductionBuiltin;
+defm vfredmin : RVVFloatingReductionBuiltin;
+defm vfredsum : RVVFloatingReductionBuiltin;
+defm vfredosum : RVVFloatingReductionBuiltin;
+
+// 15.4. Vector Widening Floating-Point Reduction Instructions
+defm vfwredsum : RVVFloatingWidenReductionBuiltin;
+defm vfwredosum : RVVFloatingWidenReductionBuiltin;
+
+// 16. Vector Mask Instructions
+// 16.1. Vector Mask-Register Logical Instructions
+def vmand : RVVMaskBinBuiltin;
+def vmnand : RVVMaskBinBuiltin;
+def vmandnot : RVVMaskBinBuiltin;
+def vmxor : RVVMaskBinBuiltin;
+def vmor : RVVMaskBinBuiltin;
+def vmnor : RVVMaskBinBuiltin;
+def vmornot : RVVMaskBinBuiltin;
+def vmxnor : RVVMaskBinBuiltin;
+// pseudoinstructions
+def vmclr : RVVMaskNullaryBuiltin;
+def vmset : RVVMaskNullaryBuiltin;
+defm vmmv_m : RVVPseudoMaskBuiltin<"vmand", "c">;
+defm vmnot_m : RVVPseudoMaskBuiltin<"vmnand", "c">;
+
+// 16.2. Vector mask population count vpopc
+def vpopc : RVVMaskOp0Builtin<"um">;
+
+// 16.3. vfirst find-first-set mask bit
+def vfirst : RVVMaskOp0Builtin<"lm">;
+
+// 16.4. vmsbf.m set-before-first mask bit
+def vmsbf : RVVMaskUnaryBuiltin;
+
+// 16.5. vmsif.m set-including-first mask bit
+def vmsif : RVVMaskUnaryBuiltin;
+
+// 16.6. vmsof.m set-only-first mask bit
+def vmsof : RVVMaskUnaryBuiltin;
+
+let HasNoMaskedOverloaded = false in {
+ // 16.8. Vector Iota Instruction
+ defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
+
+ // 16.9. Vector Element Index Instruction
+ defm vid : RVVOutBuiltinSet<"vid", "csil", [["v", "v", "v"],
+ ["v", "Uv", "Uv"]]>;
+}
+
+// 17. Vector Permutation Instructions
+// 17.1. Integer Scalar Move Instructions
+let HasMask = false in {
+ let HasVL = false, MangledName = "vmv_x" in
+ defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
+ [["s", "ve", "ev"],
+ ["s", "UvUe", "UeUv"]]>;
+ let MangledName = "vmv_s" in
+ defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil",
+ [["x", "v", "vve"],
+ ["x", "Uv", "UvUvUe"]]>;
+}
+
+// 17.2. Floating-Point Scalar Move Instructions
+let HasMask = false in {
+ let HasVL = false, MangledName = "vfmv_f" in
+ defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
+ [["s", "ve", "ev"]]>;
+ let MangledName = "vfmv_s" in
+ defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
+ [["f", "v", "vve"],
+ ["x", "Uv", "UvUvUe"]]>;
+}
+
+// 17.3. Vector Slide Instructions
+// 17.3.1. Vector Slideup Instructions
+defm vslideup : RVVSlideBuiltinSet;
+// 17.3.2. Vector Slidedown Instructions
+defm vslidedown : RVVSlideBuiltinSet;
+
+// 17.3.3. Vector Slide1up Instructions
+defm vslide1up : RVVSlideOneBuiltinSet;
+defm vfslide1up : RVVFloatingBinVFBuiltinSet;
+
+// 17.3.4. Vector Slide1down Instruction
+defm vslide1down : RVVSlideOneBuiltinSet;
+defm vfslide1down : RVVFloatingBinVFBuiltinSet;
+
+// 17.4. Vector Register Gather Instructions
+// signed and floating type
+defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csilxfd",
+ [["vv", "v", "vvUv"]]>;
+defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csilxfd",
+ [["vx", "v", "vvz"]]>;
+defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csilxfd",
+ [["vv", "v", "vv(Log2EEW:4)Uv"]]>;
+// unsigned type
+defm vrgather : RVVOutBuiltinSet<"vrgather_vv", "csil",
+ [["vv", "Uv", "UvUvUv"]]>;
+defm vrgather : RVVOutBuiltinSet<"vrgather_vx", "csil",
+ [["vx", "Uv", "UvUvz"]]>;
+defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
+ [["vv", "Uv", "UvUv(Log2EEW:4)Uv"]]>;
+
+// 17.5. Vector Compress Instruction
+let HasMask = false,
+ ManualCodegen = [{
+ std::rotate(Ops.begin(), Ops.begin() + 1, Ops.begin() + 3);
+ IntrinsicTypes = {ResultType, Ops[3]->getType()};
+ }] in {
+ // signed and floating type
+ defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
+ [["vm", "v", "vmvv"]]>;
+ // unsigned type
+ defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
+ [["vm", "Uv", "UvmUvUv"]]>;
+}
+
+// Miscellaneous
+let HasMask = false, HasVL = false, IRName = "" in {
+ let Name = "vreinterpret_v",
+ ManualCodegen = [{
+ return Builder.CreateBitCast(Ops[0], ResultType);
+ }] in {
+ // Reinterpret between different type under the same SEW and LMUL
+ def vreinterpret_i_u : RVVBuiltin<"Uvv", "vUv", "csil", "v">;
+ def vreinterpret_i_f : RVVBuiltin<"Fvv", "vFv", "sil", "v">;
+ def vreinterpret_u_i : RVVBuiltin<"vUv", "Uvv", "csil", "Uv">;
+ def vreinterpret_u_f : RVVBuiltin<"FvUv", "UvFv", "sil", "Uv">;
+ def vreinterpret_f_i : RVVBuiltin<"vFv", "Fvv", "sil", "Fv">;
+ def vreinterpret_f_u : RVVBuiltin<"UvFv", "FvUv", "sil", "Fv">;
+
+ // Reinterpret between different SEW under the same LMUL
+ foreach dst_sew = ["(FixedSEW:8)", "(FixedSEW:16)", "(FixedSEW:32)",
+ "(FixedSEW:64)"] in {
+ def vreinterpret_i_ # dst_sew : RVVBuiltin<"v" # dst_sew # "v",
+ dst_sew # "vv", "csil", dst_sew # "v">;
+ def vreinterpret_u_ # dst_sew : RVVBuiltin<"Uv" # dst_sew # "Uv",
+ dst_sew # "UvUv", "csil", dst_sew # "Uv">;
+ }
+ }
+
+ let Name = "vundefined", HasNoMaskedOverloaded = false,
+ ManualCodegen = [{
+ return llvm::UndefValue::get(ResultType);
+ }] in {
+ def vundefined : RVVBuiltin<"v", "v", "csilxfd">;
+ def vundefined_u : RVVBuiltin<"Uv", "Uv", "csil">;
+ }
+
+ // LMUL truncation
+ // C/C++ Operand: VecTy, IR Operand: VecTy, Index
+ let Name = "vlmul_trunc_v", MangledName = "vlmul_trunc",
+ ManualCodegen = [{ {
+ ID = Intrinsic::experimental_vector_extract;
+ IntrinsicTypes = {ResultType, Ops[0]->getType()};
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+ return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ } }] in {
+ foreach dst_lmul = ["(SFixedLog2LMUL:-3)", "(SFixedLog2LMUL:-2)", "(SFixedLog2LMUL:-1)",
+ "(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
+ def vlmul_trunc # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
+ dst_lmul # "vv", "csilxfd", dst_lmul # "v">;
+ def vlmul_trunc_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
+ dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
+ }
+ }
+
+ // LMUL extension
+ // C/C++ Operand: SubVecTy, IR Operand: VecTy, SubVecTy, Index
+ let Name = "vlmul_ext_v", MangledName = "vlmul_ext",
+ ManualCodegen = [{
+ ID = Intrinsic::experimental_vector_insert;
+ IntrinsicTypes = {ResultType, Ops[0]->getType()};
+ Ops.push_back(llvm::UndefValue::get(ResultType));
+ std::swap(Ops[0], Ops[1]);
+ Ops.push_back(ConstantInt::get(Int64Ty, 0));
+ return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ }] in {
+ foreach dst_lmul = ["(LFixedLog2LMUL:-2)", "(LFixedLog2LMUL:-1)", "(LFixedLog2LMUL:-0)",
+ "(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
+ def vlmul_ext # dst_lmul : RVVBuiltin<"v" # dst_lmul # "v",
+ dst_lmul # "vv", "csilxfd", dst_lmul # "v">;
+ def vlmul_ext_u # dst_lmul : RVVBuiltin<"Uv" # dst_lmul # "Uv",
+ dst_lmul # "UvUv", "csil", dst_lmul # "Uv">;
+ }
+ }
+
+ let Name = "vget_v",
+ ManualCodegen = [{
+ {
+ ID = Intrinsic::experimental_vector_extract;
+ ScalableVectorType *VecTy = cast<ScalableVectorType>(ResultType);
+ Ops[1] = Builder.CreateMul(Ops[1],
+ ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ IntrinsicTypes = {ResultType, Ops[0]->getType()};
+ return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ }
+ }] in {
+ foreach dst_lmul = ["(SFixedLog2LMUL:0)", "(SFixedLog2LMUL:1)", "(SFixedLog2LMUL:2)"] in {
+ def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "vvKz", "csilfd", dst_lmul # "v">;
+ def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "UvUvKz", "csil", dst_lmul # "Uv">;
+ }
+ }
+
+ let Name = "vset_v", Log2LMUL = [0, 1, 2],
+ ManualCodegen = [{
+ {
+ ID = Intrinsic::experimental_vector_insert;
+ IntrinsicTypes = {ResultType, Ops[2]->getType()};
+ ScalableVectorType *VecTy = cast<ScalableVectorType>(Ops[2]->getType());
+ Ops[1] = Builder.CreateMul(Ops[1],
+ ConstantInt::get(Ops[1]->getType(),
+ VecTy->getMinNumElements()));
+ std::swap(Ops[1], Ops[2]);
+ return Builder.CreateCall(CGM.getIntrinsic(ID, IntrinsicTypes), Ops, "");
+ }
+ }] in {
+ foreach dst_lmul = ["(LFixedLog2LMUL:1)", "(LFixedLog2LMUL:2)", "(LFixedLog2LMUL:3)"] in {
+ def : RVVBuiltin<"v" # dst_lmul # "v", dst_lmul # "v" # dst_lmul # "vKzv", "csilfd">;
+ def : RVVBuiltin<"Uv" # dst_lmul # "Uv", dst_lmul # "Uv" # dst_lmul #"UvKzUv", "csil">;
+ }
+ }
+}
diff --git a/clang/include/clang/CodeGen/BackendUtil.h b/clang/include/clang/CodeGen/BackendUtil.h
index 43de07cc145b..77d500079f01 100644
--- a/clang/include/clang/CodeGen/BackendUtil.h
+++ b/clang/include/clang/CodeGen/BackendUtil.h
@@ -39,8 +39,7 @@ namespace clang {
void EmitBackendOutput(DiagnosticsEngine &Diags, const HeaderSearchOptions &,
const CodeGenOptions &CGOpts,
const TargetOptions &TOpts, const LangOptions &LOpts,
- const llvm::DataLayout &TDesc, llvm::Module *M,
- BackendAction Action,
+ StringRef TDesc, llvm::Module *M, BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS);
void EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
diff --git a/clang/include/clang/CodeGen/CGFunctionInfo.h b/clang/include/clang/CodeGen/CGFunctionInfo.h
index 253ef946ce15..4899c9deda6a 100644
--- a/clang/include/clang/CodeGen/CGFunctionInfo.h
+++ b/clang/include/clang/CodeGen/CGFunctionInfo.h
@@ -93,13 +93,20 @@ private:
llvm::Type *PaddingType; // canHavePaddingType()
llvm::Type *UnpaddedCoerceAndExpandType; // isCoerceAndExpand()
};
+ struct DirectAttrInfo {
+ unsigned Offset;
+ unsigned Align;
+ };
+ struct IndirectAttrInfo {
+ unsigned Align;
+ unsigned AddrSpace;
+ };
union {
- unsigned DirectOffset; // isDirect() || isExtend()
- unsigned IndirectAlign; // isIndirect()
+ DirectAttrInfo DirectAttr; // isDirect() || isExtend()
+ IndirectAttrInfo IndirectAttr; // isIndirect()
unsigned AllocaFieldIndex; // isInAlloca()
};
Kind TheKind;
- unsigned IndirectAddrSpace : 24; // isIndirect()
bool PaddingInReg : 1;
bool InAllocaSRet : 1; // isInAlloca()
bool InAllocaIndirect : 1;// isInAlloca()
@@ -126,19 +133,20 @@ private:
public:
ABIArgInfo(Kind K = Direct)
- : TypeData(nullptr), PaddingType(nullptr), DirectOffset(0), TheKind(K),
- IndirectAddrSpace(0), PaddingInReg(false), InAllocaSRet(false),
+ : TypeData(nullptr), PaddingType(nullptr), DirectAttr{0, 0}, TheKind(K),
+ PaddingInReg(false), InAllocaSRet(false),
InAllocaIndirect(false), IndirectByVal(false), IndirectRealign(false),
SRetAfterThis(false), InReg(false), CanBeFlattened(false),
SignExt(false) {}
static ABIArgInfo getDirect(llvm::Type *T = nullptr, unsigned Offset = 0,
llvm::Type *Padding = nullptr,
- bool CanBeFlattened = true) {
+ bool CanBeFlattened = true, unsigned Align = 0) {
auto AI = ABIArgInfo(Direct);
AI.setCoerceToType(T);
AI.setPaddingType(Padding);
AI.setDirectOffset(Offset);
+ AI.setDirectAlign(Align);
AI.setCanBeFlattened(CanBeFlattened);
return AI;
}
@@ -154,6 +162,7 @@ public:
AI.setCoerceToType(T);
AI.setPaddingType(nullptr);
AI.setDirectOffset(0);
+ AI.setDirectAlign(0);
AI.setSignExt(true);
return AI;
}
@@ -164,6 +173,7 @@ public:
AI.setCoerceToType(T);
AI.setPaddingType(nullptr);
AI.setDirectOffset(0);
+ AI.setDirectAlign(0);
AI.setSignExt(false);
return AI;
}
@@ -299,11 +309,20 @@ public:
// Direct/Extend accessors
unsigned getDirectOffset() const {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
- return DirectOffset;
+ return DirectAttr.Offset;
}
void setDirectOffset(unsigned Offset) {
assert((isDirect() || isExtend()) && "Not a direct or extend kind");
- DirectOffset = Offset;
+ DirectAttr.Offset = Offset;
+ }
+
+ unsigned getDirectAlign() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return DirectAttr.Align;
+ }
+ void setDirectAlign(unsigned Align) {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ DirectAttr.Align = Align;
}
bool isSignExt() const {
@@ -369,11 +388,11 @@ public:
// Indirect accessors
CharUnits getIndirectAlign() const {
assert((isIndirect() || isIndirectAliased()) && "Invalid kind!");
- return CharUnits::fromQuantity(IndirectAlign);
+ return CharUnits::fromQuantity(IndirectAttr.Align);
}
void setIndirectAlign(CharUnits IA) {
assert((isIndirect() || isIndirectAliased()) && "Invalid kind!");
- IndirectAlign = IA.getQuantity();
+ IndirectAttr.Align = IA.getQuantity();
}
bool getIndirectByVal() const {
@@ -387,12 +406,12 @@ public:
unsigned getIndirectAddrSpace() const {
assert(isIndirectAliased() && "Invalid kind!");
- return IndirectAddrSpace;
+ return IndirectAttr.AddrSpace;
}
void setIndirectAddrSpace(unsigned AddrSpace) {
assert(isIndirectAliased() && "Invalid kind!");
- IndirectAddrSpace = AddrSpace;
+ IndirectAttr.AddrSpace = AddrSpace;
}
bool getIndirectRealign() const {
diff --git a/clang/include/clang/CodeGen/CodeGenAction.h b/clang/include/clang/CodeGen/CodeGenAction.h
index 1db904ea974c..b5721344046d 100644
--- a/clang/include/clang/CodeGen/CodeGenAction.h
+++ b/clang/include/clang/CodeGen/CodeGenAction.h
@@ -19,6 +19,7 @@ namespace llvm {
namespace clang {
class BackendConsumer;
+class CodeGenerator;
class CodeGenAction : public ASTFrontendAction {
private:
@@ -77,6 +78,8 @@ public:
/// Take the LLVM context used by this action.
llvm::LLVMContext *takeLLVMContext();
+ CodeGenerator *getCodeGenerator() const;
+
BackendConsumer *BEConsumer;
};
diff --git a/clang/include/clang/CodeGen/SwiftCallingConv.h b/clang/include/clang/CodeGen/SwiftCallingConv.h
index 2c5e9a6de7e2..b1a638a58a09 100644
--- a/clang/include/clang/CodeGen/SwiftCallingConv.h
+++ b/clang/include/clang/CodeGen/SwiftCallingConv.h
@@ -6,7 +6,8 @@
//
//===----------------------------------------------------------------------===//
//
-// Defines constants and types related to Swift ABI lowering.
+// Defines constants and types related to Swift ABI lowering. The same ABI
+// lowering applies to both sync and async functions.
//
//===----------------------------------------------------------------------===//
diff --git a/clang/include/clang/CrossTU/CrossTranslationUnit.h b/clang/include/clang/CrossTU/CrossTranslationUnit.h
index 027c6f16430b..d9f9c51fccd9 100644
--- a/clang/include/clang/CrossTU/CrossTranslationUnit.h
+++ b/clang/include/clang/CrossTU/CrossTranslationUnit.h
@@ -15,6 +15,7 @@
#define LLVM_CLANG_CROSSTU_CROSSTRANSLATIONUNIT_H
#include "clang/AST/ASTImporterSharedState.h"
+#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
@@ -37,6 +38,7 @@ class TranslationUnitDecl;
namespace cross_tu {
enum class index_error_code {
+ success = 0,
unspecified = 1,
missing_index_file,
invalid_index_format,
@@ -182,21 +184,18 @@ public:
/// Emit diagnostics for the user for potential configuration errors.
void emitCrossTUDiagnostics(const IndexError &IE);
- /// Determine the original source location in the original TU for an
- /// imported source location.
+ /// Returns the MacroExpansionContext for the imported TU to which the given
+ /// source-location corresponds.
/// \p ToLoc Source location in the imported-to AST.
- /// \return Source location in the imported-from AST and the corresponding
- /// ASTUnit object (the AST was loaded from a file using an internal ASTUnit
- /// object that is returned here).
- /// If any error happens (ToLoc is a non-imported source location) empty is
- /// returned.
- llvm::Optional<std::pair<SourceLocation /*FromLoc*/, ASTUnit *>>
- getImportedFromSourceLocation(const clang::SourceLocation &ToLoc) const;
+ /// \note If any error happens such as \p ToLoc is a non-imported
+ /// source-location, empty is returned.
+ /// \note Macro expansion tracking for imported TUs is not implemented yet.
+ /// It returns empty unconditionally.
+ llvm::Optional<clang::MacroExpansionContext>
+ getMacroExpansionContextForSourceLocation(
+ const clang::SourceLocation &ToLoc) const;
private:
- using ImportedFileIDMap =
- llvm::DenseMap<FileID, std::pair<FileID, ASTUnit *>>;
-
void lazyInitImporterSharedSt(TranslationUnitDecl *ToTU);
ASTImporter &getOrCreateASTImporter(ASTUnit *Unit);
template <typename T>
@@ -217,14 +216,6 @@ private:
ASTContext &Context;
std::shared_ptr<ASTImporterSharedState> ImporterSharedSt;
- /// Map of imported FileID's (in "To" context) to FileID in "From" context
- /// and the ASTUnit for the From context.
- /// This map is used by getImportedFromSourceLocation to lookup a FileID and
- /// its Preprocessor when knowing only the FileID in the 'To' context. The
- /// FileID could be imported by any of multiple 'From' ASTImporter objects.
- /// we do not want to loop over all ASTImporter's to find the one that
- /// imported the FileID.
- ImportedFileIDMap ImportedFileIDs;
using LoadResultTy = llvm::Expected<std::unique_ptr<ASTUnit>>;
@@ -263,6 +254,7 @@ private:
/// In case of on-demand parsing, the invocations for parsing the source
/// files is stored.
llvm::Optional<InvocationListTy> InvocationList;
+ index_error_code PreviousParsingResult = index_error_code::success;
};
/// Maintain number of AST loads and check for reaching the load limit.
diff --git a/clang/include/clang/Driver/Action.h b/clang/include/clang/Driver/Action.h
index 27c95c6f89d4..ba84d886a6cf 100644
--- a/clang/include/clang/Driver/Action.h
+++ b/clang/include/clang/Driver/Action.h
@@ -214,14 +214,18 @@ public:
class InputAction : public Action {
const llvm::opt::Arg &Input;
-
+ std::string Id;
virtual void anchor();
public:
- InputAction(const llvm::opt::Arg &Input, types::ID Type);
+ InputAction(const llvm::opt::Arg &Input, types::ID Type,
+ StringRef Id = StringRef());
const llvm::opt::Arg &getInputArg() const { return Input; }
+ void setId(StringRef _Id) { Id = _Id.str(); }
+ StringRef getId() const { return Id; }
+
static bool classof(const Action *A) {
return A->getKind() == InputClass;
}
diff --git a/clang/include/clang/Driver/ClangOptionDocs.td b/clang/include/clang/Driver/ClangOptionDocs.td
index 55136421614d..3f914afea735 100644
--- a/clang/include/clang/Driver/ClangOptionDocs.td
+++ b/clang/include/clang/Driver/ClangOptionDocs.td
@@ -29,7 +29,7 @@ GCC-compatible ``clang`` and ``clang++`` drivers.
string Program = "clang";
list<string> ExcludedFlags = ["HelpHidden", "NoDriverOption",
- "CLOption", "Unsupported", "Ignored"];
+ "CLOption", "Unsupported", "Ignored", "FlangOnlyOption"];
}
include "Options.td"
diff --git a/clang/include/clang/Driver/DarwinSDKInfo.h b/clang/include/clang/Driver/DarwinSDKInfo.h
deleted file mode 100644
index f7075a8d3b7f..000000000000
--- a/clang/include/clang/Driver/DarwinSDKInfo.h
+++ /dev/null
@@ -1,41 +0,0 @@
-//===--- DarwinSDKInfo.h - SDK Information parser for darwin ----*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_DRIVER_DARWIN_SDK_INFO_H
-#define LLVM_CLANG_DRIVER_DARWIN_SDK_INFO_H
-
-#include "clang/Basic/LLVM.h"
-#include "llvm/Support/Error.h"
-#include "llvm/Support/VersionTuple.h"
-#include "llvm/Support/VirtualFileSystem.h"
-
-namespace clang {
-namespace driver {
-
-/// The information about the darwin SDK that was used during this compilation.
-class DarwinSDKInfo {
-public:
- DarwinSDKInfo(llvm::VersionTuple Version) : Version(Version) {}
-
- const llvm::VersionTuple &getVersion() const { return Version; }
-
-private:
- llvm::VersionTuple Version;
-};
-
-/// Parse the SDK information from the SDKSettings.json file.
-///
-/// \returns an error if the SDKSettings.json file is invalid, None if the
-/// SDK has no SDKSettings.json, or a valid \c DarwinSDKInfo otherwise.
-Expected<Optional<DarwinSDKInfo>> parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS,
- StringRef SDKRootPath);
-
-} // end namespace driver
-} // end namespace clang
-
-#endif // LLVM_CLANG_DRIVER_DARWIN_SDK_INFO_H
diff --git a/clang/include/clang/Driver/Distro.h b/clang/include/clang/Driver/Distro.h
index 0e98d00764c1..0d2a0939639e 100644
--- a/clang/include/clang/Driver/Distro.h
+++ b/clang/include/clang/Driver/Distro.h
@@ -71,6 +71,7 @@ public:
UbuntuFocal,
UbuntuGroovy,
UbuntuHirsute,
+ UbuntuImpish,
UnknownDistro
};
@@ -122,7 +123,7 @@ public:
}
bool IsUbuntu() const {
- return DistroVal >= UbuntuHardy && DistroVal <= UbuntuHirsute;
+ return DistroVal >= UbuntuHardy && DistroVal <= UbuntuImpish;
}
bool IsAlpineLinux() const { return DistroVal == AlpineLinux; }
diff --git a/clang/include/clang/Driver/Driver.h b/clang/include/clang/Driver/Driver.h
index 74a9cf3dab81..da7e8386a151 100644
--- a/clang/include/clang/Driver/Driver.h
+++ b/clang/include/clang/Driver/Driver.h
@@ -84,6 +84,9 @@ class Driver {
/// LTO mode selected via -f(no-)?lto(=.*)? options.
LTOKind LTOMode;
+ /// LTO mode selected via -f(no-offload-)?lto(=.*)? options.
+ LTOKind OffloadLTOMode;
+
public:
enum OpenMPRuntimeKind {
/// An unknown OpenMP runtime. We can't generate effective OpenMP code
@@ -156,14 +159,17 @@ public:
/// Information about the host which can be overridden by the user.
std::string HostBits, HostMachine, HostSystem, HostRelease;
+ /// The file to log CC_PRINT_PROC_STAT_FILE output to, if enabled.
+ std::string CCPrintStatReportFilename;
+
/// The file to log CC_PRINT_OPTIONS output to, if enabled.
- const char *CCPrintOptionsFilename;
+ std::string CCPrintOptionsFilename;
/// The file to log CC_PRINT_HEADERS output to, if enabled.
- const char *CCPrintHeadersFilename;
+ std::string CCPrintHeadersFilename;
/// The file to log CC_LOG_DIAGNOSTICS output to, if enabled.
- const char *CCLogDiagnosticsFilename;
+ std::string CCLogDiagnosticsFilename;
/// A list of inputs and their types for the given arguments.
typedef SmallVector<std::pair<types::ID, const llvm::opt::Arg *>, 16>
@@ -204,6 +210,10 @@ public:
/// Whether the driver is generating diagnostics for debugging purposes.
unsigned CCGenDiagnostics : 1;
+ /// Set CC_PRINT_PROC_STAT mode, which causes the driver to dump
+ /// performance report to CC_PRINT_PROC_STAT_FILE or to stdout.
+ unsigned CCPrintProcessStats : 1;
+
/// Pointer to the ExecuteCC1Tool function, if available.
/// When the clangDriver lib is used through clang.exe, this provides a
/// shortcut for executing the -cc1 command-line directly, in the same
@@ -370,12 +380,6 @@ public:
/// to determine if an error occurred.
Compilation *BuildCompilation(ArrayRef<const char *> Args);
- /// @name Driver Steps
- /// @{
-
- /// ParseDriverMode - Look for and handle the driver mode option in Args.
- void ParseDriverMode(StringRef ProgramName, ArrayRef<const char *> Args);
-
/// ParseArgStrings - Parse the given list of strings into an
/// ArgList.
llvm::opt::InputArgList ParseArgStrings(ArrayRef<const char *> Args,
@@ -552,10 +556,14 @@ public:
bool ShouldEmitStaticLibrary(const llvm::opt::ArgList &Args) const;
/// Returns true if we are performing any kind of LTO.
- bool isUsingLTO() const { return LTOMode != LTOK_None; }
+ bool isUsingLTO(bool IsOffload = false) const {
+ return getLTOMode(IsOffload) != LTOK_None;
+ }
/// Get the specific kind of LTO being performed.
- LTOKind getLTOMode() const { return LTOMode; }
+ LTOKind getLTOMode(bool IsOffload = false) const {
+ return IsOffload ? OffloadLTOMode : LTOMode;
+ }
private:
@@ -570,9 +578,9 @@ private:
/// \returns true, if error occurred while reading.
bool readConfigFile(StringRef FileName);
- /// Set the driver mode (cl, gcc, etc) from an option string of the form
- /// --driver-mode=<mode>.
- void setDriverModeFromOption(StringRef Opt);
+ /// Set the driver mode (cl, gcc, etc) from the value of the `--driver-mode`
+ /// option.
+ void setDriverMode(StringRef DriverModeValue);
/// Parse the \p Args list for LTO options and record the type of LTO
/// compilation based on which -f(no-)?lto(=.*)? option occurs last.
@@ -632,6 +640,16 @@ bool isOptimizationLevelFast(const llvm::opt::ArgList &Args);
/// \return True if the argument combination will end up generating remarks.
bool willEmitRemarks(const llvm::opt::ArgList &Args);
+/// Returns the driver mode option's value, i.e. `X` in `--driver-mode=X`. If \p
+/// Args doesn't mention one explicitly, tries to deduce from `ProgName`.
+/// Returns empty on failure.
+/// Common values are "gcc", "g++", "cpp", "cl" and "flang". Returned value need
+/// not be one of these.
+llvm::StringRef getDriverMode(StringRef ProgName, ArrayRef<const char *> Args);
+
+/// Checks whether the value produced by getDriverMode is for CL mode.
+bool IsClangCL(StringRef DriverMode);
+
} // end namespace driver
} // end namespace clang
diff --git a/clang/lib/Driver/InputInfo.h b/clang/include/clang/Driver/InputInfo.h
index a6b6f7f344bc..a1ecdb623331 100644
--- a/clang/lib/Driver/InputInfo.h
+++ b/clang/include/clang/Driver/InputInfo.h
@@ -6,8 +6,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_CLANG_LIB_DRIVER_INPUTINFO_H
-#define LLVM_CLANG_LIB_DRIVER_INPUTINFO_H
+#ifndef LLVM_CLANG_DRIVER_INPUTINFO_H
+#define LLVM_CLANG_DRIVER_INPUTINFO_H
#include "clang/Driver/Action.h"
#include "clang/Driver/Types.h"
diff --git a/clang/include/clang/Driver/Job.h b/clang/include/clang/Driver/Job.h
index 199387cddd5c..8b287638a271 100644
--- a/clang/include/clang/Driver/Job.h
+++ b/clang/include/clang/Driver/Job.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_DRIVER_JOB_H
#include "clang/Basic/LLVM.h"
+#include "clang/Driver/InputInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
@@ -119,8 +120,8 @@ class Command {
/// argument, which will be the executable).
llvm::opt::ArgStringList Arguments;
- /// The list of program arguments which are inputs.
- llvm::opt::ArgStringList InputFilenames;
+ /// The list of program inputs.
+ std::vector<InputInfo> InputInfoList;
/// The list of program arguments which are outputs. May be empty.
std::vector<std::string> OutputFilenames;
@@ -207,9 +208,7 @@ public:
const llvm::opt::ArgStringList &getArguments() const { return Arguments; }
- const llvm::opt::ArgStringList &getInputFilenames() const {
- return InputFilenames;
- }
+ const std::vector<InputInfo> &getInputInfos() const { return InputInfoList; }
const std::vector<std::string> &getOutputFilenames() const {
return OutputFilenames;
@@ -241,26 +240,6 @@ public:
void setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) override;
};
-/// Like Command, but with a fallback which is executed in case
-/// the primary command crashes.
-class FallbackCommand : public Command {
-public:
- FallbackCommand(const Action &Source_, const Tool &Creator_,
- ResponseFileSupport ResponseSupport, const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_,
- ArrayRef<InputInfo> Inputs, ArrayRef<InputInfo> Outputs,
- std::unique_ptr<Command> Fallback_);
-
- void Print(llvm::raw_ostream &OS, const char *Terminator, bool Quote,
- CrashReportInfo *CrashInfo = nullptr) const override;
-
- int Execute(ArrayRef<Optional<StringRef>> Redirects, std::string *ErrMsg,
- bool *ExecutionFailed) const override;
-
-private:
- std::unique_ptr<Command> Fallback;
-};
-
/// Like Command, but always pretends that the wrapped command succeeded.
class ForceSuccessCommand : public Command {
public:
diff --git a/clang/include/clang/Driver/Options.h b/clang/include/clang/Driver/Options.h
index b641f64c3116..056660192ac5 100644
--- a/clang/include/clang/Driver/Options.h
+++ b/clang/include/clang/Driver/Options.h
@@ -9,8 +9,6 @@
#ifndef LLVM_CLANG_DRIVER_OPTIONS_H
#define LLVM_CLANG_DRIVER_OPTIONS_H
-#include <memory>
-
namespace llvm {
namespace opt {
class OptTable;
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 42c5319041d0..5a9fd078390e 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -160,6 +160,8 @@ def m_hexagon_Features_Group : OptionGroup<"<hexagon features group>">,
// These are explicitly handled.
def m_hexagon_Features_HVX_Group : OptionGroup<"<hexagon features group>">,
Group<m_Group>, DocName<"Hexagon">;
+def m_m68k_Features_Group: OptionGroup<"<m68k features group>">,
+ Group<m_Group>, DocName<"M68k">;
def m_mips_Features_Group : OptionGroup<"<mips features group>">,
Group<m_Group>, DocName<"MIPS">;
def m_ppc_Features_Group : OptionGroup<"<ppc features group>">,
@@ -246,53 +248,58 @@ class DiagnosticOpts<string base>
class LangOpts<string base>
: KeyPathAndMacro<"LangOpts->", base, "LANG_"> {}
class TargetOpts<string base>
- : KeyPathAndMacro<"TargetOpts->", base> {}
+ : KeyPathAndMacro<"TargetOpts->", base, "TARGET_"> {}
class FrontendOpts<string base>
- : KeyPathAndMacro<"FrontendOpts.", base> {}
+ : KeyPathAndMacro<"FrontendOpts.", base, "FRONTEND_"> {}
class PreprocessorOutputOpts<string base>
- : KeyPathAndMacro<"PreprocessorOutputOpts.", base> {}
+ : KeyPathAndMacro<"PreprocessorOutputOpts.", base, "PREPROCESSOR_OUTPUT_"> {}
class DependencyOutputOpts<string base>
- : KeyPathAndMacro<"DependencyOutputOpts.", base> {}
+ : KeyPathAndMacro<"DependencyOutputOpts.", base, "DEPENDENCY_OUTPUT_"> {}
class CodeGenOpts<string base>
: KeyPathAndMacro<"CodeGenOpts.", base, "CODEGEN_"> {}
class HeaderSearchOpts<string base>
- : KeyPathAndMacro<"HeaderSearchOpts->", base> {}
+ : KeyPathAndMacro<"HeaderSearchOpts->", base, "HEADER_SEARCH_"> {}
class PreprocessorOpts<string base>
- : KeyPathAndMacro<"PreprocessorOpts->", base> {}
+ : KeyPathAndMacro<"PreprocessorOpts->", base, "PREPROCESSOR_"> {}
class FileSystemOpts<string base>
- : KeyPathAndMacro<"FileSystemOpts.", base> {}
+ : KeyPathAndMacro<"FileSystemOpts.", base, "FILE_SYSTEM_"> {}
class AnalyzerOpts<string base>
- : KeyPathAndMacro<"AnalyzerOpts->", base> {}
+ : KeyPathAndMacro<"AnalyzerOpts->", base, "ANALYZER_"> {}
class MigratorOpts<string base>
- : KeyPathAndMacro<"MigratorOpts.", base> {}
+ : KeyPathAndMacro<"MigratorOpts.", base, "MIGRATOR_"> {}
// A boolean option which is opt-in in CC1. The positive option exists in CC1 and
-// Args.hasArg(OPT_ffoo) is used to check that the flag is enabled.
+// Args.hasArg(OPT_ffoo) can be used to check that the flag is enabled.
// This is useful if the option is usually disabled.
+// Use this only when the option cannot be declared via BoolFOption.
multiclass OptInFFlag<string name, string pos_prefix, string neg_prefix="",
- string help="", list<OptionFlag> flags=[],
- KeyPathAndMacro kpm = EmptyKPM,
- list<string> enablers = []> {
- def f#NAME : Flag<["-"], "f"#name>, Flags<!listconcat([CC1Option], flags)>,
- Group<f_Group>, HelpText<!strconcat(pos_prefix, help)>,
- MarshallingInfoFlag<kpm, "false">,
- ImpliedByAnyOf<enablers, "true">;
+ string help="", list<OptionFlag> flags=[]> {
+ def f#NAME : Flag<["-"], "f"#name>, Flags<[CC1Option] # flags>,
+ Group<f_Group>, HelpText<pos_prefix # help>;
def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<flags>,
- Group<f_Group>, HelpText<!strconcat(neg_prefix, help)>;
+ Group<f_Group>, HelpText<neg_prefix # help>;
}
// A boolean option which is opt-out in CC1. The negative option exists in CC1 and
-// Args.hasArg(OPT_fno_foo) is used to check that the flag is disabled.
+// Args.hasArg(OPT_fno_foo) can be used to check that the flag is disabled.
+// Use this only when the option cannot be declared via BoolFOption.
multiclass OptOutFFlag<string name, string pos_prefix, string neg_prefix,
- string help="", list<OptionFlag> flags=[],
- KeyPathAndMacro kpm = EmptyKPM,
- list<string> disablers = []> {
+ string help="", list<OptionFlag> flags=[]> {
def f#NAME : Flag<["-"], "f"#name>, Flags<flags>,
- Group<f_Group>, HelpText<!strconcat(pos_prefix, help)>;
- def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<!listconcat([CC1Option], flags)>,
- Group<f_Group>, HelpText<!strconcat(neg_prefix, help)>,
- MarshallingInfoFlag<kpm, "false">,
- ImpliedByAnyOf<disablers, "true">;
+ Group<f_Group>, HelpText<pos_prefix # help>;
+ def fno_#NAME : Flag<["-"], "fno-"#name>, Flags<[CC1Option] # flags>,
+ Group<f_Group>, HelpText<neg_prefix # help>;
+}
+
+// Creates a positive and negative flags where both of them are prefixed with
+// "m", have help text specified for positive and negative option, and a Group
+// optionally specified by the opt_group argument, otherwise Group<m_Group>.
+multiclass SimpleMFlag<string name, string pos_prefix, string neg_prefix,
+ string help, OptionGroup opt_group = m_Group> {
+ def m#NAME : Flag<["-"], "m"#name>, Group<opt_group>,
+ HelpText<pos_prefix # help>;
+ def mno_#NAME : Flag<["-"], "mno-"#name>, Group<opt_group>,
+ HelpText<neg_prefix # help>;
}
//===----------------------------------------------------------------------===//
@@ -342,8 +349,8 @@ class BothFlags<list<OptionFlag> option_flags, string help = ""> {
class ApplySuffix<FlagDef flag, BothFlags suffix> {
FlagDef Result
= FlagDef<flag.Polarity, flag.Value,
- !listconcat(flag.OptionFlags, suffix.OptionFlags),
- !strconcat(flag.Help, suffix.Help), flag.ImpliedBy>;
+ flag.OptionFlags # suffix.OptionFlags,
+ flag.Help # suffix.Help, flag.ImpliedBy>;
}
// Definition of the command line flag with positive spelling, e.g. "-ffoo".
@@ -361,16 +368,16 @@ class FlagDefExpanded<FlagDef flag, string prefix, string name, string spelling>
: FlagDef<flag.Polarity, flag.Value, flag.OptionFlags, flag.Help,
flag.ImpliedBy> {
// Name of the TableGen record.
- string RecordName = prefix#!cond(flag.Polarity : "", true : "no_")#name;
+ string RecordName = prefix # !if(flag.Polarity, "", "no_") # name;
// Spelling of the flag.
- string Spelling = prefix#!cond(flag.Polarity : "", true : "no-")#spelling;
+ string Spelling = prefix # !if(flag.Polarity, "", "no-") # spelling;
// Can the flag be implied by another flag?
bit CanBeImplied = !not(!empty(flag.ImpliedBy));
// C++ code that will be assigned to the keypath when the flag is present.
- code ValueAsCode = !cond(flag.Value : "true", true: "false");
+ code ValueAsCode = !if(flag.Value, "true", "false");
}
// TableGen record for a single marshalled flag.
@@ -387,6 +394,8 @@ class MarshalledFlagRec<FlagDefExpanded flag, FlagDefExpanded other,
// key path via the marshalling infrastructure.
// Names of the records consist of the specified prefix, "no_" for the negative
// flag, and NAME.
+// Used for -cc1 frontend options. Driver-only options do not map to
+// CompilerInvocation.
multiclass BoolOption<string prefix = "", string spelling_base,
KeyPathAndMacro kpm, Default default,
FlagDef flag1_base, FlagDef flag2_base,
@@ -397,11 +406,19 @@ multiclass BoolOption<string prefix = "", string spelling_base,
defvar flag2 = FlagDefExpanded<ApplySuffix<flag2_base, suffix>.Result, prefix,
NAME, spelling_base>;
- // TODO: Assert that the flags have different polarity.
- // TODO: Assert that the flags have different value.
- // TODO: Assert that only one of the flags can be implied.
+ // The flags must have different polarity, different values, and only
+ // one can be implied.
+ assert !xor(flag1.Polarity, flag2.Polarity),
+ "the flags must have different polarity: flag1: " #
+ flag1.Polarity # ", flag2: " # flag2.Polarity;
+ assert !ne(flag1.Value, flag2.Value),
+ "the flags must have different values: flag1: " #
+ flag1.Value # ", flag2: " # flag2.Value;
+ assert !not(!and(flag1.CanBeImplied, flag2.CanBeImplied)),
+ "only one of the flags can be implied: flag1: " #
+ flag1.CanBeImplied # ", flag2: " # flag2.CanBeImplied;
- defvar implied = !cond(flag1.CanBeImplied: flag1, true: flag2);
+ defvar implied = !if(flag1.CanBeImplied, flag1, flag2);
def flag1.RecordName : MarshalledFlagRec<flag1, flag2, implied, kpm, default>;
def flag2.RecordName : MarshalledFlagRec<flag2, flag1, implied, kpm, default>;
@@ -409,6 +426,8 @@ multiclass BoolOption<string prefix = "", string spelling_base,
/// Creates a BoolOption where both of the flags are prefixed with "f", are in
/// the Group<f_Group>.
+/// Used for -cc1 frontend options. Driver-only options do not map to
+/// CompilerInvocation.
multiclass BoolFOption<string flag_base, KeyPathAndMacro kpm,
Default default, FlagDef flag1, FlagDef flag2,
BothFlags both = BothFlags<[], "">> {
@@ -418,6 +437,8 @@ multiclass BoolFOption<string flag_base, KeyPathAndMacro kpm,
// Creates a BoolOption where both of the flags are prefixed with "g" and have
// the Group<g_Group>.
+// Used for -cc1 frontend options. Driver-only options do not map to
+// CompilerInvocation.
multiclass BoolGOption<string flag_base, KeyPathAndMacro kpm,
Default default, FlagDef flag1, FlagDef flag2,
BothFlags both = BothFlags<[], "">> {
@@ -427,20 +448,26 @@ multiclass BoolGOption<string flag_base, KeyPathAndMacro kpm,
// FIXME: Diagnose if target does not support protected visibility.
class MarshallingInfoVisibility<KeyPathAndMacro kpm, code default>
- : MarshallingInfoString<kpm, default>,
+ : MarshallingInfoEnum<kpm, default>,
Values<"default,hidden,internal,protected">,
NormalizedValues<["DefaultVisibility", "HiddenVisibility",
- "HiddenVisibility", "ProtectedVisibility"]>,
- AutoNormalizeEnum {}
+ "HiddenVisibility", "ProtectedVisibility"]> {}
// Key paths that are constant during parsing of options with the same key path prefix.
defvar cplusplus = LangOpts<"CPlusPlus">;
+defvar cpp11 = LangOpts<"CPlusPlus11">;
+defvar cpp17 = LangOpts<"CPlusPlus17">;
+defvar cpp20 = LangOpts<"CPlusPlus20">;
defvar c99 = LangOpts<"C99">;
+defvar c2x = LangOpts<"C2x">;
defvar lang_std = LangOpts<"LangStd">;
defvar open_cl = LangOpts<"OpenCL">;
+defvar cuda = LangOpts<"CUDA">;
defvar render_script = LangOpts<"RenderScript">;
defvar hip = LangOpts<"HIP">;
defvar gnu_mode = LangOpts<"GNUMode">;
+defvar asm_preprocessor = LangOpts<"AsmPreprocessor">;
+defvar cpp_modules = LangOpts<"CPlusPlusModules">;
defvar std = !strconcat("LangStandard::getLangStandardForKind(", lang_std.KeyPath, ")");
@@ -513,6 +540,11 @@ def gen_reproducer: Flag<["-"], "gen-reproducer">, InternalDebugOpt,
def gen_cdb_fragment_path: Separate<["-"], "gen-cdb-fragment-path">, InternalDebugOpt,
HelpText<"Emit a compilation database fragment to the specified directory">;
+def round_trip_args : Flag<["-"], "round-trip-args">, Flags<[CC1Option, NoDriverOption]>,
+ HelpText<"Enable command line arguments round-trip.">;
+def no_round_trip_args : Flag<["-"], "no-round-trip-args">, Flags<[CC1Option, NoDriverOption]>,
+ HelpText<"Disable command line arguments round-trip.">;
+
def _migrate : Flag<["--"], "migrate">, Flags<[NoXarchOption]>,
HelpText<"Run the migrator">;
def ccc_objcmt_migrate : Separate<["-"], "ccc-objcmt-migrate">,
@@ -583,8 +615,14 @@ def _HASH_HASH_HASH : Flag<["-"], "###">, Flags<[NoXarchOption, CoreOption, Flan
def _DASH_DASH : Option<["--"], "", KIND_REMAINING_ARGS>,
Flags<[NoXarchOption, CoreOption]>;
def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>, Group<gfortran_Group>;
-def B : JoinedOrSeparate<["-"], "B">, MetaVarName<"<dir>">,
- HelpText<"Add <dir> to search path for binaries and object files used implicitly">;
+def B : JoinedOrSeparate<["-"], "B">, MetaVarName<"<prefix>">,
+ HelpText<"Search $prefix/$triple-$file and $prefix$file for executables, libraries, "
+ "includes, and data files used by the compiler. $prefix may or may not be a directory">;
+def gcc_toolchain : Joined<["--"], "gcc-toolchain=">, Flags<[NoXarchOption]>,
+ HelpText<"Search for GCC installation in the specified directory on targets which commonly use GCC. "
+ "The directory usually contains 'lib{,32,64}/gcc{,-cross}/$triple' and 'include'. If specified, "
+ "sysroot is skipped for GCC detection. Note: executables (e.g. ld) used by the compiler are not "
+ "overridden by the selected GCC installation">;
def CC : Flag<["-"], "CC">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Include comments from within macros in preprocessed output">,
MarshallingInfoFlag<PreprocessorOutputOpts<"ShowMacroComments">>;
@@ -605,6 +643,12 @@ def G_EQ : Joined<["-"], "G=">, Flags<[NoXarchOption]>, Group<m_Group>, Alias<G>
def H : Flag<["-"], "H">, Flags<[CC1Option]>, Group<Preprocessor_Group>,
HelpText<"Show header includes and nesting depth">,
MarshallingInfoFlag<DependencyOutputOpts<"ShowHeaderIncludes">>;
+def fshow_skipped_includes : Flag<["-"], "fshow-skipped-includes">,
+ Flags<[CC1Option]>, HelpText<"Show skipped includes in -H output.">,
+ DocBrief<[{#include files may be "skipped" due to include guard optimization
+ or #pragma once. This flag makes -H show also such includes.}]>,
+ MarshallingInfoFlag<DependencyOutputOpts<"ShowSkippedHeaderIncludes">>;
+
def I_ : Flag<["-"], "I-">, Group<I_Group>,
HelpText<"Restrict all prior -I flags to double-quoted inclusion and "
"remove current directory from include path">;
@@ -664,8 +708,10 @@ def Qy : Flag<["-"], "Qy">, Flags<[CC1Option]>,
HelpText<"Emit metadata containing compiler name and version">;
def Qn : Flag<["-"], "Qn">, Flags<[CC1Option]>,
HelpText<"Do not emit metadata containing compiler name and version">;
-def : Flag<["-"], "fident">, Group<f_Group>, Alias<Qy>, Flags<[CC1Option]>;
-def : Flag<["-"], "fno-ident">, Group<f_Group>, Alias<Qn>, Flags<[CC1Option]>;
+def : Flag<["-"], "fident">, Group<f_Group>, Alias<Qy>,
+ Flags<[CoreOption, CC1Option]>;
+def : Flag<["-"], "fno-ident">, Group<f_Group>, Alias<Qn>,
+ Flags<[CoreOption, CC1Option]>;
def Qunused_arguments : Flag<["-"], "Qunused-arguments">, Flags<[NoXarchOption, CoreOption]>,
HelpText<"Don't emit warning for unused driver arguments">;
def Q : Flag<["-"], "Q">, IgnoredGCCCompat;
@@ -720,7 +766,7 @@ def Wundef_prefix_EQ : CommaJoined<["-"], "Wundef-prefix=">, Group<W_value_Group
MarshallingInfoStringVector<DiagnosticOpts<"UndefPrefixes">>;
def Wwrite_strings : Flag<["-"], "Wwrite-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
def Wno_write_strings : Flag<["-"], "Wno-write-strings">, Group<W_Group>, Flags<[CC1Option, HelpHidden]>;
-def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option, CoreOption]>,
+def W_Joined : Joined<["-"], "W">, Group<W_Group>, Flags<[CC1Option, CoreOption, FC1Option, FlangOption]>,
MetaVarName<"<warning>">, HelpText<"Enable the specified warning">;
def Xanalyzer : Separate<["-"], "Xanalyzer">,
HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">,
@@ -802,10 +848,12 @@ def cl_denorms_are_zero : Flag<["-"], "cl-denorms-are-zero">, Group<opencl_Group
HelpText<"OpenCL only. Allow denormals to be flushed to zero.">;
def cl_fp32_correctly_rounded_divide_sqrt : Flag<["-"], "cl-fp32-correctly-rounded-divide-sqrt">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Specify that single precision floating-point divide and sqrt used in the program source are correctly rounded.">,
- MarshallingInfoFlag<CodeGenOpts<"CorrectlyRoundedDivSqrt">>;
+ MarshallingInfoFlag<CodeGenOpts<"OpenCLCorrectlyRoundedDivSqrt">>;
def cl_uniform_work_group_size : Flag<["-"], "cl-uniform-work-group-size">, Group<opencl_Group>, Flags<[CC1Option]>,
HelpText<"OpenCL only. Defines that the global work-size be a multiple of the work-group size specified to clEnqueueNDRangeKernel">,
MarshallingInfoFlag<CodeGenOpts<"UniformWGSize">>;
+def cl_no_stdinc : Flag<["-"], "cl-no-stdinc">, Group<opencl_Group>,
+ HelpText<"OpenCL only. Disables all standard includes containing non-native compiler types and functions.">;
def client__name : JoinedOrSeparate<["-"], "client_name">;
def combine : Flag<["-", "--"], "combine">, Flags<[NoXarchOption, Unsupported]>;
def compatibility__version : JoinedOrSeparate<["-"], "compatibility_version">;
@@ -869,20 +917,29 @@ def cuda_path_ignore_env : Flag<["--"], "cuda-path-ignore-env">, Group<i_Group>,
HelpText<"Ignore environment variables to detect CUDA installation">;
def ptxas_path_EQ : Joined<["--"], "ptxas-path=">, Group<i_Group>,
HelpText<"Path to ptxas (used for compiling CUDA code)">;
+def fgpu_flush_denormals_to_zero : Flag<["-"], "fgpu-flush-denormals-to-zero">,
+ HelpText<"Flush denormal floating point values to zero in CUDA/HIP device mode.">;
+def fno_gpu_flush_denormals_to_zero : Flag<["-"], "fno-gpu-flush-denormals-to-zero">;
def fcuda_flush_denormals_to_zero : Flag<["-"], "fcuda-flush-denormals-to-zero">,
- HelpText<"Flush denormal floating point values to zero in CUDA device mode.">;
-def fno_cuda_flush_denormals_to_zero : Flag<["-"], "fno-cuda-flush-denormals-to-zero">;
+ Alias<fgpu_flush_denormals_to_zero>;
+def fno_cuda_flush_denormals_to_zero : Flag<["-"], "fno-cuda-flush-denormals-to-zero">,
+ Alias<fno_gpu_flush_denormals_to_zero>;
defm gpu_rdc : BoolFOption<"gpu-rdc",
LangOpts<"GPURelocatableDeviceCode">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Generate relocatable device code, also known as separate compilation mode">,
NegFlag<SetFalse>>;
def : Flag<["-"], "fcuda-rdc">, Alias<fgpu_rdc>;
def : Flag<["-"], "fno-cuda-rdc">, Alias<fno_gpu_rdc>;
-defm cuda_short_ptr : OptInFFlag<"cuda-short-ptr",
- "Use 32-bit pointers for accessing const/local/shared address spaces", "", "",
- [], TargetOpts<"NVPTXUseShortPointers">>;
+defm cuda_short_ptr : BoolFOption<"cuda-short-ptr",
+ TargetOpts<"NVPTXUseShortPointers">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Use 32-bit pointers for accessing const/local/shared address spaces">,
+ NegFlag<SetFalse>>;
def rocm_path_EQ : Joined<["--"], "rocm-path=">, Group<i_Group>,
HelpText<"ROCm installation path, used for finding and automatically linking required bitcode libraries.">;
+def hip_path_EQ : Joined<["--"], "hip-path=">, Group<i_Group>,
+ HelpText<"HIP runtime installation path, used for finding HIP version and adding HIP include path.">;
+def amdgpu_arch_tool_EQ : Joined<["--"], "amdgpu-arch-tool=">, Group<i_Group>,
+ HelpText<"Tool used for detecting AMD GPU arch in the system.">;
def rocm_device_lib_path_EQ : Joined<["--"], "rocm-device-lib-path=">, Group<Link_Group>,
HelpText<"ROCm device library path. Alternative to rocm-path.">;
def : Joined<["--"], "hip-device-lib-path=">, Alias<rocm_device_lib_path_EQ>;
@@ -896,10 +953,17 @@ defm hip_new_launch_api : BoolFOption<"hip-new-launch-api",
LangOpts<"HIPUseNewLaunchAPI">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
BothFlags<[], " new kernel launching API for HIP">>;
+defm hip_fp32_correctly_rounded_divide_sqrt : BoolFOption<"hip-fp32-correctly-rounded-divide-sqrt",
+ CodeGenOpts<"HIPCorrectlyRoundedDivSqrt">, DefaultTrue,
+ PosFlag<SetTrue, [], "Specify">,
+ NegFlag<SetFalse, [CC1Option], "Don't specify">,
+ BothFlags<[], " that single precision floating-point divide and sqrt used in "
+ "the program source are correctly rounded (HIP device compilation only)">>,
+ ShouldParseIf<hip.KeyPath>;
defm gpu_allow_device_init : BoolFOption<"gpu-allow-device-init",
LangOpts<"GPUAllowDeviceInit">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Allow">, NegFlag<SetFalse, [], "Don't allow">,
- BothFlags<[], " device side init function in HIP">>,
+ BothFlags<[], " device side init function in HIP (experimental)">>,
ShouldParseIf<hip.KeyPath>;
defm gpu_defer_diag : BoolFOption<"gpu-defer-diag",
LangOpts<"GPUDeferDiag">, DefaultFalse,
@@ -913,11 +977,36 @@ defm gpu_exclude_wrong_side_overloads : BoolFOption<"gpu-exclude-wrong-side-over
def gpu_max_threads_per_block_EQ : Joined<["--"], "gpu-max-threads-per-block=">,
Flags<[CC1Option]>,
HelpText<"Default max threads per block for kernel launch bounds for HIP">,
- MarshallingInfoStringInt<LangOpts<"GPUMaxThreadsPerBlock">, "256">,
+ MarshallingInfoInt<LangOpts<"GPUMaxThreadsPerBlock">, "1024">,
ShouldParseIf<hip.KeyPath>;
+def fgpu_inline_threshold_EQ : Joined<["-"], "fgpu-inline-threshold=">,
+ Flags<[HelpHidden]>,
+ HelpText<"Inline threshold for device compilation for CUDA/HIP">;
def gpu_instrument_lib_EQ : Joined<["--"], "gpu-instrument-lib=">,
HelpText<"Instrument device library for HIP, which is a LLVM bitcode containing "
"__cyg_profile_func_enter and __cyg_profile_func_exit">;
+def fgpu_sanitize : Flag<["-"], "fgpu-sanitize">, Group<f_Group>,
+ HelpText<"Enable sanitizer for AMDGPU target">;
+def fno_gpu_sanitize : Flag<["-"], "fno-gpu-sanitize">, Group<f_Group>;
+def gpu_bundle_output : Flag<["--"], "gpu-bundle-output">,
+ Group<f_Group>, HelpText<"Bundle output files of HIP device compilation">;
+def no_gpu_bundle_output : Flag<["--"], "no-gpu-bundle-output">,
+ Group<f_Group>, HelpText<"Do not bundle output files of HIP device compilation">;
+def cuid_EQ : Joined<["-"], "cuid=">, Flags<[CC1Option]>,
+ HelpText<"An ID for compilation unit, which should be the same for the same "
+ "compilation unit but different for different compilation units. "
+ "It is used to externalize device-side static variables for single "
+ "source offloading languages CUDA and HIP so that they can be "
+ "accessed by the host code of the same compilation unit.">,
+ MarshallingInfoString<LangOpts<"CUID">>;
+def fuse_cuid_EQ : Joined<["-"], "fuse-cuid=">,
+ HelpText<"Method to generate ID's for compilation units for single source "
+ "offloading languages CUDA and HIP: 'hash' (ID's generated by hashing "
+ "file path and command line options) | 'random' (ID's generated as "
+ "random numbers) | 'none' (disabled). Default is 'hash'. This option "
+ "will be overriden by option '-cuid=[ID]' if it is specified." >;
+def libomptarget_amdgcn_bc_path_EQ : Joined<["--"], "libomptarget-amdgcn-bc-path=">, Group<i_Group>,
+ HelpText<"Path to libomptarget-amdgcn bitcode library">;
def libomptarget_nvptx_bc_path_EQ : Joined<["--"], "libomptarget-nvptx-bc-path=">, Group<i_Group>,
HelpText<"Path to libomptarget-nvptx bitcode library">;
def dD : Flag<["-"], "dD">, Group<d_Group>, Flags<[CC1Option]>,
@@ -964,7 +1053,7 @@ def exported__symbols__list : Separate<["-"], "exported_symbols_list">;
def e : JoinedOrSeparate<["-"], "e">, Flags<[LinkerInput]>, Group<Link_Group>;
def fmax_tokens_EQ : Joined<["-"], "fmax-tokens=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Max total number of preprocessed tokens for -Wmax-tokens.">,
- MarshallingInfoStringInt<LangOpts<"MaxTokens">>;
+ MarshallingInfoInt<LangOpts<"MaxTokens">>;
def fPIC : Flag<["-"], "fPIC">, Group<f_Group>;
def fno_PIC : Flag<["-"], "fno-PIC">, Group<f_Group>;
def fPIE : Flag<["-"], "fPIE">, Group<f_Group>;
@@ -988,8 +1077,10 @@ defm apple_pragma_pack : BoolFOption<"apple-pragma-pack",
LangOpts<"ApplePragmaPack">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable Apple gcc-compatible #pragma pack handling">,
NegFlag<SetFalse>>;
-def fxl_pragma_pack : Flag<["-"], "fxl-pragma-pack">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable IBM XL #pragma pack handling">;
+defm xl_pragma_pack : BoolFOption<"xl-pragma-pack",
+ LangOpts<"XLPragmaPack">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable IBM XL #pragma pack handling">,
+ NegFlag<SetFalse>>;
def shared_libsan : Flag<["-"], "shared-libsan">,
HelpText<"Dynamically link the sanitizer runtime">;
def static_libsan : Flag<["-"], "static-libsan">,
@@ -997,20 +1088,16 @@ def static_libsan : Flag<["-"], "static-libsan">,
def : Flag<["-"], "shared-libasan">, Alias<shared_libsan>;
def fasm : Flag<["-"], "fasm">, Group<f_Group>;
-defm asm_blocks : OptInFFlag<"asm-blocks", "">;
-
def fassume_sane_operator_new : Flag<["-"], "fassume-sane-operator-new">, Group<f_Group>;
def fastcp : Flag<["-"], "fastcp">, Group<f_Group>;
def fastf : Flag<["-"], "fastf">, Group<f_Group>;
def fast : Flag<["-"], "fast">, Group<f_Group>;
def fasynchronous_unwind_tables : Flag<["-"], "fasynchronous-unwind-tables">, Group<f_Group>;
-def fdouble_square_bracket_attributes : Flag<[ "-" ], "fdouble-square-bracket-attributes">,
- Group<f_Group>, Flags<[NoXarchOption, CC1Option]>,
- HelpText<"Enable '[[]]' attributes in all C and C++ language modes">;
-def fno_double_square_bracket_attributes : Flag<[ "-" ], "fno-double-square-bracket-attributes">,
- Group<f_Group>, Flags<[NoXarchOption, CC1Option]>,
- HelpText<"Disable '[[]]' attributes in all C and C++ language modes">;
+defm double_square_bracket_attributes : BoolFOption<"double-square-bracket-attributes",
+ LangOpts<"DoubleSquareBracketAttributes">, Default<!strconcat(cpp11.KeyPath, "||", c2x.KeyPath)>,
+ PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
+ BothFlags<[NoXarchOption, CC1Option], " '[[]]' attributes in all C and C++ language modes">>;
defm autolink : BoolFOption<"autolink",
CodeGenOpts<"Autolink">, DefaultTrue,
@@ -1018,14 +1105,17 @@ defm autolink : BoolFOption<"autolink",
PosFlag<SetTrue>>;
// C++ Coroutines TS
-defm coroutines_ts : OptInFFlag<"coroutines-ts", "Enable support for the C++ Coroutines TS">;
+defm coroutines_ts : BoolFOption<"coroutines-ts",
+ LangOpts<"Coroutines">, Default<cpp20.KeyPath>,
+ PosFlag<SetTrue, [CC1Option], "Enable support for the C++ Coroutines TS">,
+ NegFlag<SetFalse>>;
def fembed_bitcode_EQ : Joined<["-"], "fembed-bitcode=">,
Group<f_Group>, Flags<[NoXarchOption, CC1Option, CC1AsOption]>, MetaVarName<"<option>">,
HelpText<"Embed LLVM bitcode (option: off, all, bitcode, marker)">,
Values<"off,all,bitcode,marker">, NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["Embed_Off", "Embed_All", "Embed_Bitcode", "Embed_Marker"]>,
- MarshallingInfoString<CodeGenOpts<"EmbedBitcode">, "Embed_Off">, AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"EmbedBitcode">, "Embed_Off">;
def fembed_bitcode : Flag<["-"], "fembed-bitcode">, Group<f_Group>,
Alias<fembed_bitcode_EQ>, AliasArgs<["all"]>,
HelpText<"Embed LLVM IR bitcode as data">;
@@ -1064,13 +1154,20 @@ def fauto_profile_accurate : Flag<["-"], "fauto-profile-accurate">,
Group<f_Group>, Alias<fprofile_sample_accurate>;
def fno_auto_profile_accurate : Flag<["-"], "fno-auto-profile-accurate">,
Group<f_Group>, Alias<fno_profile_sample_accurate>;
-def fdebug_compilation_dir : Separate<["-"], "fdebug-compilation-dir">,
+def fdebug_compilation_dir_EQ : Joined<["-"], "fdebug-compilation-dir=">,
Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
- HelpText<"The compilation directory to embed in the debug info.">,
+ HelpText<"The compilation directory to embed in the debug info">,
MarshallingInfoString<CodeGenOpts<"DebugCompilationDir">>;
-def fdebug_compilation_dir_EQ : Joined<["-"], "fdebug-compilation-dir=">,
+def fdebug_compilation_dir : Separate<["-"], "fdebug-compilation-dir">,
+ Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
+ Alias<fdebug_compilation_dir_EQ>;
+def fcoverage_compilation_dir_EQ : Joined<["-"], "fcoverage-compilation-dir=">,
Group<f_Group>, Flags<[CC1Option, CC1AsOption, CoreOption]>,
- Alias<fdebug_compilation_dir>;
+ HelpText<"The compilation directory to embed in the coverage mapping.">,
+ MarshallingInfoString<CodeGenOpts<"CoverageCompilationDir">>;
+def ffile_compilation_dir_EQ : Joined<["-"], "ffile-compilation-dir=">, Group<f_Group>,
+ Flags<[CoreOption]>,
+ HelpText<"The compilation directory to embed in the debug info and coverage mapping.">;
defm debug_info_for_profiling : BoolFOption<"debug-info-for-profiling",
CodeGenOpts<"DebugInfoForProfiling">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Emit extra debug info to make sample profile more accurate">,
@@ -1090,8 +1187,6 @@ def fprofile_remapping_file_EQ : Joined<["-"], "fprofile-remapping-file=">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>, MetaVarName<"<file>">,
HelpText<"Use the remappings described in <file> to match the profile data against names in the program">,
MarshallingInfoString<CodeGenOpts<"ProfileRemappingFile">>;
-def fprofile_remapping_file : Separate<["-"], "fprofile-remapping-file">,
- Group<f_Group>, Flags<[CoreOption]>, Alias<fprofile_remapping_file_EQ>;
defm coverage_mapping : BoolFOption<"coverage-mapping",
CodeGenOpts<"CoverageMapping">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Generate coverage mapping to enable code coverage analysis">,
@@ -1147,7 +1242,7 @@ def fprofile_update_EQ : Joined<["-"], "fprofile-update=">,
defm pseudo_probe_for_profiling : BoolFOption<"pseudo-probe-for-profiling",
CodeGenOpts<"PseudoProbeForProfiling">, DefaultFalse,
PosFlag<SetTrue, [], "Emit">, NegFlag<SetFalse, [], "Do not emit">,
- BothFlags<[NoXarchOption, CC1Option], " pseudo probes for sample profiler">>;
+ BothFlags<[NoXarchOption, CC1Option], " pseudo probes for sample profiling">>;
def forder_file_instrumentation : Flag<["-"], "forder-file-instrumentation">,
Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Generate instrumented code to collect order file into default.profraw file (overridden by '=' form of option or LLVM_PROFILE_FILE env var)">;
@@ -1209,7 +1304,7 @@ def fcf_runtime_abi_EQ : Joined<["-"], "fcf-runtime-abi=">, Group<f_Group>,
Flags<[CC1Option]>, Values<"unspecified,standalone,objc,swift,swift-5.0,swift-4.2,swift-4.1">,
NormalizedValuesScope<"LangOptions::CoreFoundationABI">,
NormalizedValues<["ObjectiveC", "ObjectiveC", "ObjectiveC", "Swift5_0", "Swift5_0", "Swift4_2", "Swift4_1"]>,
- MarshallingInfoString<LangOpts<"CFRuntime">, "ObjectiveC">, AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"CFRuntime">, "ObjectiveC">;
defm constant_cfstrings : BoolFOption<"constant-cfstrings",
LangOpts<"NoConstantCFStrings">, DefaultFalse,
NegFlag<SetTrue, [CC1Option], "Disable creation of CodeFoundation-type constant strings">,
@@ -1224,11 +1319,16 @@ def fconstexpr_backtrace_limit_EQ : Joined<["-"], "fconstexpr-backtrace-limit=">
Group<f_Group>;
def fno_crash_diagnostics : Flag<["-"], "fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>,
HelpText<"Disable auto-generation of preprocessed source files and a script for reproduction during a clang crash">;
-def fcrash_diagnostics_dir : Joined<["-"], "fcrash-diagnostics-dir=">, Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>;
+def fcrash_diagnostics_dir : Joined<["-"], "fcrash-diagnostics-dir=">,
+ Group<f_clang_Group>, Flags<[NoArgumentUnused, CoreOption]>,
+ HelpText<"Put crash-report files in <dir>">, MetaVarName<"<dir>">;
def fcreate_profile : Flag<["-"], "fcreate-profile">, Group<f_Group>;
defm cxx_exceptions: BoolFOption<"cxx-exceptions",
LangOpts<"CXXExceptions">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable C++ exceptions">, NegFlag<SetFalse>>;
+defm async_exceptions: BoolFOption<"async-exceptions",
+ LangOpts<"EHAsynch">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable EH Asynchronous exceptions">, NegFlag<SetFalse>>;
def fcxx_modules : Flag <["-"], "fcxx-modules">, Group<f_Group>,
Flags<[NoXarchOption]>;
def fdebug_pass_arguments : Flag<["-"], "fdebug-pass-arguments">, Group<f_Group>;
@@ -1267,14 +1367,14 @@ def fdiagnostics_show_template_tree : Flag<["-"], "fdiagnostics-show-template-tr
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Print a template comparison tree for differing templates">,
MarshallingInfoFlag<DiagnosticOpts<"ShowTemplateTree">>;
-def fdeclspec : Flag<["-"], "fdeclspec">, Group<f_clang_Group>,
- HelpText<"Allow __declspec as a keyword">, Flags<[CC1Option]>;
def fdiscard_value_names : Flag<["-"], "fdiscard-value-names">, Group<f_clang_Group>,
HelpText<"Discard value names in LLVM IR">, Flags<[NoXarchOption]>;
def fno_discard_value_names : Flag<["-"], "fno-discard-value-names">, Group<f_clang_Group>,
HelpText<"Do not discard value names in LLVM IR">, Flags<[NoXarchOption]>;
-def fdollars_in_identifiers : Flag<["-"], "fdollars-in-identifiers">, Group<f_Group>,
- HelpText<"Allow '$' in identifiers">, Flags<[CC1Option]>;
+defm dollars_in_identifiers : BoolFOption<"dollars-in-identifiers",
+ LangOpts<"DollarIdents">, Default<!strconcat("!", asm_preprocessor.KeyPath)>,
+ PosFlag<SetTrue, [], "Allow">, NegFlag<SetFalse, [], "Disallow">,
+ BothFlags<[CC1Option], " '$' in identifiers">>;
def fdwarf2_cfi_asm : Flag<["-"], "fdwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
def fno_dwarf2_cfi_asm : Flag<["-"], "fno-dwarf2-cfi-asm">, Group<clang_ignored_f_Group>;
defm dwarf_directory_asm : BoolFOption<"dwarf-directory-asm",
@@ -1315,10 +1415,9 @@ def fwasm_exceptions : Flag<["-"], "fwasm-exceptions">, Group<f_Group>,
def exception_model : Separate<["-"], "exception-model">,
Flags<[CC1Option, NoDriverOption]>, HelpText<"The exception model: dwarf|sjlj|seh|wasm">,
Values<"dwarf,sjlj,seh,wasm">,
- NormalizedValuesScope<"llvm::ExceptionHandling">,
+ NormalizedValuesScope<"LangOptions::ExceptionHandlingKind">,
NormalizedValues<["DwarfCFI", "SjLj", "WinEH", "Wasm"]>,
- MarshallingInfoString<LangOpts<"ExceptionHandling">, "None">,
- AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"ExceptionHandling">, "None">;
def exception_model_EQ : Joined<["-"], "exception-model=">,
Flags<[CC1Option, NoDriverOption]>, Alias<exception_model>;
def fignore_exceptions : Flag<["-"], "fignore-exceptions">, Group<f_Group>, Flags<[CC1Option]>,
@@ -1338,14 +1437,32 @@ def fdenormal_fp_math_EQ : Joined<["-"], "fdenormal-fp-math=">, Group<f_Group>,
def ffp_model_EQ : Joined<["-"], "ffp-model=">, Group<f_Group>, Flags<[NoXarchOption]>,
HelpText<"Controls the semantics of floating-point calculations.">;
def ffp_exception_behavior_EQ : Joined<["-"], "ffp-exception-behavior=">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Specifies the exception behavior of floating-point operations.">;
-defm fast_math : OptInFFlag<"fast-math", "Allow aggressive, lossy floating-point optimizations", "", "", [],
- LangOpts<"FastMath">, [cl_fast_relaxed_math.KeyPath]>;
+ HelpText<"Specifies the exception behavior of floating-point operations.">,
+ Values<"ignore,maytrap,strict">, NormalizedValuesScope<"LangOptions">,
+ NormalizedValues<["FPE_Ignore", "FPE_MayTrap", "FPE_Strict"]>,
+ MarshallingInfoEnum<LangOpts<"FPExceptionMode">, "FPE_Ignore">;
+defm fast_math : BoolFOption<"fast-math",
+ LangOpts<"FastMath">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Allow aggressive, lossy floating-point optimizations",
+ [cl_fast_relaxed_math.KeyPath]>,
+ NegFlag<SetFalse>>;
def menable_unsafe_fp_math : Flag<["-"], "menable-unsafe-fp-math">, Flags<[CC1Option]>,
HelpText<"Allow unsafe floating-point math optimizations which may decrease precision">,
MarshallingInfoFlag<LangOpts<"UnsafeFPMath">>,
ImpliedByAnyOf<[cl_unsafe_math_optimizations.KeyPath, ffast_math.KeyPath]>;
-defm math_errno : OptInFFlag<"math-errno", "Require math functions to indicate errors by setting errno">;
+defm math_errno : BoolFOption<"math-errno",
+ LangOpts<"MathErrno">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Require math functions to indicate errors by setting errno">,
+ NegFlag<SetFalse>>,
+ ShouldParseIf<!strconcat("!", open_cl.KeyPath)>;
+def fextend_args_EQ : Joined<["-"], "fextend-arguments=">, Group<f_Group>,
+ Flags<[CC1Option, NoArgumentUnused]>,
+ HelpText<"Controls how scalar integer arguments are extended in calls "
+ "to unprototyped and varargs functions">,
+ Values<"32,64">,
+ NormalizedValues<["ExtendTo32", "ExtendTo64"]>,
+ NormalizedValuesScope<"LangOptions::ExtendArgsKind">,
+ MarshallingInfoEnum<LangOpts<"ExtendIntArgs">,"ExtendTo32">;
def fbracket_depth_EQ : Joined<["-"], "fbracket-depth=">, Group<f_Group>, Flags<[CoreOption]>;
def fsignaling_math : Flag<["-"], "fsignaling-math">, Group<f_Group>;
def fno_signaling_math : Flag<["-"], "fno-signaling-math">, Group<f_Group>;
@@ -1353,12 +1470,18 @@ defm jump_tables : BoolFOption<"jump-tables",
CodeGenOpts<"NoUseJumpTables">, DefaultFalse,
NegFlag<SetTrue, [CC1Option], "Do not use">, PosFlag<SetFalse, [], "Use">,
BothFlags<[], " jump tables for lowering switches">>;
-defm force_enable_int128 : OptInFFlag<"force-enable-int128", "Enable", "Disable", " support for int128_t type", [], TargetOpts<"ForceEnableInt128">>;
+defm force_enable_int128 : BoolFOption<"force-enable-int128",
+ TargetOpts<"ForceEnableInt128">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
+ BothFlags<[], " support for int128_t type">>;
defm keep_static_consts : BoolFOption<"keep-static-consts",
CodeGenOpts<"KeepStaticConsts">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Keep">, NegFlag<SetFalse, [], "Don't keep">,
BothFlags<[NoXarchOption], " static const variables if unused">>;
-defm fixed_point : OptInFFlag<"fixed-point", "Enable", "Disable", " fixed point types">;
+defm fixed_point : BoolFOption<"fixed-point",
+ LangOpts<"FixedPoint">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
+ BothFlags<[], " fixed point types">>, ShouldParseIf<!strconcat("!", cplusplus.KeyPath)>;
defm cxx_static_destructors : BoolFOption<"c++-static-destructors",
LangOpts<"RegisterStaticDestructors">, DefaultTrue,
NegFlag<SetFalse, [CC1Option], "Disable C++ static destructor registration">,
@@ -1381,25 +1504,33 @@ def fsanitize_EQ : CommaJoined<["-"], "fsanitize=">, Group<f_clang_Group>,
"or suspicious behavior. See user manual for available checks">;
def fno_sanitize_EQ : CommaJoined<["-"], "fno-sanitize=">, Group<f_clang_Group>,
Flags<[CoreOption, NoXarchOption]>;
-def fsanitize_blacklist : Joined<["-"], "fsanitize-blacklist=">,
- Group<f_clang_Group>,
- HelpText<"Path to blacklist file for sanitizers">,
- MarshallingInfoStringVector<LangOpts<"SanitizerBlacklistFiles">>;
-def fsanitize_system_blacklist : Joined<["-"], "fsanitize-system-blacklist=">,
- HelpText<"Path to system blacklist file for sanitizers">,
- Flags<[CC1Option]>;
-def fno_sanitize_blacklist : Flag<["-"], "fno-sanitize-blacklist">,
- Group<f_clang_Group>,
- HelpText<"Don't use blacklist file for sanitizers">;
-def fsanitize_coverage
- : CommaJoined<["-"], "fsanitize-coverage=">,
- Group<f_clang_Group>,
- HelpText<"Specify the type of coverage instrumentation for Sanitizers">;
-def fno_sanitize_coverage
- : CommaJoined<["-"], "fno-sanitize-coverage=">,
- Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
- HelpText<"Disable specified features of coverage instrumentation for "
- "Sanitizers">, Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters,inline-bool-flag">;
+
+def fsanitize_ignorelist_EQ : Joined<["-"], "fsanitize-ignorelist=">,
+ Group<f_clang_Group>, HelpText<"Path to ignorelist file for sanitizers">;
+def : Joined<["-"], "fsanitize-blacklist=">,
+ Group<f_clang_Group>, Flags<[HelpHidden]>, Alias<fsanitize_ignorelist_EQ>,
+ HelpText<"Alias for -fsanitize-ignorelist=">;
+
+def fsanitize_system_ignorelist_EQ : Joined<["-"], "fsanitize-system-ignorelist=">,
+ HelpText<"Path to system ignorelist file for sanitizers">, Flags<[CC1Option]>;
+def : Joined<["-"], "fsanitize-system-blacklist=">,
+ HelpText<"Alias for -fsanitize-system-ignorelist=">,
+ Flags<[CC1Option, HelpHidden]>, Alias<fsanitize_system_ignorelist_EQ>;
+
+def fno_sanitize_ignorelist : Flag<["-"], "fno-sanitize-ignorelist">,
+ Group<f_clang_Group>, HelpText<"Don't use ignorelist file for sanitizers">;
+def : Flag<["-"], "fno-sanitize-blacklist">,
+ Group<f_clang_Group>, Flags<[HelpHidden]>, Alias<fno_sanitize_ignorelist>;
+
+def fsanitize_coverage : CommaJoined<["-"], "fsanitize-coverage=">,
+ Group<f_clang_Group>,
+ HelpText<"Specify the type of coverage instrumentation for Sanitizers">;
+def fno_sanitize_coverage : CommaJoined<["-"], "fno-sanitize-coverage=">,
+ Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ HelpText<"Disable features of coverage instrumentation for Sanitizers">,
+ Values<"func,bb,edge,indirect-calls,trace-bb,trace-cmp,trace-div,trace-gep,"
+ "8bit-counters,trace-pc,trace-pc-guard,no-prune,inline-8bit-counters,"
+ "inline-bool-flag">;
def fsanitize_coverage_allowlist : Joined<["-"], "fsanitize-coverage-allowlist=">,
Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
HelpText<"Restrict sanitizer coverage instrumentation exclusively to modules and functions that match the provided special case list, except the blocked ones">,
@@ -1407,17 +1538,19 @@ def fsanitize_coverage_allowlist : Joined<["-"], "fsanitize-coverage-allowlist="
def : Joined<["-"], "fsanitize-coverage-whitelist=">,
Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>, Alias<fsanitize_coverage_allowlist>,
HelpText<"Deprecated, use -fsanitize-coverage-allowlist= instead">;
-def fsanitize_coverage_blocklist : Joined<["-"], "fsanitize-coverage-blocklist=">,
+def fsanitize_coverage_ignorelist : Joined<["-"], "fsanitize-coverage-ignorelist=">,
Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
- HelpText<"Disable sanitizer coverage instrumentation for modules and functions that match the provided special case list, even the allowed ones">,
- MarshallingInfoStringVector<CodeGenOpts<"SanitizeCoverageBlocklistFiles">>;
+ HelpText<"Disable sanitizer coverage instrumentation for modules and functions "
+ "that match the provided special case list, even the allowed ones">,
+ MarshallingInfoStringVector<CodeGenOpts<"SanitizeCoverageIgnorelistFiles">>;
def : Joined<["-"], "fsanitize-coverage-blacklist=">,
- Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>, Alias<fsanitize_coverage_blocklist>,
- HelpText<"Deprecated, use -fsanitize-coverage-blocklist= instead">;
+ Group<f_clang_Group>, Flags<[CoreOption, HelpHidden]>,
+ Alias<fsanitize_coverage_ignorelist>,
+ HelpText<"Deprecated, use -fsanitize-coverage-ignorelist= instead">;
def fsanitize_memory_track_origins_EQ : Joined<["-"], "fsanitize-memory-track-origins=">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">,
- MarshallingInfoStringInt<CodeGenOpts<"SanitizeMemoryTrackOrigins">>;
+ MarshallingInfoInt<CodeGenOpts<"SanitizeMemoryTrackOrigins">>;
def fsanitize_memory_track_origins : Flag<["-"], "fsanitize-memory-track-origins">,
Group<f_clang_Group>,
HelpText<"Enable origins tracking in MemorySanitizer">;
@@ -1425,6 +1558,20 @@ def fno_sanitize_memory_track_origins : Flag<["-"], "fno-sanitize-memory-track-o
Group<f_clang_Group>,
Flags<[CoreOption, NoXarchOption]>,
HelpText<"Disable origins tracking in MemorySanitizer">;
+def fsanitize_address_outline_instrumentation : Flag<["-"], "fsanitize-address-outline-instrumentation">,
+ Group<f_clang_Group>,
+ HelpText<"Always generate function calls for address sanitizer instrumentation">;
+def fno_sanitize_address_outline_instrumentation : Flag<["-"], "fno-sanitize-address-outline-instrumentation">,
+ Group<f_clang_Group>,
+ HelpText<"Use default code inlining logic for the address sanitizer">;
+def fsanitize_hwaddress_experimental_aliasing
+ : Flag<["-"], "fsanitize-hwaddress-experimental-aliasing">,
+ Group<f_clang_Group>,
+ HelpText<"Enable aliasing mode in HWAddressSanitizer">;
+def fno_sanitize_hwaddress_experimental_aliasing
+ : Flag<["-"], "fno-sanitize-hwaddress-experimental-aliasing">,
+ Group<f_clang_Group>, Flags<[CoreOption, NoXarchOption]>,
+ HelpText<"Disable aliasing mode in HWAddressSanitizer">;
defm sanitize_memory_use_after_dtor : BoolOption<"f", "sanitize-memory-use-after-dtor",
CodeGenOpts<"SanitizeMemoryUseAfterDtor">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
@@ -1433,12 +1580,22 @@ defm sanitize_memory_use_after_dtor : BoolOption<"f", "sanitize-memory-use-after
def fsanitize_address_field_padding : Joined<["-"], "fsanitize-address-field-padding=">,
Group<f_clang_Group>,
HelpText<"Level of field padding for AddressSanitizer">,
- MarshallingInfoStringInt<LangOpts<"SanitizeAddressFieldPadding">>;
+ MarshallingInfoInt<LangOpts<"SanitizeAddressFieldPadding">>;
defm sanitize_address_use_after_scope : BoolOption<"f", "sanitize-address-use-after-scope",
CodeGenOpts<"SanitizeAddressUseAfterScope">, DefaultFalse,
PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [CoreOption, NoXarchOption], "Disable">,
BothFlags<[], " use-after-scope detection in AddressSanitizer">>,
Group<f_clang_Group>;
+def sanitize_address_use_after_return_EQ
+ : Joined<["-"], "fsanitize-address-use-after-return=">,
+ MetaVarName<"<mode>">,
+ Flags<[CC1Option]>,
+ HelpText<"Select the mode of detecting stack use-after-return in AddressSanitizer: never | runtime (default) | always">,
+ Group<f_clang_Group>,
+ Values<"never,runtime,always">,
+ NormalizedValuesScope<"llvm::AsanDetectStackUseAfterReturnMode">,
+ NormalizedValues<["Never", "Runtime", "Always"]>,
+ MarshallingInfoEnum<CodeGenOpts<"SanitizeAddressUseAfterReturn">, "Runtime">;
defm sanitize_address_poison_custom_array_cookie : BoolOption<"f", "sanitize-address-poison-custom-array-cookie",
CodeGenOpts<"SanitizeAddressPoisonCustomArrayCookie">, DefaultFalse,
PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
@@ -1453,6 +1610,15 @@ defm sanitize_address_use_odr_indicator : BoolOption<"f", "sanitize-address-use-
" reports in partially sanitized programs at the cost of an increase in binary size">,
NegFlag<SetFalse, [], "Disable ODR indicator globals">>,
Group<f_clang_Group>;
+def sanitize_address_destructor_EQ
+ : Joined<["-"], "fsanitize-address-destructor=">,
+ Flags<[CC1Option]>,
+ HelpText<"Set destructor type used in ASan instrumentation">,
+ Group<f_clang_Group>,
+ Values<"none,global">,
+ NormalizedValuesScope<"llvm::AsanDtorKind">,
+ NormalizedValues<["None", "Global"]>,
+ MarshallingInfoEnum<CodeGenOpts<"SanitizeAddressDtor">, "Global">;
// Note: This flag was introduced when it was necessary to distinguish between
// ABI for correct codegen. This is no longer needed, but the flag is
// not removed since targeting either ABI will behave the same.
@@ -1548,7 +1714,7 @@ def fsanitize_undefined_strip_path_components_EQ : Joined<["-"], "fsanitize-unde
Group<f_clang_Group>, MetaVarName<"<number>">,
HelpText<"Strip (or keep only, if negative) a given number of path components "
"when emitting check metadata.">,
- MarshallingInfoStringInt<CodeGenOpts<"EmitCheckPathComponentsToStrip">, "0", "int">;
+ MarshallingInfoInt<CodeGenOpts<"EmitCheckPathComponentsToStrip">, "0", "int">;
} // end -f[no-]sanitize* flags
@@ -1558,12 +1724,17 @@ def fno_unsafe_math_optimizations : Flag<["-"], "fno-unsafe-math-optimizations">
Group<f_Group>;
def fassociative_math : Flag<["-"], "fassociative-math">, Group<f_Group>;
def fno_associative_math : Flag<["-"], "fno-associative-math">, Group<f_Group>;
-defm reciprocal_math : OptInFFlag<"reciprocal-math", "Allow division operations to be reassociated", "", "", [],
- LangOpts<"AllowRecip">, [menable_unsafe_fp_math.KeyPath]>;
+defm reciprocal_math : BoolFOption<"reciprocal-math",
+ LangOpts<"AllowRecip">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Allow division operations to be reassociated",
+ [menable_unsafe_fp_math.KeyPath]>,
+ NegFlag<SetFalse>>;
def fapprox_func : Flag<["-"], "fapprox-func">, Group<f_Group>, Flags<[CC1Option, NoDriverOption]>,
MarshallingInfoFlag<LangOpts<"ApproxFunc">>, ImpliedByAnyOf<[menable_unsafe_fp_math.KeyPath]>;
-defm finite_math_only : OptInFFlag<"finite-math-only", "", "", "", [],
- LangOpts<"FiniteMathOnly">, [cl_finite_math_only.KeyPath, ffast_math.KeyPath]>;
+defm finite_math_only : BoolFOption<"finite-math-only",
+ LangOpts<"FiniteMathOnly">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "", [cl_finite_math_only.KeyPath, ffast_math.KeyPath]>,
+ NegFlag<SetFalse>>;
defm signed_zeros : BoolFOption<"signed-zeros",
LangOpts<"NoSignedZero">, DefaultFalse,
NegFlag<SetTrue, [CC1Option], "Allow optimizations that ignore the sign of floating point zeros",
@@ -1597,6 +1768,13 @@ defm strict_float_cast_overflow : BoolFOption<"strict-float-cast-overflow",
" of the target's native float-to-int conversion instructions">,
PosFlag<SetTrue, [], "Assume that overflowing float-to-int casts are undefined (default)">>;
+defm protect_parens : BoolFOption<"protect-parens",
+ LangOpts<"ProtectParens">, DefaultFalse,
+ PosFlag<SetTrue, [CoreOption, CC1Option],
+ "Determines whether the optimizer honors parentheses when "
+ "floating-point expressions are evaluated">,
+ NegFlag<SetFalse>>;
+
def ffor_scope : Flag<["-"], "ffor-scope">, Group<f_Group>;
def fno_for_scope : Flag<["-"], "fno-for-scope">, Group<f_Group>;
@@ -1613,17 +1791,17 @@ defm delete_null_pointer_checks : BoolFOption<"delete-null-pointer-checks",
PosFlag<SetFalse, [], "Treat usage of null pointers as undefined behavior (default)">,
BothFlags<[CoreOption]>>;
-def frewrite_map_file : Separate<["-"], "frewrite-map-file">,
- Group<f_Group>,
- Flags<[ NoXarchOption, CC1Option ]>,
- MarshallingInfoStringVector<CodeGenOpts<"RewriteMapFiles">>;
def frewrite_map_file_EQ : Joined<["-"], "frewrite-map-file=">,
Group<f_Group>,
- Flags<[NoXarchOption]>;
+ Flags<[NoXarchOption, CC1Option]>,
+ MarshallingInfoStringVector<CodeGenOpts<"RewriteMapFiles">>;
defm use_line_directives : BoolFOption<"use-line-directives",
PreprocessorOutputOpts<"UseLineDirectives">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use #line in preprocessed output">, NegFlag<SetFalse>>;
+defm minimize_whitespace : BoolFOption<"minimize-whitespace",
+ PreprocessorOutputOpts<"MinimizeWhitespace">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Minimize whitespace when emitting preprocessor output">, NegFlag<SetFalse>>;
def ffreestanding : Flag<["-"], "ffreestanding">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Assert that the compilation takes place in a freestanding environment">,
@@ -1673,7 +1851,8 @@ def fexperimental_strict_floating_point : Flag<["-"], "fexperimental-strict-floa
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Enables experimental strict floating point in LLVM.">,
MarshallingInfoFlag<LangOpts<"ExpStrictFP">>;
-def finput_charset_EQ : Joined<["-"], "finput-charset=">, Group<f_Group>;
+def finput_charset_EQ : Joined<["-"], "finput-charset=">, Flags<[FlangOption, FC1Option]>, Group<f_Group>,
+ HelpText<"Specify the default character set for source files">;
def fexec_charset_EQ : Joined<["-"], "fexec-charset=">, Group<f_Group>;
def finstrument_functions : Flag<["-"], "finstrument-functions">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Generate calls to instrument function entry and exit">,
@@ -1690,13 +1869,16 @@ def fcf_protection : Flag<["-"], "fcf-protection">, Group<f_Group>, Flags<[CoreO
Alias<fcf_protection_EQ>, AliasArgs<["full"]>,
HelpText<"Enable cf-protection in 'full' mode">;
-defm xray_instrument : OptInFFlag<"xray-instrument", "Generate XRay instrumentation sleds on function entry and exit", "", "", [], LangOpts<"XRayInstrument">>;
+defm xray_instrument : BoolFOption<"xray-instrument",
+ LangOpts<"XRayInstrument">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Generate XRay instrumentation sleds on function entry and exit">,
+ NegFlag<SetFalse>>;
def fxray_instruction_threshold_EQ :
JoinedOrSeparate<["-"], "fxray-instruction-threshold=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Sets the minimum function size to instrument with XRay">,
- MarshallingInfoStringInt<CodeGenOpts<"XRayInstructionThreshold">, "200">;
+ MarshallingInfoInt<CodeGenOpts<"XRayInstructionThreshold">, "200">;
def fxray_instruction_threshold_ :
JoinedOrSeparate<["-"], "fxray-instruction-threshold">,
Group<f_Group>, Flags<[CC1Option]>;
@@ -1721,16 +1903,29 @@ def fxray_modes :
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"List of modes to link in by default into XRay instrumented binaries.">;
-defm xray_always_emit_customevents : OptInFFlag<"xray-always-emit-customevents",
- "Always emit __xray_customevent(...) calls even if the containing function is not always instrumented", "", "", [], LangOpts<"XRayAlwaysEmitCustomEvents">>;
+defm xray_always_emit_customevents : BoolFOption<"xray-always-emit-customevents",
+ LangOpts<"XRayAlwaysEmitCustomEvents">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Always emit __xray_customevent(...) calls"
+ " even if the containing function is not always instrumented">,
+ NegFlag<SetFalse>>;
+
+defm xray_always_emit_typedevents : BoolFOption<"xray-always-emit-typedevents",
+ LangOpts<"XRayAlwaysEmitTypedEvents">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Always emit __xray_typedevent(...) calls"
+ " even if the containing function is not always instrumented">,
+ NegFlag<SetFalse>>;
-defm xray_always_emit_typedevents : OptInFFlag<"xray-always-emit-typedevents",
- "Always emit __xray_typedevent(...) calls even if the containing function is not always instrumented", "", "", [], LangOpts<"XRayAlwaysEmitTypedEvents">>;
+defm xray_ignore_loops : BoolFOption<"xray-ignore-loops",
+ CodeGenOpts<"XRayIgnoreLoops">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Don't instrument functions with loops"
+ " unless they also meet the minimum function size">,
+ NegFlag<SetFalse>>;
-defm xray_ignore_loops : OptInFFlag<"xray-ignore-loops",
- "Don't instrument functions with loops unless they also meet the minimum function size", "", "", [], CodeGenOpts<"XRayIgnoreLoops">>;
-defm xray_function_index : OptOutFFlag<"xray-function-index", "",
- "Omit function index section at the expense of single-function patching performance", "", [], CodeGenOpts<"XRayOmitFunctionIndex">>;
+defm xray_function_index : BoolFOption<"xray-function-index",
+ CodeGenOpts<"XRayOmitFunctionIndex">, DefaultTrue,
+ NegFlag<SetFalse, [CC1Option], "Omit function index section at the"
+ " expense of single-function patching performance">,
+ PosFlag<SetTrue>>;
def fxray_link_deps : Flag<["-"], "fxray-link-deps">, Group<f_Group>,
Flags<[CC1Option]>,
@@ -1747,13 +1942,13 @@ def fxray_function_groups :
Joined<["-"], "fxray-function-groups=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Only instrument 1 of N groups">,
- MarshallingInfoStringInt<CodeGenOpts<"XRayTotalFunctionGroups">, "1">;
+ MarshallingInfoInt<CodeGenOpts<"XRayTotalFunctionGroups">, "1">;
def fxray_selected_function_group :
Joined<["-"], "fxray-selected-function-group=">,
Group<f_Group>, Flags<[CC1Option]>,
HelpText<"When using -fxray-function-groups, select which group of functions to instrument. Valid range is 0 to fxray-function-groups - 1">,
- MarshallingInfoStringInt<CodeGenOpts<"XRaySelectedFunctionGroup">, "0">;
+ MarshallingInfoInt<CodeGenOpts<"XRaySelectedFunctionGroup">, "0">;
defm fine_grained_bitfield_accesses : BoolOption<"f", "fine-grained-bitfield-accesses",
@@ -1763,10 +1958,18 @@ defm fine_grained_bitfield_accesses : BoolOption<"f", "fine-grained-bitfield-acc
BothFlags<[CC1Option]>>,
Group<f_clang_Group>;
-defm experimental_relative_cxx_abi_vtables : BoolFOption<"experimental-relative-c++-abi-vtables",
- LangOpts<"RelativeCXXABIVTables">, DefaultFalse,
- PosFlag<SetTrue, [], "Use">, NegFlag<SetFalse, [], "Do not use">,
- BothFlags<[CC1Option], " the experimental C++ class ABI for classes with virtual tables">>;
+def fexperimental_relative_cxx_abi_vtables :
+ Flag<["-"], "fexperimental-relative-c++-abi-vtables">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+ HelpText<"Use the experimental C++ class ABI for classes with virtual tables">;
+def fno_experimental_relative_cxx_abi_vtables :
+ Flag<["-"], "fno-experimental-relative-c++-abi-vtables">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+ HelpText<"Do not use the experimental C++ class ABI for classes with virtual tables">;
+
+def fcxx_abi_EQ : Joined<["-"], "fc++-abi=">,
+ Group<f_clang_Group>, Flags<[CC1Option]>,
+ HelpText<"C++ ABI to use. This will override the target C++ ABI.">;
def flat__namespace : Flag<["-"], "flat_namespace">;
def flax_vector_conversions_EQ : Joined<["-"], "flax-vector-conversions=">, Group<f_Group>,
@@ -1774,10 +1977,10 @@ def flax_vector_conversions_EQ : Joined<["-"], "flax-vector-conversions=">, Grou
NormalizedValues<["LangOptions::LaxVectorConversionKind::None",
"LangOptions::LaxVectorConversionKind::Integer",
"LangOptions::LaxVectorConversionKind::All"]>,
- MarshallingInfoString<LangOpts<"LaxVectorConversions">,
- !strconcat(open_cl.KeyPath, " ? LangOptions::LaxVectorConversionKind::None"
- " : LangOptions::LaxVectorConversionKind::All")>,
- AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"LaxVectorConversions">,
+ open_cl.KeyPath #
+ " ? LangOptions::LaxVectorConversionKind::None" #
+ " : LangOptions::LaxVectorConversionKind::All">;
def flax_vector_conversions : Flag<["-"], "flax-vector-conversions">, Group<f_Group>,
Alias<flax_vector_conversions_EQ>, AliasArgs<["integer"]>;
def flimited_precision_EQ : Joined<["-"], "flimited-precision=">, Group<f_Group>;
@@ -1785,10 +1988,18 @@ def fapple_link_rtlib : Flag<["-"], "fapple-link-rtlib">, Group<f_Group>,
HelpText<"Force linking the clang builtins runtime library">;
def flto_EQ : Joined<["-"], "flto=">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Set LTO mode to either 'full' or 'thin'">, Values<"thin,full">;
+def flto_EQ_jobserver : Flag<["-"], "flto=jobserver">, Group<f_Group>;
+def flto_EQ_auto : Flag<["-"], "flto=auto">, Group<f_Group>;
def flto : Flag<["-"], "flto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Enable LTO in 'full' mode">;
def fno_lto : Flag<["-"], "fno-lto">, Flags<[CoreOption, CC1Option]>, Group<f_Group>,
HelpText<"Disable LTO mode (default)">;
+def foffload_lto_EQ : Joined<["-"], "foffload-lto=">, Flags<[CoreOption]>, Group<f_Group>,
+ HelpText<"Set LTO mode to either 'full' or 'thin' for offload compilation">, Values<"thin,full">;
+def foffload_lto : Flag<["-"], "foffload-lto">, Flags<[CoreOption]>, Group<f_Group>,
+ HelpText<"Enable LTO in 'full' mode for offload compilation">;
+def fno_offload_lto : Flag<["-"], "fno-offload-lto">, Flags<[CoreOption]>, Group<f_Group>,
+ HelpText<"Disable LTO mode (default) for offload compilation">;
def flto_jobs_EQ : Joined<["-"], "flto-jobs=">,
Flags<[CC1Option]>, Group<f_Group>,
HelpText<"Controls the backend parallelism of -flto=thin (default "
@@ -1809,12 +2020,16 @@ defm merge_all_constants : BoolFOption<"merge-all-constants",
BothFlags<[], " merging of constants">>;
def fmessage_length_EQ : Joined<["-"], "fmessage-length=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Format message diagnostics so that they fit within N columns">,
- MarshallingInfoStringInt<DiagnosticOpts<"MessageLength">>;
-def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
- HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
+ MarshallingInfoInt<DiagnosticOpts<"MessageLength">>;
def fms_compatibility : Flag<["-"], "fms-compatibility">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Enable full Microsoft Visual C++ compatibility">,
MarshallingInfoFlag<LangOpts<"MSVCCompat">>;
+def fms_extensions : Flag<["-"], "fms-extensions">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
+ HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">,
+ MarshallingInfoFlag<LangOpts<"MicrosoftExt">>, ImpliedByAnyOf<[fms_compatibility.KeyPath]>;
+defm asm_blocks : BoolFOption<"asm-blocks",
+ LangOpts<"AsmBlocks">, Default<fms_extensions.KeyPath>,
+ PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>>;
def fms_volatile : Flag<["-"], "fms-volatile">, Group<f_Group>, Flags<[CC1Option]>,
MarshallingInfoFlag<CodeGenOpts<"MSVolatile">>;
def fmsc_version : Joined<["-"], "fmsc-version=">, Group<f_Group>, Flags<[NoXarchOption, CoreOption]>,
@@ -1835,8 +2050,20 @@ def fms_memptr_rep_EQ : Joined<["-"], "fms-memptr-rep=">, Group<f_Group>, Flags<
Values<"single,multiple,virtual">, NormalizedValuesScope<"LangOptions">,
NormalizedValues<["PPTMK_FullGeneralitySingleInheritance", "PPTMK_FullGeneralityMultipleInheritance",
"PPTMK_FullGeneralityVirtualInheritance"]>,
- MarshallingInfoString<LangOpts<"MSPointerToMemberRepresentationMethod">, "PPTMK_BestCase">,
- AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"MSPointerToMemberRepresentationMethod">, "PPTMK_BestCase">;
+// __declspec is enabled by default for the PS4 by the driver, and also
+// enabled for Microsoft Extensions or Borland Extensions, here.
+//
+// FIXME: __declspec is also currently enabled for CUDA, but isn't really a
+// CUDA extension. However, it is required for supporting
+// __clang_cuda_builtin_vars.h, which uses __declspec(property). Once that has
+// been rewritten in terms of something more generic, remove the Opts.CUDA
+// term here.
+defm declspec : BoolOption<"f", "declspec",
+ LangOpts<"DeclSpecKeyword">, DefaultFalse,
+ PosFlag<SetTrue, [], "Allow", [fms_extensions.KeyPath, fborland_extensions.KeyPath, cuda.KeyPath]>,
+ NegFlag<SetFalse, [], "Disallow">,
+ BothFlags<[CC1Option], " __declspec as a keyword">>, Group<f_clang_Group>;
def fmodules_cache_path : Joined<["-"], "fmodules-cache-path=">, Group<i_Group>,
Flags<[NoXarchOption, CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Specify the module cache path">;
@@ -1847,24 +2074,23 @@ def fmodules_user_build_path : Separate<["-"], "fmodules-user-build-path">, Grou
def fprebuilt_module_path : Joined<["-"], "fprebuilt-module-path=">, Group<i_Group>,
Flags<[NoXarchOption, CC1Option]>, MetaVarName<"<directory>">,
HelpText<"Specify the prebuilt module path">;
-defm prebuilt_implicit_modules : OptInFFlag<"prebuilt-implicit-modules",
- "Look up implicit modules in the prebuilt module path", "", "",
- [NoXarchOption, CC1Option], HeaderSearchOpts<"EnablePrebuiltImplicitModules">>;
+defm prebuilt_implicit_modules : BoolFOption<"prebuilt-implicit-modules",
+ HeaderSearchOpts<"EnablePrebuiltImplicitModules">, DefaultFalse,
+ PosFlag<SetTrue, [], "Look up implicit modules in the prebuilt module path">,
+ NegFlag<SetFalse>, BothFlags<[NoXarchOption, CC1Option]>>;
+
def fmodules_prune_interval : Joined<["-"], "fmodules-prune-interval=">, Group<i_Group>,
Flags<[CC1Option]>, MetaVarName<"<seconds>">,
HelpText<"Specify the interval (in seconds) between attempts to prune the module cache">,
- MarshallingInfoStringInt<HeaderSearchOpts<"ModuleCachePruneInterval">, "7 * 24 * 60 * 60">;
+ MarshallingInfoInt<HeaderSearchOpts<"ModuleCachePruneInterval">, "7 * 24 * 60 * 60">;
def fmodules_prune_after : Joined<["-"], "fmodules-prune-after=">, Group<i_Group>,
Flags<[CC1Option]>, MetaVarName<"<seconds>">,
HelpText<"Specify the interval (in seconds) after which a module file will be considered unused">,
- MarshallingInfoStringInt<HeaderSearchOpts<"ModuleCachePruneAfter">, "31 * 24 * 60 * 60">;
-def fmodules_search_all : Flag <["-"], "fmodules-search-all">, Group<f_Group>,
- Flags<[NoXarchOption, CC1Option]>,
- HelpText<"Search even non-imported modules to resolve references">;
+ MarshallingInfoInt<HeaderSearchOpts<"ModuleCachePruneAfter">, "31 * 24 * 60 * 60">;
def fbuild_session_timestamp : Joined<["-"], "fbuild-session-timestamp=">,
Group<i_Group>, Flags<[CC1Option]>, MetaVarName<"<time since Epoch in seconds>">,
HelpText<"Time when the current build session started">,
- MarshallingInfoStringInt<HeaderSearchOpts<"BuildSessionTimestamp">, "0", "uint64_t">;
+ MarshallingInfoInt<HeaderSearchOpts<"BuildSessionTimestamp">, "0", "uint64_t">;
def fbuild_session_file : Joined<["-"], "fbuild-session-file=">,
Group<i_Group>, MetaVarName<"<file>">,
HelpText<"Use the last modification time of <file> as the build session timestamp">;
@@ -1912,48 +2138,53 @@ defm pch_codegen: OptInFFlag<"pch-codegen", "Generate ", "Do not generate ",
defm pch_debuginfo: OptInFFlag<"pch-debuginfo", "Generate ", "Do not generate ",
"debug info for types in an object file built from this PCH and do not generate them elsewhere">;
-def fmodules : Flag <["-"], "fmodules">, Group<f_Group>,
- Flags<[NoXarchOption, CC1Option]>,
- HelpText<"Enable the 'modules' language feature">;
def fimplicit_module_maps : Flag <["-"], "fimplicit-module-maps">, Group<f_Group>,
- Flags<[NoXarchOption, CC1Option]>,
+ Flags<[NoXarchOption, CC1Option, CoreOption]>,
HelpText<"Implicitly search the file system for module map files.">,
MarshallingInfoFlag<HeaderSearchOpts<"ImplicitModuleMaps">>;
def fmodules_ts : Flag <["-"], "fmodules-ts">, Group<f_Group>,
Flags<[CC1Option]>, HelpText<"Enable support for the C++ Modules TS">,
MarshallingInfoFlag<LangOpts<"ModulesTS">>;
-def fmodule_maps : Flag <["-"], "fmodule-maps">, Alias<fimplicit_module_maps>;
+defm modules : BoolFOption<"modules",
+ LangOpts<"Modules">, Default<!strconcat(fmodules_ts.KeyPath, "||", cpp_modules.KeyPath)>,
+ PosFlag<SetTrue, [CC1Option], "Enable the 'modules' language feature">,
+ NegFlag<SetFalse>, BothFlags<[NoXarchOption, CoreOption]>>;
+def fmodule_maps : Flag <["-"], "fmodule-maps">, Flags<[CoreOption]>, Alias<fimplicit_module_maps>;
def fmodule_name_EQ : Joined<["-"], "fmodule-name=">, Group<f_Group>,
- Flags<[NoXarchOption,CC1Option]>, MetaVarName<"<name>">,
+ Flags<[NoXarchOption,CC1Option,CoreOption]>, MetaVarName<"<name>">,
HelpText<"Specify the name of the module to build">,
MarshallingInfoString<LangOpts<"ModuleName">>;
-def fmodule_name : Separate<["-"], "fmodule-name">, Alias<fmodule_name_EQ>;
def fmodule_implementation_of : Separate<["-"], "fmodule-implementation-of">,
- Flags<[CC1Option]>, Alias<fmodule_name_EQ>;
-def fsystem_module : Flag<["-"], "fsystem-module">, Flags<[CC1Option]>,
+ Flags<[CC1Option,CoreOption]>, Alias<fmodule_name_EQ>;
+def fsystem_module : Flag<["-"], "fsystem-module">, Flags<[CC1Option,CoreOption]>,
HelpText<"Build this module as a system module. Only used with -emit-module">,
MarshallingInfoFlag<FrontendOpts<"IsSystemModule">>;
def fmodule_map_file : Joined<["-"], "fmodule-map-file=">,
- Group<f_Group>, Flags<[NoXarchOption,CC1Option]>, MetaVarName<"<file>">,
+ Group<f_Group>, Flags<[NoXarchOption,CC1Option,CoreOption]>, MetaVarName<"<file>">,
HelpText<"Load this module map file">,
MarshallingInfoStringVector<FrontendOpts<"ModuleMapFiles">>;
def fmodule_file : Joined<["-"], "fmodule-file=">,
- Group<i_Group>, Flags<[NoXarchOption,CC1Option]>, MetaVarName<"[<name>=]<file>">,
+ Group<i_Group>, Flags<[NoXarchOption,CC1Option,CoreOption]>, MetaVarName<"[<name>=]<file>">,
HelpText<"Specify the mapping of module name to precompiled module file, or load a module file if name is omitted.">;
-def fmodules_ignore_macro : Joined<["-"], "fmodules-ignore-macro=">, Group<f_Group>, Flags<[CC1Option]>,
+def fmodules_ignore_macro : Joined<["-"], "fmodules-ignore-macro=">, Group<f_Group>,
+ Flags<[CC1Option,CoreOption]>,
HelpText<"Ignore the definition of the given macro when building and loading modules">;
-def fmodules_decluse : Flag <["-"], "fmodules-decluse">, Group<f_Group>,
- Flags<[NoXarchOption,CC1Option]>,
- HelpText<"Require declaration of modules used within a module">;
def fmodules_strict_decluse : Flag <["-"], "fmodules-strict-decluse">, Group<f_Group>,
- Flags<[NoXarchOption,CC1Option]>,
+ Flags<[NoXarchOption,CC1Option,CoreOption]>,
HelpText<"Like -fmodules-decluse but requires all headers to be in modules">,
MarshallingInfoFlag<LangOpts<"ModulesStrictDeclUse">>;
-def fno_modules_search_all : Flag <["-"], "fno-modules-search-all">, Group<f_Group>,
- Flags<[NoXarchOption, CC1Option]>;
+defm modules_decluse : BoolFOption<"modules-decluse",
+ LangOpts<"ModulesDeclUse">, Default<fmodules_strict_decluse.KeyPath>,
+ PosFlag<SetTrue, [CC1Option], "Require declaration of modules used within a module">,
+ NegFlag<SetFalse>, BothFlags<[NoXarchOption,CoreOption]>>;
+defm modules_search_all : BoolFOption<"modules-search-all",
+ LangOpts<"ModulesSearchAll">, DefaultFalse,
+ PosFlag<SetTrue, [], "Search even non-imported modules to resolve references">,
+ NegFlag<SetFalse>, BothFlags<[NoXarchOption, CC1Option,CoreOption]>>,
+ ShouldParseIf<fmodules.KeyPath>;
defm implicit_modules : BoolFOption<"implicit-modules",
LangOpts<"ImplicitModules">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[NoXarchOption]>>;
+ NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[NoXarchOption,CoreOption]>>;
def fretain_comments_from_system_headers : Flag<["-"], "fretain-comments-from-system-headers">, Group<f_Group>, Flags<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"RetainCommentsFromSystemHeaders">>;
@@ -1961,7 +2192,6 @@ def fmudflapth : Flag<["-"], "fmudflapth">, Group<f_Group>;
def fmudflap : Flag<["-"], "fmudflap">, Group<f_Group>;
def fnested_functions : Flag<["-"], "fnested-functions">, Group<f_Group>;
def fnext_runtime : Flag<["-"], "fnext-runtime">, Group<f_Group>;
-def fno_xl_pragma_pack : Flag<["-"], "fno-xl-pragma-pack">, Group<f_Group>;
def fno_asm : Flag<["-"], "fno-asm">, Group<f_Group>;
def fno_asynchronous_unwind_tables : Flag<["-"], "fno-asynchronous-unwind-tables">, Group<f_Group>;
def fno_assume_sane_operator_new : Flag<["-"], "fno-assume-sane-operator-new">, Group<f_Group>,
@@ -1982,10 +2212,6 @@ defm digraphs : BoolFOption<"digraphs",
PosFlag<SetTrue, [], "Enable alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:' (default)">,
NegFlag<SetFalse, [], "Disallow alternative token representations '<:', ':>', '<%', '%>', '%:', '%:%:'">,
BothFlags<[CC1Option]>>;
-def fno_declspec : Flag<["-"], "fno-declspec">, Group<f_clang_Group>,
- HelpText<"Disallow __declspec as a keyword">, Flags<[CC1Option]>;
-def fno_dollars_in_identifiers : Flag<["-"], "fno-dollars-in-identifiers">, Group<f_Group>,
- HelpText<"Disallow '$' in identifiers">, Flags<[CC1Option]>;
def fno_eliminate_unused_debug_symbols : Flag<["-"], "fno-eliminate-unused-debug-symbols">, Group<f_Group>;
def fno_inline_functions : Flag<["-"], "fno-inline-functions">, Group<f_clang_Group>, Flags<[CC1Option]>;
def fno_inline : Flag<["-"], "fno-inline">, Group<f_clang_Group>, Flags<[CC1Option]>;
@@ -1995,19 +2221,16 @@ def fno_experimental_isel : Flag<["-"], "fno-experimental-isel">, Group<f_clang_
Alias<fno_global_isel>;
def fveclib : Joined<["-"], "fveclib=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Use the given vector functions library">,
- Values<"Accelerate,libmvec,MASSV,SVML,none">,
+ Values<"Accelerate,libmvec,MASSV,SVML,Darwin_libsystem_m,none">,
NormalizedValuesScope<"CodeGenOptions">,
- NormalizedValues<["Accelerate", "LIBMVEC", "MASSV", "SVML", "NoLibrary"]>,
- MarshallingInfoString<CodeGenOpts<"VecLib">, "NoLibrary">, AutoNormalizeEnum;
+ NormalizedValues<["Accelerate", "LIBMVEC", "MASSV", "SVML",
+ "Darwin_libsystem_m", "NoLibrary"]>,
+ MarshallingInfoEnum<CodeGenOpts<"VecLib">, "NoLibrary">;
def fno_lax_vector_conversions : Flag<["-"], "fno-lax-vector-conversions">, Group<f_Group>,
Alias<flax_vector_conversions_EQ>, AliasArgs<["none"]>;
-def fno_modules : Flag <["-"], "fno-modules">, Group<f_Group>,
- Flags<[NoXarchOption]>;
def fno_implicit_module_maps : Flag <["-"], "fno-implicit-module-maps">, Group<f_Group>,
Flags<[NoXarchOption]>;
def fno_module_maps : Flag <["-"], "fno-module-maps">, Alias<fno_implicit_module_maps>;
-def fno_modules_decluse : Flag <["-"], "fno-modules-decluse">, Group<f_Group>,
- Flags<[NoXarchOption]>;
def fno_modules_strict_decluse : Flag <["-"], "fno-strict-modules-decluse">, Group<f_Group>,
Flags<[NoXarchOption]>;
def fmodule_file_deps : Flag <["-"], "fmodule-file-deps">, Group<f_Group>,
@@ -2021,9 +2244,10 @@ def fno_ms_compatibility : Flag<["-"], "fno-ms-compatibility">, Group<f_Group>,
def fno_objc_legacy_dispatch : Flag<["-"], "fno-objc-legacy-dispatch">, Group<f_Group>;
def fno_objc_weak : Flag<["-"], "fno-objc-weak">, Group<f_Group>, Flags<[CC1Option]>;
def fno_omit_frame_pointer : Flag<["-"], "fno-omit-frame-pointer">, Group<f_Group>;
-def fno_operator_names : Flag<["-"], "fno-operator-names">, Group<f_Group>,
- HelpText<"Do not treat C++ operator name keywords as synonyms for operators">,
- Flags<[CC1Option]>;
+defm operator_names : BoolFOption<"operator-names",
+ LangOpts<"CXXOperatorNames">, Default<cplusplus.KeyPath>,
+ NegFlag<SetFalse, [CC1Option], "Do not treat C++ operator name keywords as synonyms for operators">,
+ PosFlag<SetTrue>>;
def fdiagnostics_absolute_paths : Flag<["-"], "fdiagnostics-absolute-paths">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>, HelpText<"Print absolute paths in diagnostics">,
MarshallingInfoFlag<DiagnosticOpts<"AbsolutePath">>;
@@ -2052,12 +2276,17 @@ def fno_wrapv : Flag<["-"], "fno-wrapv">, Group<f_Group>;
def fobjc_arc : Flag<["-"], "fobjc-arc">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Synthesize retain and release calls for Objective-C pointers">;
def fno_objc_arc : Flag<["-"], "fno-objc-arc">, Group<f_Group>;
+defm objc_encode_cxx_class_template_spec : BoolFOption<"objc-encode-cxx-class-template-spec",
+ LangOpts<"EncodeCXXClassTemplateSpec">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Fully encode c++ class template specialization">,
+ NegFlag<SetFalse>>;
defm objc_convert_messages_to_runtime_calls : BoolFOption<"objc-convert-messages-to-runtime-calls",
CodeGenOpts<"ObjCConvertMessagesToRuntimeCalls">, DefaultTrue,
NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>>;
-defm objc_arc_exceptions : OptInFFlag<"objc-arc-exceptions",
- "Use EH-safe code when synthesizing retains and releases in -fobjc-arc",
- "", "", [], CodeGenOpts<"ObjCAutoRefCountExceptions">>;
+defm objc_arc_exceptions : BoolFOption<"objc-arc-exceptions",
+ CodeGenOpts<"ObjCAutoRefCountExceptions">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Use EH-safe code when synthesizing retains and releases in -fobjc-arc">,
+ NegFlag<SetFalse>>;
def fobjc_atdefs : Flag<["-"], "fobjc-atdefs">, Group<clang_ignored_f_Group>;
def fobjc_call_cxx_cdtors : Flag<["-"], "fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
defm objc_exceptions : BoolFOption<"objc-exceptions",
@@ -2075,14 +2304,14 @@ defm sized_deallocation : BoolFOption<"sized-deallocation",
LangOpts<"SizedDeallocation">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enable C++14 sized global deallocation functions">,
NegFlag<SetFalse>>;
-def faligned_allocation : Flag<["-"], "faligned-allocation">, Flags<[CC1Option]>,
- HelpText<"Enable C++17 aligned allocation functions">, Group<f_Group>;
-def fno_aligned_allocation: Flag<["-"], "fno-aligned-allocation">,
- Group<f_Group>, Flags<[CC1Option]>;
+defm aligned_allocation : BoolFOption<"aligned-allocation",
+ LangOpts<"AlignedAllocation">, Default<cpp17.KeyPath>,
+ PosFlag<SetTrue, [], "Enable C++17 aligned allocation functions">,
+ NegFlag<SetFalse>, BothFlags<[CC1Option]>>;
def fnew_alignment_EQ : Joined<["-"], "fnew-alignment=">,
HelpText<"Specifies the largest alignment guaranteed by '::operator new(size_t)'">,
MetaVarName<"<align>">, Group<f_Group>, Flags<[CC1Option]>,
- MarshallingInfoStringInt<LangOpts<"NewAlignOverride">>;
+ MarshallingInfoInt<LangOpts<"NewAlignOverride">>;
def : Separate<["-"], "fnew-alignment">, Alias<fnew_alignment_EQ>;
def : Flag<["-"], "faligned-new">, Alias<faligned_allocation>;
def : Flag<["-"], "fno-aligned-new">, Alias<fno_aligned_allocation>;
@@ -2107,10 +2336,15 @@ def fobjc_nonfragile_abi : Flag<["-"], "fobjc-nonfragile-abi">, Group<f_Group>;
def fno_objc_nonfragile_abi : Flag<["-"], "fno-objc-nonfragile-abi">, Group<f_Group>;
def fobjc_sender_dependent_dispatch : Flag<["-"], "fobjc-sender-dependent-dispatch">, Group<f_Group>;
+def fobjc_disable_direct_methods_for_testing :
+ Flag<["-"], "fobjc-disable-direct-methods-for-testing">,
+ Group<f_Group>, Flags<[CC1Option]>,
+ HelpText<"Ignore attribute objc_direct so that direct methods can be tested">,
+ MarshallingInfoFlag<LangOpts<"ObjCDisableDirectMethodsForTesting">>;
+
def fomit_frame_pointer : Flag<["-"], "fomit-frame-pointer">, Group<f_Group>;
-def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>,
- HelpText<"Parse OpenMP pragmas and generate parallel code.">,
- MarshallingInfoFlag<LangOpts<"OpenMP">, "0u">, Normalizer<"makeFlagToValueNormalizer(50u)">;
+def fopenmp : Flag<["-"], "fopenmp">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused, FlangOption, FC1Option]>,
+ HelpText<"Parse OpenMP pragmas and generate parallel code.">;
def fno_openmp : Flag<["-"], "fno-openmp">, Group<f_Group>, Flags<[NoArgumentUnused]>;
def fopenmp_version_EQ : Joined<["-"], "fopenmp-version=">, Group<f_Group>, Flags<[CC1Option, NoArgumentUnused]>;
def fopenmp_EQ : Joined<["-"], "fopenmp=">, Group<f_Group>;
@@ -2143,15 +2377,13 @@ def fopenmp_cuda_blocks_per_sm_EQ : Joined<["-"], "fopenmp-cuda-blocks-per-sm=">
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
def fopenmp_cuda_teams_reduction_recs_num_EQ : Joined<["-"], "fopenmp-cuda-teams-reduction-recs-num=">, Group<f_Group>,
Flags<[CC1Option, NoArgumentUnused, HelpHidden]>;
+defm openmp_target_new_runtime: BoolFOption<"openmp-target-new-runtime",
+ LangOpts<"OpenMPTargetNewRuntime">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Use the new bitcode library for OpenMP offloading">,
+ NegFlag<SetFalse>>;
defm openmp_optimistic_collapse : BoolFOption<"openmp-optimistic-collapse",
LangOpts<"OpenMPOptimisticCollapse">, DefaultFalse,
PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[NoArgumentUnused, HelpHidden]>>;
-def fopenmp_cuda_parallel_target_regions : Flag<["-"], "fopenmp-cuda-parallel-target-regions">, Group<f_Group>,
- Flags<[CC1Option, NoArgumentUnused, HelpHidden]>,
- HelpText<"Support parallel execution of target regions on Cuda-based devices.">;
-def fno_openmp_cuda_parallel_target_regions : Flag<["-"], "fno-openmp-cuda-parallel-target-regions">, Group<f_Group>,
- Flags<[NoArgumentUnused, HelpHidden]>,
- HelpText<"Support only serial execution of target regions on Cuda-based devices.">;
def static_openmp: Flag<["-"], "static-openmp">,
HelpText<"Use the static host OpenMP runtime while linking.">;
def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>;
@@ -2168,10 +2400,10 @@ def fpack_struct : Flag<["-"], "fpack-struct">, Group<f_Group>;
def fno_pack_struct : Flag<["-"], "fno-pack-struct">, Group<f_Group>;
def fpack_struct_EQ : Joined<["-"], "fpack-struct=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specify the default maximum struct packing alignment">,
- MarshallingInfoStringInt<LangOpts<"PackStruct">>;
+ MarshallingInfoInt<LangOpts<"PackStruct">>;
def fmax_type_align_EQ : Joined<["-"], "fmax-type-align=">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Specify the maximum alignment to enforce on pointers lacking an explicit alignment">,
- MarshallingInfoStringInt<LangOpts<"MaxTypeAlign">>;
+ MarshallingInfoInt<LangOpts<"MaxTypeAlign">>;
def fno_max_type_align : Flag<["-"], "fno-max-type-align">, Group<f_Group>;
defm pascal_strings : BoolFOption<"pascal-strings",
LangOpts<"PascalStrings">, DefaultFalse,
@@ -2182,7 +2414,7 @@ defm pascal_strings : BoolFOption<"pascal-strings",
// are treated as a single integer.
def fpatchable_function_entry_EQ : Joined<["-"], "fpatchable-function-entry=">, Group<f_Group>, Flags<[CC1Option]>,
MetaVarName<"<N,M>">, HelpText<"Generate M NOPs before function entry and N-M NOPs after function entry">,
- MarshallingInfoStringInt<CodeGenOpts<"PatchableFunctionEntryCount">>;
+ MarshallingInfoInt<CodeGenOpts<"PatchableFunctionEntryCount">>;
def fpcc_struct_return : Flag<["-"], "fpcc-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Override the default ABI to return all structs on the stack">;
def fpch_preprocess : Flag<["-"], "fpch-preprocess">, Group<f_Group>;
@@ -2220,18 +2452,24 @@ def framework : Separate<["-"], "framework">, Flags<[LinkerInput]>;
def frandom_seed_EQ : Joined<["-"], "frandom-seed=">, Group<clang_ignored_f_Group>;
def freg_struct_return : Flag<["-"], "freg-struct-return">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Override the default ABI to return small structs in registers">;
-defm rtti : OptOutFFlag<"rtti", "", "Disable generation of rtti information">;
-defm rtti_data : OptOutFFlag<"rtti-data", "", "Disable generation of RTTI data">;
+defm rtti : BoolFOption<"rtti",
+ LangOpts<"RTTI">, Default<cplusplus.KeyPath>,
+ NegFlag<SetFalse, [CC1Option], "Disable generation of rtti information">,
+ PosFlag<SetTrue>>, ShouldParseIf<cplusplus.KeyPath>;
+defm rtti_data : BoolFOption<"rtti-data",
+ LangOpts<"RTTIData">, Default<frtti.KeyPath>,
+ NegFlag<SetFalse, [CC1Option], "Disable generation of RTTI data">,
+ PosFlag<SetTrue>>, ShouldParseIf<frtti.KeyPath>;
def : Flag<["-"], "fsched-interblock">, Group<clang_ignored_f_Group>;
defm short_enums : BoolFOption<"short-enums",
LangOpts<"ShortEnums">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Allocate to an enum type only as many bytes as it"
" needs for the declared range of possible values">,
NegFlag<SetFalse>>;
-def fchar8__t : Flag<["-"], "fchar8_t">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Enable C++ builtin type char8_t">;
-def fno_char8__t : Flag<["-"], "fno-char8_t">, Group<f_Group>, Flags<[CC1Option]>,
- HelpText<"Disable C++ builtin type char8_t">;
+defm char8__t : BoolFOption<"char8_t",
+ LangOpts<"Char8">, Default<cpp20.KeyPath>,
+ PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
+ BothFlags<[CC1Option], " C++ builtin type char8_t">>;
def fshort_wchar : Flag<["-"], "fshort-wchar">, Group<f_Group>,
HelpText<"Force wchar_t to be a short unsigned int">;
def fno_short_wchar : Flag<["-"], "fno-short-wchar">, Group<f_Group>,
@@ -2240,7 +2478,7 @@ def fshow_overloads_EQ : Joined<["-"], "fshow-overloads=">, Group<f_Group>, Flag
HelpText<"Which overload candidates to show when overload resolution fails: "
"best|all; defaults to all">, Values<"best,all">,
NormalizedValues<["Ovl_Best", "Ovl_All"]>,
- MarshallingInfoString<DiagnosticOpts<"ShowOverloads">, "Ovl_All">, AutoNormalizeEnum;
+ MarshallingInfoEnum<DiagnosticOpts<"ShowOverloads">, "Ovl_All">;
defm show_column : BoolFOption<"show-column",
DiagnosticOpts<"ShowColumn">, DefaultTrue,
NegFlag<SetFalse, [CC1Option], "Do not include column number on diagnostics">,
@@ -2254,8 +2492,14 @@ defm spell_checking : BoolFOption<"spell-checking",
NegFlag<SetFalse, [CC1Option], "Disable spell-checking">, PosFlag<SetTrue>>;
def fspell_checking_limit_EQ : Joined<["-"], "fspell-checking-limit=">, Group<f_Group>;
def fsigned_bitfields : Flag<["-"], "fsigned-bitfields">, Group<f_Group>;
-defm signed_char : OptOutFFlag<"signed-char", "char is signed", "char is unsigned">;
-def fsplit_stack : Flag<["-"], "fsplit-stack">, Group<f_Group>;
+defm signed_char : BoolFOption<"signed-char",
+ LangOpts<"CharIsSigned">, DefaultTrue,
+ NegFlag<SetFalse, [CC1Option], "char is unsigned">, PosFlag<SetTrue, [], "char is signed">>,
+ ShouldParseIf<!strconcat("!", open_cl.KeyPath)>;
+defm split_stack : BoolFOption<"split-stack",
+ CodeGenOpts<"EnableSegmentedStacks">, DefaultFalse,
+ NegFlag<SetFalse, [], "Wouldn't use segmented stack">,
+ PosFlag<SetTrue, [CC1Option], "Use segmented stack">>;
def fstack_protector_all : Flag<["-"], "fstack-protector-all">, Group<f_Group>,
HelpText<"Enable stack protectors for all functions">;
defm stack_clash_protection : BoolFOption<"stack-clash-protection",
@@ -2283,13 +2527,13 @@ def ftrivial_auto_var_init : Joined<["-"], "ftrivial-auto-var-init=">, Group<f_G
" | pattern">, Values<"uninitialized,zero,pattern">,
NormalizedValuesScope<"LangOptions::TrivialAutoVarInitKind">,
NormalizedValues<["Uninitialized", "Zero", "Pattern"]>,
- MarshallingInfoString<LangOpts<"TrivialAutoVarInit">, "Uninitialized">, AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"TrivialAutoVarInit">, "Uninitialized">;
def enable_trivial_var_init_zero : Flag<["-"], "enable-trivial-auto-var-init-zero-knowing-it-will-be-removed-from-clang">,
Flags<[CC1Option, CoreOption]>,
HelpText<"Trivial automatic variable initialization to zero is only here for benchmarks, it'll eventually be removed, and I'm OK with that because I'm only using it to benchmark">;
def ftrivial_auto_var_init_stop_after : Joined<["-"], "ftrivial-auto-var-init-stop-after=">, Group<f_Group>,
Flags<[CC1Option, CoreOption]>, HelpText<"Stop initializing trivial automatic stack variables after the specified number of instances">,
- MarshallingInfoStringInt<LangOpts<"TrivialAutoVarInitStopAfter">>;
+ MarshallingInfoInt<LangOpts<"TrivialAutoVarInitStopAfter">>;
def fstandalone_debug : Flag<["-"], "fstandalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
HelpText<"Emit full debug info for all types used by the program">;
def fno_standalone_debug : Flag<["-"], "fno-standalone-debug">, Group<f_Group>, Flags<[CoreOption]>,
@@ -2352,13 +2596,18 @@ def Wlarge_by_value_copy_def : Flag<["-"], "Wlarge-by-value-copy">,
HelpText<"Warn if a function definition returns or accepts an object larger "
"in bytes than a given value">, Flags<[HelpHidden]>;
def Wlarge_by_value_copy_EQ : Joined<["-"], "Wlarge-by-value-copy=">, Flags<[CC1Option]>,
- MarshallingInfoStringInt<LangOpts<"NumLargeByValueCopy">>;
+ MarshallingInfoInt<LangOpts<"NumLargeByValueCopy">>;
// These "special" warning flags are effectively processed as f_Group flags by the driver:
// Just silence warnings about -Wlarger-than for now.
def Wlarger_than_EQ : Joined<["-"], "Wlarger-than=">, Group<clang_ignored_f_Group>;
def Wlarger_than_ : Joined<["-"], "Wlarger-than-">, Alias<Wlarger_than_EQ>;
-def Wframe_larger_than_EQ : Joined<["-"], "Wframe-larger-than=">, Group<f_Group>, Flags<[NoXarchOption]>;
+
+// This is converted to -fwarn-stack-size=N and also passed through by the driver.
+// FIXME: The driver should strip out the =<value> when passing W_value_Group through.
+def Wframe_larger_than_EQ : Joined<["-"], "Wframe-larger-than=">, Group<W_value_Group>,
+ Flags<[NoXarchOption, CC1Option]>;
+def Wframe_larger_than : Flag<["-"], "Wframe-larger-than">, Alias<Wframe_larger_than_EQ>;
def : Flag<["-"], "fterminated-vtables">, Alias<fapple_kext>;
defm threadsafe_statics : BoolFOption<"threadsafe-statics",
@@ -2383,7 +2632,7 @@ can be analyzed with chrome://tracing or `Speedscope App
def ftime_trace_granularity_EQ : Joined<["-"], "ftime-trace-granularity=">, Group<f_Group>,
HelpText<"Minimum time granularity (in microseconds) traced by time profiler">,
Flags<[CC1Option, CoreOption]>,
- MarshallingInfoStringInt<FrontendOpts<"TimeTraceGranularity">, "500u">;
+ MarshallingInfoInt<FrontendOpts<"TimeTraceGranularity">, "500u">;
def fproc_stat_report : Joined<["-"], "fproc-stat-report">, Group<f_Group>,
HelpText<"Print subprocess statistics">;
def fproc_stat_report_EQ : Joined<["-"], "fproc-stat-report=">, Group<f_Group>,
@@ -2392,7 +2641,7 @@ def ftlsmodel_EQ : Joined<["-"], "ftls-model=">, Group<f_Group>, Flags<[CC1Optio
Values<"global-dynamic,local-dynamic,initial-exec,local-exec">,
NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["GeneralDynamicTLSModel", "LocalDynamicTLSModel", "InitialExecTLSModel", "LocalExecTLSModel"]>,
- MarshallingInfoString<CodeGenOpts<"DefaultTLSModel">, "GeneralDynamicTLSModel">, AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"DefaultTLSModel">, "GeneralDynamicTLSModel">;
def ftrapv : Flag<["-"], "ftrapv">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Trap on integer overflow">;
def ftrapv_handler_EQ : Joined<["-"], "ftrapv-handler=">, Group<f_Group>,
@@ -2410,6 +2659,11 @@ def fno_unroll_loops : Flag<["-"], "fno-unroll-loops">, Group<f_Group>,
defm reroll_loops : BoolFOption<"reroll-loops",
CodeGenOpts<"RerollLoops">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Turn on loop reroller">, NegFlag<SetFalse>>;
+def ffinite_loops: Flag<["-"], "ffinite-loops">, Group<f_Group>,
+ HelpText<"Assume all loops are finite.">, Flags<[CC1Option]>;
+def fno_finite_loops: Flag<["-"], "fno-finite-loops">, Group<f_Group>,
+ HelpText<"Do not assume that any loop is finite.">, Flags<[CC1Option]>;
+
def ftrigraphs : Flag<["-"], "ftrigraphs">, Group<f_Group>,
HelpText<"Process trigraph sequences">, Flags<[CC1Option]>;
def fno_trigraphs : Flag<["-"], "fno-trigraphs">, Group<f_Group>,
@@ -2452,9 +2706,10 @@ def fvisibility_externs_nodllstorageclass_EQ : Joined<["-"], "fvisibility-extern
ShouldParseIf<fvisibility_from_dllstorageclass.KeyPath>;
def fvisibility_EQ : Joined<["-"], "fvisibility=">, Group<f_Group>,
HelpText<"Set the default symbol visibility for all global declarations">, Values<"hidden,default">;
-def fvisibility_inlines_hidden : Flag<["-"], "fvisibility-inlines-hidden">, Group<f_Group>,
- HelpText<"Give inline C++ member functions hidden visibility by default">,
- Flags<[CC1Option]>, MarshallingInfoFlag<LangOpts<"InlineVisibilityHidden">>;
+defm visibility_inlines_hidden : BoolFOption<"visibility-inlines-hidden",
+ LangOpts<"InlineVisibilityHidden">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Give inline C++ member functions hidden visibility by default">,
+ NegFlag<SetFalse>>;
defm visibility_inlines_hidden_static_local_var : BoolFOption<"visibility-inlines-hidden-static-local-var",
LangOpts<"VisibilityInlinesHiddenStaticLocalVar">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "When -fvisibility-inlines-hidden is enabled, static variables in"
@@ -2493,7 +2748,10 @@ defm zero_initialized_in_bss : BoolFOption<"zero-initialized-in-bss",
CodeGenOpts<"NoZeroInitializedInBSS">, DefaultFalse,
NegFlag<SetTrue, [CC1Option], "Don't place zero initialized data in BSS">,
PosFlag<SetFalse>>;
-defm function_sections : OptInFFlag<"function-sections", "Place each function in its own section">;
+defm function_sections : BoolFOption<"function-sections",
+ CodeGenOpts<"FunctionSections">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option], "Place each function in its own section">,
+ NegFlag<SetFalse>>;
def fbasic_block_sections_EQ : Joined<["-"], "fbasic-block-sections=">, Group<f_Group>,
Flags<[CC1Option, CC1AsOption]>,
HelpText<"Place each function's basic blocks in unique sections (ELF Only) : all | labels | none | list=<file>">,
@@ -2507,6 +2765,12 @@ defm stack_size_section : BoolFOption<"stack-size-section",
CodeGenOpts<"StackSizeSection">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Emit section containing metadata on function stack sizes">,
NegFlag<SetFalse>>;
+def fstack_usage : Flag<["-"], "fstack-usage">, Group<f_Group>,
+ HelpText<"Emit .su file containing information on function stack sizes">;
+def stack_usage_file : Separate<["-"], "stack-usage-file">,
+ Flags<[CC1Option, NoDriverOption]>,
+ HelpText<"Filename (or -) to write stack usage output to">,
+ MarshallingInfoString<CodeGenOpts<"StackUsageOutput">>;
defm unique_basic_block_section_names : BoolFOption<"unique-basic-block-section-names",
CodeGenOpts<"UniqueBasicBlockSectionNames">, DefaultFalse,
@@ -2547,9 +2811,9 @@ defm debug_ranges_base_address : BoolFOption<"debug-ranges-base-address",
PosFlag<SetTrue, [CC1Option], "Use DWARF base address selection entries in .debug_ranges">,
NegFlag<SetFalse>>;
defm split_dwarf_inlining : BoolFOption<"split-dwarf-inlining",
- CodeGenOpts<"SplitDwarfInlining">, DefaultTrue,
- NegFlag<SetFalse, [CC1Option]>,
- PosFlag<SetTrue, [], "Provide minimal debug info in the object/executable"
+ CodeGenOpts<"SplitDwarfInlining">, DefaultFalse,
+ NegFlag<SetFalse, []>,
+ PosFlag<SetTrue, [CC1Option], "Provide minimal debug info in the object/executable"
" to facilitate online symbolication/stack traces in the absence of"
" .dwo/.dwp files when using Split DWARF">>;
def fdebug_default_version: Joined<["-"], "fdebug-default-version=">, Group<f_Group>,
@@ -2558,10 +2822,10 @@ def fdebug_prefix_map_EQ
: Joined<["-"], "fdebug-prefix-map=">, Group<f_Group>,
Flags<[CC1Option,CC1AsOption]>,
HelpText<"remap file source paths in debug info">;
-def fprofile_prefix_map_EQ
- : Joined<["-"], "fprofile-prefix-map=">, Group<f_Group>,
+def fcoverage_prefix_map_EQ
+ : Joined<["-"], "fcoverage-prefix-map=">, Group<f_Group>,
Flags<[CC1Option]>,
- HelpText<"remap file source paths in coverage info">;
+ HelpText<"remap file source paths in coverage mapping">;
def ffile_prefix_map_EQ
: Joined<["-"], "ffile-prefix-map=">, Group<f_Group>,
HelpText<"remap file source paths in debug info and predefined preprocessor macros">;
@@ -2589,6 +2853,7 @@ def ggdb2 : Flag<["-"], "ggdb2">, Group<ggdbN_Group>;
def ggdb3 : Flag<["-"], "ggdb3">, Group<ggdbN_Group>;
def glldb : Flag<["-"], "glldb">, Group<gTune_Group>;
def gsce : Flag<["-"], "gsce">, Group<gTune_Group>;
+def gdbx : Flag<["-"], "gdbx">, Group<gTune_Group>;
// Equivalent to our default dwarf version. Forces usual dwarf emission when
// CodeView is enabled.
def gdwarf : Flag<["-"], "gdwarf">, Group<g_Group>, Flags<[CoreOption]>,
@@ -2601,10 +2866,12 @@ def gdwarf_4 : Flag<["-"], "gdwarf-4">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 4">;
def gdwarf_5 : Flag<["-"], "gdwarf-5">, Group<g_Group>,
HelpText<"Generate source-level debug information with dwarf version 5">;
-def gdwarf64 : Flag<["-"], "gdwarf64">, Group<g_Group>, Flags<[CC1Option]>,
+def gdwarf64 : Flag<["-"], "gdwarf64">, Group<g_Group>,
+ Flags<[CC1Option, CC1AsOption]>,
HelpText<"Enables DWARF64 format for ELF binaries, if debug information emission is enabled.">,
MarshallingInfoFlag<CodeGenOpts<"Dwarf64">>;
-def gdwarf32 : Flag<["-"], "gdwarf32">, Group<g_Group>, Flags<[CC1Option]>,
+def gdwarf32 : Flag<["-"], "gdwarf32">, Group<g_Group>,
+ Flags<[CC1Option, CC1AsOption]>,
HelpText<"Enables DWARF32 format for ELF binaries, if debug information emission is enabled.">;
def gcodeview : Flag<["-"], "gcodeview">,
@@ -2633,8 +2900,10 @@ def gno_record_command_line : Flag<["-"], "gno-record-command-line">,
Group<g_flags_Group>;
def : Flag<["-"], "grecord-gcc-switches">, Alias<grecord_command_line>;
def : Flag<["-"], "gno-record-gcc-switches">, Alias<gno_record_command_line>;
-def gstrict_dwarf : Flag<["-"], "gstrict-dwarf">, Group<g_flags_Group>;
-def gno_strict_dwarf : Flag<["-"], "gno-strict-dwarf">, Group<g_flags_Group>;
+defm strict_dwarf : BoolOption<"g", "strict-dwarf",
+ CodeGenOpts<"DebugStrictDwarf">, DefaultFalse,
+ PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[CoreOption]>>,
+ Group<g_flags_Group>;
defm column_info : BoolOption<"g", "column-info",
CodeGenOpts<"DebugColumnInfo">, DefaultTrue,
NegFlag<SetFalse, [CC1Option]>, PosFlag<SetTrue>, BothFlags<[CoreOption]>>,
@@ -2757,7 +3026,7 @@ def mlong_calls : Flag<["-"], "mlong-calls">, Group<m_Group>,
HelpText<"Generate branches with extended addressability, usually via indirect jumps.">;
def mdouble_EQ : Joined<["-"], "mdouble=">, Group<m_Group>, Values<"32,64">, Flags<[CC1Option]>,
HelpText<"Force double to be 32 bits or 64 bits">,
- MarshallingInfoStringInt<LangOpts<"DoubleSize">, "0">;
+ MarshallingInfoInt<LangOpts<"DoubleSize">, "0">;
def LongDouble_Group : OptionGroup<"<LongDouble group>">, Group<m_Group>,
DocName<"Long double flags">,
DocBrief<[{Selects the long double implementation}]>;
@@ -2791,7 +3060,7 @@ def mcmodel_EQ : Joined<["-"], "mcmodel=">, Group<m_Group>, Flags<[CC1Option]>,
def mtls_size_EQ : Joined<["-"], "mtls-size=">, Group<m_Group>, Flags<[NoXarchOption, CC1Option]>,
HelpText<"Specify bit size of immediate TLS offsets (AArch64 ELF only): "
"12 (for 4KB) | 24 (for 16MB, default) | 32 (for 4GB) | 48 (for 256TB, needs -mcmodel=large)">,
- MarshallingInfoStringInt<CodeGenOpts<"TLSSize">>;
+ MarshallingInfoInt<CodeGenOpts<"TLSSize">>;
def mimplicit_it_EQ : Joined<["-"], "mimplicit-it=">, Group<m_Group>;
def mdefault_build_attributes : Joined<["-"], "mdefault-build-attributes">, Group<m_Group>;
def mno_default_build_attributes : Joined<["-"], "mno-default-build-attributes">, Group<m_Group>;
@@ -2849,22 +3118,24 @@ def mstackrealign : Flag<["-"], "mstackrealign">, Group<m_Group>, Flags<[CC1Opti
MarshallingInfoFlag<CodeGenOpts<"StackRealignment">>;
def mstack_alignment : Joined<["-"], "mstack-alignment=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the stack alignment">,
- MarshallingInfoStringInt<CodeGenOpts<"StackAlignment">>;
+ MarshallingInfoInt<CodeGenOpts<"StackAlignment">>;
def mstack_probe_size : Joined<["-"], "mstack-probe-size=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set the stack probe size">,
- MarshallingInfoStringInt<CodeGenOpts<"StackProbeSize">, "4096">;
+ MarshallingInfoInt<CodeGenOpts<"StackProbeSize">, "4096">;
def mstack_arg_probe : Flag<["-"], "mstack-arg-probe">, Group<m_Group>,
HelpText<"Enable stack probes">;
def mno_stack_arg_probe : Flag<["-"], "mno-stack-arg-probe">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Disable stack probes which are enabled by default">,
MarshallingInfoFlag<CodeGenOpts<"NoStackArgProbe">>;
def mthread_model : Separate<["-"], "mthread-model">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"The thread model to use, e.g. posix, single (posix by default)">, Values<"posix,single">;
+ HelpText<"The thread model to use, e.g. posix, single (posix by default)">, Values<"posix,single">,
+ NormalizedValues<["POSIX", "Single"]>, NormalizedValuesScope<"LangOptions::ThreadModelKind">,
+ MarshallingInfoEnum<LangOpts<"ThreadModel">, "POSIX">;
def meabi : Separate<["-"], "meabi">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Set EABI type, e.g. 4, 5 or gnu (default depends on triple)">, Values<"default,4,5,gnu">,
- MarshallingInfoString<TargetOpts<"EABIVersion">, "Default">,
+ MarshallingInfoEnum<TargetOpts<"EABIVersion">, "Default">,
NormalizedValuesScope<"llvm::EABI">,
- NormalizedValues<["Default", "EABI4", "EABI5", "GNU"]>, AutoNormalizeEnum;
+ NormalizedValues<["Default", "EABI4", "EABI5", "GNU"]>;
def mno_constant_cfstrings : Flag<["-"], "mno-constant-cfstrings">, Group<m_Group>;
def mno_global_merge : Flag<["-"], "mno-global-merge">, Group<m_Group>, Flags<[CC1Option]>,
@@ -2957,8 +3228,8 @@ defm aapcs_bitfield_width : BoolOption<"f", "aapcs-bitfield-width",
" volatile bit-field width is dictated by the field container type. (ARM only).">>,
Group<m_arm_Features_Group>;
-def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_aarch64_Features_Group>,
- HelpText<"Generate code which only uses the general purpose registers (AArch64 only)">;
+def mgeneral_regs_only : Flag<["-"], "mgeneral-regs-only">, Group<m_Group>,
+ HelpText<"Generate code which only uses the general purpose registers (AArch64/x86 only)">;
def mfix_cortex_a53_835769 : Flag<["-"], "mfix-cortex-a53-835769">,
Group<m_aarch64_Features_Group>,
HelpText<"Workaround Cortex-A53 erratum 835769 (AArch64 only)">;
@@ -2980,7 +3251,9 @@ def msve_vector_bits_EQ : Joined<["-"], "msve-vector-bits=">,
Group<m_aarch64_Features_Group>, Flags<[NoXarchOption,CC1Option]>,
HelpText<"Specify the size in bits of an SVE vector register. Defaults to the"
" vector length agnostic value of \"scalable\". (AArch64 only)">,
- Values<"128,256,512,1024,2048,scalable">;
+ Values<"128,256,512,1024,2048,scalable">,
+ NormalizedValues<["128", "256", "512", "1024", "2048", "0"]>,
+ MarshallingInfoEnum<LangOpts<"ArmSveVectorBits">, "0">;
def msign_return_address_EQ : Joined<["-"], "msign-return-address=">,
Flags<[CC1Option]>, Group<m_Group>, Values<"none,all,non-leaf">,
@@ -2992,8 +3265,6 @@ def mharden_sls_EQ : Joined<["-"], "mharden-sls=">,
HelpText<"Select straight-line speculation hardening scope">;
def msimd128 : Flag<["-"], "msimd128">, Group<m_wasm_Features_Group>;
-def munimplemented_simd128 : Flag<["-"], "munimplemented-simd128">, Group<m_wasm_Features_Group>;
-def mno_unimplemented_simd128 : Flag<["-"], "mno-unimplemented-simd128">, Group<m_wasm_Features_Group>;
def mno_simd128 : Flag<["-"], "mno-simd128">, Group<m_wasm_Features_Group>;
def mnontrapping_fptoint : Flag<["-"], "mnontrapping-fptoint">, Group<m_wasm_Features_Group>;
def mno_nontrapping_fptoint : Flag<["-"], "mno-nontrapping-fptoint">, Group<m_wasm_Features_Group>;
@@ -3017,22 +3288,30 @@ def mexec_model_EQ : Joined<["-"], "mexec-model=">, Group<m_wasm_Features_Driver
Values<"command,reactor">,
HelpText<"Execution model (WebAssembly only)">;
+defm amdgpu_ieee : BoolOption<"m", "amdgpu-ieee",
+ CodeGenOpts<"EmitIEEENaNCompliantInsts">, DefaultTrue,
+ PosFlag<SetTrue, [], "Sets the IEEE bit in the expected default floating point "
+  "mode register. Floating point opcodes that support exception flag "
+ "gathering quiet and propagate signaling NaN inputs per IEEE 754-2008. "
+ "This option changes the ABI. (AMDGPU only)">,
+ NegFlag<SetFalse, [CC1Option]>>, Group<m_Group>;
+
def mcode_object_version_EQ : Joined<["-"], "mcode-object-version=">, Group<m_Group>,
HelpText<"Specify code object ABI version. Defaults to 3. (AMDGPU only)">,
MetaVarName<"<version>">, Values<"2,3,4">;
-def mcode_object_v3_legacy : Flag<["-"], "mcode-object-v3">, Group<m_Group>,
- HelpText<"Legacy option to specify code object ABI V2 (-mnocode-object-v3) or V3 (-mcode-object-v3) (AMDGPU only)">;
-def mno_code_object_v3_legacy : Flag<["-"], "mno-code-object-v3">, Group<m_Group>;
-
-def mcumode : Flag<["-"], "mcumode">, Group<m_amdgpu_Features_Group>,
- HelpText<"Specify CU (-mcumode) or WGP (-mno-cumode) wavefront execution mode (AMDGPU only)">;
-def mno_cumode : Flag<["-"], "mno-cumode">, Group<m_amdgpu_Features_Group>;
-
-def mwavefrontsize64 : Flag<["-"], "mwavefrontsize64">, Group<m_Group>,
- HelpText<"Specify wavefront size 64 mode (AMDGPU only)">;
-def mno_wavefrontsize64 : Flag<["-"], "mno-wavefrontsize64">, Group<m_Group>,
- HelpText<"Specify wavefront size 32 mode (AMDGPU only)">;
+defm code_object_v3_legacy : SimpleMFlag<"code-object-v3",
+ "Legacy option to specify code object ABI V3",
+ "Legacy option to specify code object ABI V2",
+ " (AMDGPU only)">;
+defm cumode : SimpleMFlag<"cumode",
+ "Specify CU wavefront", "Specify WGP wavefront",
+ " execution mode (AMDGPU only)", m_amdgpu_Features_Group>;
+defm tgsplit : SimpleMFlag<"tgsplit", "Enable", "Disable",
+ " threadgroup split execution mode (AMDGPU only)", m_amdgpu_Features_Group>;
+defm wavefrontsize64 : SimpleMFlag<"wavefrontsize64",
+ "Specify wavefront size 64", "Specify wavefront size 32",
+ " mode (AMDGPU only)">;
defm unsafe_fp_atomics : BoolOption<"m", "unsafe-fp-atomics",
TargetOpts<"AllowAMDGPUUnsafeFPAtomics">, DefaultFalse,
@@ -3045,11 +3324,14 @@ def maltivec : Flag<["-"], "maltivec">, Group<m_ppc_Features_Group>;
def mno_altivec : Flag<["-"], "mno-altivec">, Group<m_ppc_Features_Group>;
def mpcrel: Flag<["-"], "mpcrel">, Group<m_ppc_Features_Group>;
def mno_pcrel: Flag<["-"], "mno-pcrel">, Group<m_ppc_Features_Group>;
+def mprefixed: Flag<["-"], "mprefixed">, Group<m_ppc_Features_Group>;
+def mno_prefixed: Flag<["-"], "mno-prefixed">, Group<m_ppc_Features_Group>;
def mspe : Flag<["-"], "mspe">, Group<m_ppc_Features_Group>;
def mno_spe : Flag<["-"], "mno-spe">, Group<m_ppc_Features_Group>;
def mefpu2 : Flag<["-"], "mefpu2">, Group<m_ppc_Features_Group>;
def mabi_EQ_vec_extabi : Flag<["-"], "mabi=vec-extabi">, Group<m_Group>, Flags<[CC1Option]>,
- HelpText<"Enable the extended Altivec ABI on AIX (AIX only). Uses volatile and nonvolatile vector registers">;
+ HelpText<"Enable the extended Altivec ABI on AIX (AIX only). Uses volatile and nonvolatile vector registers">,
+ MarshallingInfoFlag<LangOpts<"EnableAIXExtendedAltivecABI">>;
def mabi_EQ_vec_default : Flag<["-"], "mabi=vec-default">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Enable the default Altivec ABI on AIX (AIX only). Uses only volatile vector registers.">;
def mvsx : Flag<["-"], "mvsx">, Group<m_ppc_Features_Group>;
@@ -3110,6 +3392,10 @@ def mno_longcall : Flag<["-"], "mno-longcall">,
Group<m_ppc_Features_Group>;
def mmma: Flag<["-"], "mmma">, Group<m_ppc_Features_Group>;
def mno_mma: Flag<["-"], "mno-mma">, Group<m_ppc_Features_Group>;
+def mrop_protect : Flag<["-"], "mrop-protect">,
+ Group<m_ppc_Features_Group>;
+def mprivileged : Flag<["-"], "mprivileged">,
+ Group<m_ppc_Features_Group>;
def maix_struct_return : Flag<["-"], "maix-struct-return">,
Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Return all structs in memory (PPC32 only)">;
@@ -3179,14 +3465,10 @@ def mstack_protector_guard_EQ : Joined<["-"], "mstack-protector-guard=">, Group<
MarshallingInfoString<CodeGenOpts<"StackProtectorGuard">>;
def mstack_protector_guard_offset_EQ : Joined<["-"], "mstack-protector-guard-offset=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Use the given offset for addressing the stack-protector guard">,
- MarshallingInfoStringInt<CodeGenOpts<"StackProtectorGuardOffset">, "(unsigned)-1">;
+ MarshallingInfoInt<CodeGenOpts<"StackProtectorGuardOffset">, "INT_MAX", "int">;
def mstack_protector_guard_reg_EQ : Joined<["-"], "mstack-protector-guard-reg=">, Group<m_Group>, Flags<[CC1Option]>,
HelpText<"Use the given reg for addressing the stack-protector guard">,
- MarshallingInfoString<CodeGenOpts<"StackProtectorGuardReg">, [{"none"}]>;
-def mpie_copy_relocations : Flag<["-"], "mpie-copy-relocations">,
- Alias<fdirect_access_external_data>, Group<m_Group>;
-def mno_pie_copy_relocations : Flag<["-"], "mno-pie-copy-relocations">,
- Alias<fno_direct_access_external_data>, Group<m_Group>;
+ MarshallingInfoString<CodeGenOpts<"StackProtectorGuardReg">>;
def mfentry : Flag<["-"], "mfentry">, HelpText<"Insert calls to fentry at function entry (x86/SystemZ only)">,
Flags<[CC1Option]>, Group<m_Group>,
MarshallingInfoFlag<CodeGenOpts<"CallFEntry">>;
@@ -3397,8 +3679,8 @@ def pagezero__size : JoinedOrSeparate<["-"], "pagezero_size">;
def pass_exit_codes : Flag<["-", "--"], "pass-exit-codes">, Flags<[Unsupported]>;
def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>,
MarshallingInfoFlag<DiagnosticOpts<"PedanticErrors">>;
-def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>, Flags<[CC1Option]>,
- MarshallingInfoFlag<DiagnosticOpts<"Pedantic">>;
+def pedantic : Flag<["-", "--"], "pedantic">, Group<pedantic_Group>, Flags<[CC1Option,FlangOption,FC1Option]>,
+ HelpText<"Warn on language extensions">, MarshallingInfoFlag<DiagnosticOpts<"Pedantic">>;
def pg : Flag<["-"], "pg">, HelpText<"Enable mcount instrumentation">, Flags<[CC1Option]>,
MarshallingInfoFlag<CodeGenOpts<"InstrumentForProfiling">>;
def pipe : Flag<["-", "--"], "pipe">,
@@ -3422,6 +3704,8 @@ def print_target_triple : Flag<["-", "--"], "print-target-triple">,
HelpText<"Print the normalized target triple">;
def print_effective_triple : Flag<["-", "--"], "print-effective-triple">,
HelpText<"Print the effective target triple">;
+def print_multiarch : Flag<["-", "--"], "print-multiarch">,
+ HelpText<"Print the multiarch target triple">;
def print_prog_name_EQ : Joined<["-", "--"], "print-prog-name=">,
HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">;
def print_resource_dir : Flag<["-", "--"], "print-resource-dir">,
@@ -3430,6 +3714,10 @@ def print_search_dirs : Flag<["-", "--"], "print-search-dirs">,
HelpText<"Print the paths used for finding libraries and programs">;
def print_targets : Flag<["-", "--"], "print-targets">,
HelpText<"Print the registered targets">;
+def print_rocm_search_dirs : Flag<["-", "--"], "print-rocm-search-dirs">,
+ HelpText<"Print the paths used for finding ROCm installation">;
+def print_runtime_dir : Flag<["-", "--"], "print-runtime-dir">,
+  HelpText<"Print the directory pathname containing clang's runtime libraries">;
def private__bundle : Flag<["-"], "private_bundle">;
def pthreads : Flag<["-"], "pthreads">;
defm pthread : BoolOption<"", "pthread",
@@ -3496,7 +3784,7 @@ def static_libgcc : Flag<["-"], "static-libgcc">;
def static_libstdcxx : Flag<["-"], "static-libstdc++">;
def static : Flag<["-", "--"], "static">, Group<Link_Group>, Flags<[NoArgumentUnused]>;
def std_default_EQ : Joined<["-"], "std-default=">;
-def std_EQ : Joined<["-", "--"], "std=">, Flags<[CC1Option]>,
+def std_EQ : Joined<["-", "--"], "std=">, Flags<[CC1Option,FlangOption,FC1Option]>,
Group<CompileOnly_Group>, HelpText<"Language standard to compile for">,
ValuesCode<[{
const char *Values =
@@ -3535,8 +3823,6 @@ def print_supported_cpus : Flag<["-", "--"], "print-supported-cpus">,
MarshallingInfoFlag<FrontendOpts<"PrintSupportedCPUs">>;
def mcpu_EQ_QUESTION : Flag<["-"], "mcpu=?">, Alias<print_supported_cpus>;
def mtune_EQ_QUESTION : Flag<["-"], "mtune=?">, Alias<print_supported_cpus>;
-def gcc_toolchain : Joined<["--"], "gcc-toolchain=">, Flags<[NoXarchOption]>,
- HelpText<"Use the gcc toolchain at the given directory">;
def time : Flag<["-"], "time">,
HelpText<"Time individual commands">;
def traditional_cpp : Flag<["-", "--"], "traditional-cpp">, Flags<[CC1Option]>,
@@ -3558,6 +3844,18 @@ def u : JoinedOrSeparate<["-"], "u">, Group<u_Group>;
def v : Flag<["-"], "v">, Flags<[CC1Option, CoreOption]>,
HelpText<"Show commands to run and use verbose output">,
MarshallingInfoFlag<HeaderSearchOpts<"Verbose">>;
+def altivec_src_compat : Joined<["-"], "faltivec-src-compat=">,
+ Flags<[CC1Option]>, Group<f_Group>,
+ HelpText<"Source-level compatibility for Altivec vectors (for PowerPC "
+ "targets). This includes results of vector comparison (scalar for "
+ "'xl', vector for 'gcc') as well as behavior when initializing with "
+ "a scalar (splatting for 'xl', element zero only for 'gcc'). For "
+ "'mixed', the compatibility is as 'gcc' for 'vector bool/vector "
+ "pixel' and as 'xl' for other types. Current default is 'mixed'.">,
+ Values<"mixed,gcc,xl">,
+ NormalizedValuesScope<"LangOptions::AltivecSrcCompatKind">,
+ NormalizedValues<["Mixed", "GCC", "XL"]>,
+ MarshallingInfoEnum<LangOpts<"AltivecSrcCompat">, "Mixed">;
def verify_debug_info : Flag<["--"], "verify-debug-info">, Flags<[NoXarchOption]>,
HelpText<"Verify the binary representation of debug output">;
def weak_l : Joined<["-"], "weak-l">, Flags<[LinkerInput]>;
@@ -3565,7 +3863,8 @@ def weak__framework : Separate<["-"], "weak_framework">, Flags<[LinkerInput]>;
def weak__library : Separate<["-"], "weak_library">, Flags<[LinkerInput]>;
def weak__reference__mismatches : Separate<["-"], "weak_reference_mismatches">;
def whatsloaded : Flag<["-"], "whatsloaded">;
-def whyload : Flag<["-"], "whyload">;
+def why_load : Flag<["-"], "why_load">;
+def whyload : Flag<["-"], "whyload">, Alias<why_load>;
def w : Flag<["-"], "w">, HelpText<"Suppress all warnings">, Flags<[CC1Option]>,
MarshallingInfoFlag<DiagnosticOpts<"IgnoreWarnings">>;
def x : JoinedOrSeparate<["-"], "x">, Flags<[NoXarchOption,CC1Option]>,
@@ -3605,7 +3904,7 @@ def _CLASSPATH : Separate<["--"], "CLASSPATH">, Alias<fclasspath_EQ>;
def _all_warnings : Flag<["--"], "all-warnings">, Alias<Wall>;
def _analyzer_no_default_checks : Flag<["--"], "analyzer-no-default-checks">, Flags<[NoXarchOption]>;
def _analyzer_output : JoinedOrSeparate<["--"], "analyzer-output">, Flags<[NoXarchOption]>,
- HelpText<"Static analyzer report output format (html|plist|plist-multi-file|plist-html|sarif|text).">;
+ HelpText<"Static analyzer report output format (html|plist|plist-multi-file|plist-html|sarif|sarif-html|text).">;
def _analyze : Flag<["--"], "analyze">, Flags<[NoXarchOption, CoreOption]>,
HelpText<"Run the static analyzer">;
def _assemble : Flag<["--"], "assemble">, Alias<S>;
@@ -3727,6 +4026,8 @@ def mv67 : Flag<["-"], "mv67">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv67"]>;
def mv67t : Flag<["-"], "mv67t">, Group<m_hexagon_Features_Group>,
Alias<mcpu_EQ>, AliasArgs<["hexagonv67t"]>;
+def mv68 : Flag<["-"], "mv68">, Group<m_hexagon_Features_Group>,
+ Alias<mcpu_EQ>, AliasArgs<["hexagonv68"]>;
def mhexagon_hvx : Flag<["-"], "mhvx">, Group<m_hexagon_Features_HVX_Group>,
HelpText<"Enable Hexagon Vector eXtensions">;
def mhexagon_hvx_EQ : Joined<["-"], "mhvx=">,
@@ -3757,6 +4058,20 @@ def mnvs : Flag<["-"], "mnvs">, Group<m_hexagon_Features_Group>,
def mno_nvs : Flag<["-"], "mno-nvs">, Group<m_hexagon_Features_Group>,
Flags<[CC1Option]>, HelpText<"Disable generation of new-value stores">;
+// M68k features flags
+def m68000 : Flag<["-"], "m68000">, Group<m_m68k_Features_Group>;
+def m68010 : Flag<["-"], "m68010">, Group<m_m68k_Features_Group>;
+def m68020 : Flag<["-"], "m68020">, Group<m_m68k_Features_Group>;
+def m68030 : Flag<["-"], "m68030">, Group<m_m68k_Features_Group>;
+def m68040 : Flag<["-"], "m68040">, Group<m_m68k_Features_Group>;
+def m68060 : Flag<["-"], "m68060">, Group<m_m68k_Features_Group>;
+
+foreach i = {0-6} in
+ def ffixed_a#i : Flag<["-"], "ffixed-a"#i>, Group<m_m68k_Features_Group>,
+ HelpText<"Reserve the a"#i#" register (M68k only)">;
+foreach i = {0-7} in
+ def ffixed_d#i : Flag<["-"], "ffixed-d"#i>, Group<m_m68k_Features_Group>,
+ HelpText<"Reserve the d"#i#" register (M68k only)">;
// X86 feature flags
def mx87 : Flag<["-"], "mx87">, Group<m_x86_Features_Group>;
@@ -4072,9 +4387,6 @@ defm devirtualize_speculatively : BooleanFFlag<"devirtualize-speculatively">,
// Generic gfortran options.
def A_DASH : Joined<["-"], "A-">, Group<gfortran_Group>;
-def J : JoinedOrSeparate<["-"], "J">, Flags<[RenderJoined]>, Group<gfortran_Group>;
-def cpp : Flag<["-"], "cpp">, Group<gfortran_Group>;
-def nocpp : Flag<["-"], "nocpp">, Group<gfortran_Group>;
def static_libgfortran : Flag<["-"], "static-libgfortran">, Group<gfortran_Group>;
// "f" options with values for gfortran.
@@ -4082,7 +4394,6 @@ def fblas_matmul_limit_EQ : Joined<["-"], "fblas-matmul-limit=">, Group<gfortran
def fcheck_EQ : Joined<["-"], "fcheck=">, Group<gfortran_Group>;
def fcoarray_EQ : Joined<["-"], "fcoarray=">, Group<gfortran_Group>;
def fconvert_EQ : Joined<["-"], "fconvert=">, Group<gfortran_Group>;
-def ffixed_line_length_VALUE : Joined<["-"], "ffixed-line-length-">, Group<gfortran_Group>;
def ffpe_trap_EQ : Joined<["-"], "ffpe-trap=">, Group<gfortran_Group>;
def ffree_line_length_VALUE : Joined<["-"], "ffree-line-length-">, Group<gfortran_Group>;
def finit_character_EQ : Joined<["-"], "finit-character=">, Group<gfortran_Group>;
@@ -4100,33 +4411,25 @@ defm aggressive_function_elimination : BooleanFFlag<"aggressive-function-elimina
defm align_commons : BooleanFFlag<"align-commons">, Group<gfortran_Group>;
defm all_intrinsics : BooleanFFlag<"all-intrinsics">, Group<gfortran_Group>;
defm automatic : BooleanFFlag<"automatic">, Group<gfortran_Group>;
-defm backslash : BooleanFFlag<"backslash">, Group<gfortran_Group>;
defm backtrace : BooleanFFlag<"backtrace">, Group<gfortran_Group>;
defm bounds_check : BooleanFFlag<"bounds-check">, Group<gfortran_Group>;
defm check_array_temporaries : BooleanFFlag<"check-array-temporaries">, Group<gfortran_Group>;
defm cray_pointer : BooleanFFlag<"cray-pointer">, Group<gfortran_Group>;
defm d_lines_as_code : BooleanFFlag<"d-lines-as-code">, Group<gfortran_Group>;
defm d_lines_as_comments : BooleanFFlag<"d-lines-as-comments">, Group<gfortran_Group>;
-defm default_double_8 : BooleanFFlag<"default-double-8">, Group<gfortran_Group>;
-defm default_integer_8 : BooleanFFlag<"default-integer-8">, Group<gfortran_Group>;
-defm default_real_8 : BooleanFFlag<"default-real-8">, Group<gfortran_Group>;
defm dollar_ok : BooleanFFlag<"dollar-ok">, Group<gfortran_Group>;
defm dump_fortran_optimized : BooleanFFlag<"dump-fortran-optimized">, Group<gfortran_Group>;
defm dump_fortran_original : BooleanFFlag<"dump-fortran-original">, Group<gfortran_Group>;
defm dump_parse_tree : BooleanFFlag<"dump-parse-tree">, Group<gfortran_Group>;
defm external_blas : BooleanFFlag<"external-blas">, Group<gfortran_Group>;
defm f2c : BooleanFFlag<"f2c">, Group<gfortran_Group>;
-defm fixed_form : BooleanFFlag<"fixed-form">, Group<gfortran_Group>;
-defm free_form : BooleanFFlag<"free-form">, Group<gfortran_Group>;
defm frontend_optimize : BooleanFFlag<"frontend-optimize">, Group<gfortran_Group>;
-defm implicit_none : BooleanFFlag<"implicit-none">, Group<gfortran_Group>;
defm init_local_zero : BooleanFFlag<"init-local-zero">, Group<gfortran_Group>;
defm integer_4_integer_8 : BooleanFFlag<"integer-4-integer-8">, Group<gfortran_Group>;
-defm intrinsic_modules_path : BooleanFFlag<"intrinsic-modules-path">, Group<gfortran_Group>;
defm max_identifier_length : BooleanFFlag<"max-identifier-length">, Group<gfortran_Group>;
defm module_private : BooleanFFlag<"module-private">, Group<gfortran_Group>;
defm pack_derived : BooleanFFlag<"pack-derived">, Group<gfortran_Group>;
-defm protect_parens : BooleanFFlag<"protect-parens">, Group<gfortran_Group>;
+//defm protect_parens : BooleanFFlag<"protect-parens">, Group<gfortran_Group>;
defm range_check : BooleanFFlag<"range-check">, Group<gfortran_Group>;
defm real_4_real_10 : BooleanFFlag<"real-4-real-10">, Group<gfortran_Group>;
defm real_4_real_16 : BooleanFFlag<"real-4-real-16">, Group<gfortran_Group>;
@@ -4144,21 +4447,135 @@ defm underscoring : BooleanFFlag<"underscoring">, Group<gfortran_Group>;
defm whole_file : BooleanFFlag<"whole-file">, Group<gfortran_Group>;
// C++ SYCL options
-defm sycl : BoolOption<"f", "sycl",
- LangOpts<"SYCL">, DefaultFalse,
- PosFlag<SetTrue, [CC1Option], "Enable">, NegFlag<SetFalse, [], "Disable">,
- BothFlags<[CoreOption], " SYCL kernels compilation for device">>,
- Group<sycl_Group>;
-def sycl_std_EQ : Joined<["-"], "sycl-std=">, Group<sycl_Group>, Flags<[CC1Option, NoArgumentUnused, CoreOption]>,
- HelpText<"SYCL language standard to compile for.">, Values<"2017,121,1.2.1,sycl-1.2.1">,
- NormalizedValues<["SYCL_2017", "SYCL_2017", "SYCL_2017", "SYCL_2017"]>, NormalizedValuesScope<"LangOptions">,
- MarshallingInfoString<LangOpts<"SYCLVersion">, "SYCL_None">, ShouldParseIf<fsycl.KeyPath>, AutoNormalizeEnum;
+def fsycl : Flag<["-"], "fsycl">, Flags<[NoXarchOption, CoreOption]>,
+ Group<sycl_Group>, HelpText<"Enables SYCL kernels compilation for device">;
+def fno_sycl : Flag<["-"], "fno-sycl">, Flags<[NoXarchOption, CoreOption]>,
+ Group<sycl_Group>, HelpText<"Disables SYCL kernels compilation for device">;
+
+//===----------------------------------------------------------------------===//
+// FLangOption + CoreOption + NoXarchOption
+//===----------------------------------------------------------------------===//
+let Flags = [FlangOption, FlangOnlyOption, NoXarchOption, CoreOption] in {
+def Xflang : Separate<["-"], "Xflang">,
+ HelpText<"Pass <arg> to the flang compiler">, MetaVarName<"<arg>">,
+ Flags<[NoXarchOption, CoreOption]>, Group<CompileOnly_Group>;
+}
//===----------------------------------------------------------------------===//
// FlangOption and FC1 Options
//===----------------------------------------------------------------------===//
-def test_io : Flag<["-"], "test-io">, Flags<[HelpHidden, FlangOption, FC1Option, FlangOnlyOption]>, Group<Action_Group>,
+let Flags = [FC1Option, FlangOption, FlangOnlyOption] in {
+
+def cpp : Flag<["-"], "cpp">, Group<f_Group>,
+ HelpText<"Enable predefined and command line preprocessor macros">;
+def nocpp : Flag<["-"], "nocpp">, Group<f_Group>,
+ HelpText<"Disable predefined and command line preprocessor macros">;
+def module_dir : Separate<["-"], "module-dir">, MetaVarName<"<dir>">,
+ HelpText<"Put MODULE files in <dir>">,
+ DocBrief<[{This option specifies where to put .mod files for compiled modules.
+It is also added to the list of directories to be searched by an USE statement.
+The default is the current directory.}]>;
+
+def ffixed_form : Flag<["-"], "ffixed-form">, Group<f_Group>,
+ HelpText<"Process source files in fixed form">;
+def ffree_form : Flag<["-"], "ffree-form">, Group<f_Group>,
+ HelpText<"Process source files in free form">;
+def ffixed_line_length_EQ : Joined<["-"], "ffixed-line-length=">, Group<f_Group>,
+ HelpText<"Use <value> as character line width in fixed mode">,
+ DocBrief<[{Set column after which characters are ignored in typical fixed-form lines in the source
+file}]>;
+def ffixed_line_length_VALUE : Joined<["-"], "ffixed-line-length-">, Group<f_Group>, Alias<ffixed_line_length_EQ>;
+def fopenacc : Flag<["-"], "fopenacc">, Group<f_Group>,
+ HelpText<"Enable OpenACC">;
+def fdefault_double_8 : Flag<["-"],"fdefault-double-8">, Group<f_Group>,
+ HelpText<"Set the default double precision kind to an 8 byte wide type">;
+def fdefault_integer_8 : Flag<["-"],"fdefault-integer-8">, Group<f_Group>,
+ HelpText<"Set the default integer kind to an 8 byte wide type">;
+def fdefault_real_8 : Flag<["-"],"fdefault-real-8">, Group<f_Group>,
+ HelpText<"Set the default real kind to an 8 byte wide type">;
+def flarge_sizes : Flag<["-"],"flarge-sizes">, Group<f_Group>,
+ HelpText<"Use INTEGER(KIND=8) for the result type in size-related intrinsics">;
+def fbackslash : Flag<["-"], "fbackslash">, Group<f_Group>,
+ HelpText<"Specify that backslash in string introduces an escape character">,
+ DocBrief<[{Change the interpretation of backslashes in string literals from
+a single backslash character to "C-style" escape characters.}]>;
+def fno_backslash : Flag<["-"], "fno-backslash">, Group<f_Group>;
+def fxor_operator : Flag<["-"], "fxor-operator">, Group<f_Group>,
+ HelpText<"Enable .XOR. as a synonym of .NEQV.">;
+def fno_xor_operator : Flag<["-"], "fno-xor-operator">, Group<f_Group>;
+def flogical_abbreviations : Flag<["-"], "flogical-abbreviations">, Group<f_Group>,
+ HelpText<"Enable logical abbreviations">;
+def fno_logical_abbreviations : Flag<["-"], "fno-logical-abbreviations">, Group<f_Group>;
+def fimplicit_none : Flag<["-"], "fimplicit-none">, Group<f_Group>,
+ HelpText<"No implicit typing allowed unless overridden by IMPLICIT statements">;
+def fno_implicit_none : Flag<["-"], "fno-implicit-none">, Group<f_Group>;
+def falternative_parameter_statement : Flag<["-"], "falternative-parameter-statement">, Group<f_Group>,
+ HelpText<"Enable the old style PARAMETER statement">;
+def fintrinsic_modules_path : Separate<["-"], "fintrinsic-modules-path">, Group<f_Group>, MetaVarName<"<dir>">,
+ HelpText<"Specify where to find the compiled intrinsic modules">,
+ DocBrief<[{This option specifies the location of pre-compiled intrinsic modules,
+ if they are not in the default location expected by the compiler.}]>;
+}
+
+def J : JoinedOrSeparate<["-"], "J">,
+ Flags<[RenderJoined, FlangOption, FC1Option, FlangOnlyOption]>,
+ Group<gfortran_Group>,
+ Alias<module_dir>;
+
+//===----------------------------------------------------------------------===//
+// FC1 Options
+//===----------------------------------------------------------------------===//
+let Flags = [FC1Option, FlangOnlyOption] in {
+
+def fget_definition : MultiArg<["-"], "fget-definition", 3>,
+ HelpText<"Get the symbol definition from <line> <start-column> <end-column>">,
+ Group<Action_Group>;
+def test_io : Flag<["-"], "test-io">, Group<Action_Group>,
HelpText<"Run the InputOuputTest action. Use for development and testing only.">;
+def fdebug_unparse_no_sema : Flag<["-"], "fdebug-unparse-no-sema">, Group<Action_Group>,
+ HelpText<"Unparse and stop (skips the semantic checks)">,
+ DocBrief<[{Only run the parser, then unparse the parse-tree and output the
+generated Fortran source file. Semantic checks are disabled.}]>;
+def fdebug_unparse : Flag<["-"], "fdebug-unparse">, Group<Action_Group>,
+ HelpText<"Unparse and stop.">,
+ DocBrief<[{Run the parser and the semantic checks. Then unparse the
+parse-tree and output the generated Fortran source file.}]>;
+def fdebug_unparse_with_symbols : Flag<["-"], "fdebug-unparse-with-symbols">, Group<Action_Group>,
+ HelpText<"Unparse and stop.">;
+def fdebug_dump_symbols : Flag<["-"], "fdebug-dump-symbols">, Group<Action_Group>,
+ HelpText<"Dump symbols after the semantic analysis">;
+def fdebug_dump_parse_tree : Flag<["-"], "fdebug-dump-parse-tree">, Group<Action_Group>,
+ HelpText<"Dump the parse tree">,
+ DocBrief<[{Run the Parser and the semantic checks, and then output the
+parse tree.}]>;
+def fdebug_dump_parse_tree_no_sema : Flag<["-"], "fdebug-dump-parse-tree-no-sema">, Group<Action_Group>,
+ HelpText<"Dump the parse tree (skips the semantic checks)">,
+ DocBrief<[{Run the Parser and then output the parse tree. Semantic
+checks are disabled.}]>;
+def fdebug_dump_all : Flag<["-"], "fdebug-dump-all">, Group<Action_Group>,
+ HelpText<"Dump symbols and the parse tree after the semantic checks">;
+def fdebug_dump_provenance : Flag<["-"], "fdebug-dump-provenance">, Group<Action_Group>,
+ HelpText<"Dump provenance">;
+def fdebug_dump_parsing_log : Flag<["-"], "fdebug-dump-parsing-log">, Group<Action_Group>,
+ HelpText<"Run instrumented parse and dump the parsing log">;
+def fdebug_measure_parse_tree : Flag<["-"], "fdebug-measure-parse-tree">, Group<Action_Group>,
+ HelpText<"Measure the parse tree">;
+def fdebug_pre_fir_tree : Flag<["-"], "fdebug-pre-fir-tree">, Group<Action_Group>,
+ HelpText<"Dump the pre-FIR tree">;
+def fdebug_module_writer : Flag<["-"],"fdebug-module-writer">,
+ HelpText<"Enable debug messages while writing module files">;
+def fget_symbols_sources : Flag<["-"], "fget-symbols-sources">, Group<Action_Group>,
+ HelpText<"Dump symbols and their source code locations">;
+
+def module_suffix : Separate<["-"], "module-suffix">, Group<f_Group>, MetaVarName<"<suffix>">,
+ HelpText<"Use <suffix> as the suffix for module files (the default value is `.mod`)">;
+def fanalyzed_objects_for_unparse : Flag<["-"],
+ "fanalyzed-objects-for-unparse">, Group<f_Group>;
+def fno_analyzed_objects_for_unparse : Flag<["-"],
+ "fno-analyzed-objects-for-unparse">, Group<f_Group>,
+ HelpText<"Do not use the analyzed objects when unparsing">;
+
+}
//===----------------------------------------------------------------------===//
// CC1 Options
@@ -4201,9 +4618,11 @@ def mfpmath : Separate<["-"], "mfpmath">,
HelpText<"Which unit to use for fp math">,
MarshallingInfoString<TargetOpts<"FPMath">>;
-def fpadding_on_unsigned_fixed_point : Flag<["-"], "fpadding-on-unsigned-fixed-point">,
- HelpText<"Force each unsigned fixed point type to have an extra bit of padding to align their scales with those of signed fixed point types">;
-def fno_padding_on_unsigned_fixed_point : Flag<["-"], "fno-padding-on-unsigned-fixed-point">;
+defm padding_on_unsigned_fixed_point : BoolOption<"f", "padding-on-unsigned-fixed-point",
+ LangOpts<"PaddingOnUnsignedFixedPoint">, DefaultFalse,
+ PosFlag<SetTrue, [], "Force each unsigned fixed point type to have an extra bit of padding to align their scales with those of signed fixed point types">,
+ NegFlag<SetFalse>>,
+ ShouldParseIf<ffixed_point.KeyPath>;
//===----------------------------------------------------------------------===//
// Analyzer Options
@@ -4260,7 +4679,7 @@ def analyzer_dump_egraph_EQ : Joined<["-"], "analyzer-dump-egraph=">, Alias<anal
def analyzer_inline_max_stack_depth : Separate<["-"], "analyzer-inline-max-stack-depth">,
HelpText<"Bound on stack depth while inlining (4 by default)">,
// Cap the stack depth at 4 calls (5 stack frames, base + 4 calls).
- MarshallingInfoStringInt<AnalyzerOpts<"InlineMaxStackDepth">, "5">;
+ MarshallingInfoInt<AnalyzerOpts<"InlineMaxStackDepth">, "5">;
def analyzer_inline_max_stack_depth_EQ : Joined<["-"], "analyzer-inline-max-stack-depth=">,
Alias<analyzer_inline_max_stack_depth>;
@@ -4274,7 +4693,7 @@ def analyzer_disable_retry_exhausted : Flag<["-"], "analyzer-disable-retry-exhau
def analyzer_max_loop : Separate<["-"], "analyzer-max-loop">,
HelpText<"The maximum number of times the analyzer will go through a loop">,
- MarshallingInfoStringInt<AnalyzerOpts<"maxBlockVisitOnPath">, "4">;
+ MarshallingInfoInt<AnalyzerOpts<"maxBlockVisitOnPath">, "4">;
def analyzer_stats : Flag<["-"], "analyzer-stats">,
HelpText<"Print internal analyzer statistics.">,
MarshallingInfoFlag<AnalyzerOpts<"PrintStats">>;
@@ -4350,8 +4769,7 @@ def analyzer_checker_option_help_developer : Flag<["-"], "analyzer-checker-optio
def analyzer_config_compatibility_mode : Separate<["-"], "analyzer-config-compatibility-mode">,
HelpText<"Don't emit errors on invalid analyzer-config inputs">,
Values<"true,false">, NormalizedValues<[[{false}], [{true}]]>,
- MarshallingInfoString<AnalyzerOpts<"ShouldEmitErrorsOnInvalidConfigValue">, [{true}]>,
- AutoNormalizeEnum;
+ MarshallingInfoEnum<AnalyzerOpts<"ShouldEmitErrorsOnInvalidConfigValue">, [{true}]>;
def analyzer_config_compatibility_mode_EQ : Joined<["-"], "analyzer-config-compatibility-mode=">,
Alias<analyzer_config_compatibility_mode>;
@@ -4376,12 +4794,7 @@ def migrator_no_finalize_removal : Flag<["-"], "no-finalize-removal">,
//===----------------------------------------------------------------------===//
let Flags = [CC1Option, CC1AsOption, NoDriverOption] in {
-def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">,
- Values<"line-tables-only,line-directives-only,constructor,limited,standalone,unused-types">,
- NormalizedValuesScope<"codegenoptions">,
- NormalizedValues<["DebugLineTablesOnly", "DebugDirectivesOnly", "DebugInfoConstructor",
- "LimitedDebugInfo", "FullDebugInfo", "UnusedTypeInfo"]>,
- MarshallingInfoString<CodeGenOpts<"DebugInfo">, "NoDebugInfo">, AutoNormalizeEnum;
+def debug_info_kind_EQ : Joined<["-"], "debug-info-kind=">;
def debug_info_macro : Flag<["-"], "debug-info-macro">,
HelpText<"Emit macro debug information">,
MarshallingInfoFlag<CodeGenOpts<"MacroDebugInfo">>;
@@ -4389,11 +4802,11 @@ def default_function_attr : Separate<["-"], "default-function-attr">,
HelpText<"Apply given attribute to all functions">,
MarshallingInfoStringVector<CodeGenOpts<"DefaultFunctionAttrs">>;
def dwarf_version_EQ : Joined<["-"], "dwarf-version=">,
- MarshallingInfoStringInt<CodeGenOpts<"DwarfVersion">>;
+ MarshallingInfoInt<CodeGenOpts<"DwarfVersion">>;
def debugger_tuning_EQ : Joined<["-"], "debugger-tuning=">,
- Values<"gdb,lldb,sce">,
- NormalizedValuesScope<"llvm::DebuggerKind">, NormalizedValues<["GDB", "LLDB", "SCE"]>,
- MarshallingInfoString<CodeGenOpts<"DebuggerTuning">, "Default">, AutoNormalizeEnum;
+ Values<"gdb,lldb,sce,dbx">,
+ NormalizedValuesScope<"llvm::DebuggerKind">, NormalizedValues<["GDB", "LLDB", "SCE", "DBX"]>,
+ MarshallingInfoEnum<CodeGenOpts<"DebuggerTuning">, "Default">;
def dwarf_debug_flags : Separate<["-"], "dwarf-debug-flags">,
HelpText<"The string to embed in the Dwarf debug flags record.">,
MarshallingInfoString<CodeGenOpts<"DwarfDebugFlags">>;
@@ -4403,7 +4816,7 @@ def record_command_line : Separate<["-"], "record-command-line">,
def compress_debug_sections_EQ : Joined<["-", "--"], "compress-debug-sections=">,
HelpText<"DWARF debug sections compression type">, Values<"none,zlib,zlib-gnu">,
NormalizedValuesScope<"llvm::DebugCompressionType">, NormalizedValues<["None", "Z", "GNU"]>,
- MarshallingInfoString<CodeGenOpts<"CompressDebugSections">, "None">, AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"CompressDebugSections">, "None">;
def compress_debug_sections : Flag<["-", "--"], "compress-debug-sections">,
Alias<compress_debug_sections_EQ>, AliasArgs<["zlib"]>;
def mno_exec_stack : Flag<["-"], "mnoexecstack">,
@@ -4427,11 +4840,12 @@ def mrelocation_model : Separate<["-"], "mrelocation-model">,
HelpText<"The relocation model to use">, Values<"static,pic,ropi,rwpi,ropi-rwpi,dynamic-no-pic">,
NormalizedValuesScope<"llvm::Reloc">,
NormalizedValues<["Static", "PIC_", "ROPI", "RWPI", "ROPI_RWPI", "DynamicNoPIC"]>,
- MarshallingInfoString<CodeGenOpts<"RelocationModel">, "PIC_">,
- AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"RelocationModel">, "PIC_">;
def fno_math_builtin : Flag<["-"], "fno-math-builtin">,
HelpText<"Disable implicit builtin knowledge of math functions">,
MarshallingInfoFlag<LangOpts<"NoMathBuiltin">>;
+def fno_use_ctor_homing: Flag<["-"], "fno-use-ctor-homing">,
+ HelpText<"Don't use constructor homing for debug info">;
def fuse_ctor_homing: Flag<["-"], "fuse-ctor-homing">,
HelpText<"Use constructor homing if we are using limited debug info already">;
}
@@ -4513,7 +4927,7 @@ def mdebug_pass : Separate<["-"], "mdebug-pass">,
def mframe_pointer_EQ : Joined<["-"], "mframe-pointer=">,
HelpText<"Specify which frame pointers to retain (all, non-leaf, none).">, Values<"all,non-leaf,none">,
NormalizedValuesScope<"CodeGenOptions::FramePointerKind">, NormalizedValues<["All", "NonLeaf", "None"]>,
- MarshallingInfoString<CodeGenOpts<"FramePointer">, "None">, AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"FramePointer">, "None">;
def mdisable_tail_calls : Flag<["-"], "mdisable-tail-calls">,
HelpText<"Disable tail call optimization, keeping the call stack accurate">,
MarshallingInfoFlag<CodeGenOpts<"DisableTailCalls">>;
@@ -4537,15 +4951,12 @@ def mtp : Separate<["-"], "mtp">,
def mlimit_float_precision : Separate<["-"], "mlimit-float-precision">,
HelpText<"Limit float precision to the given value">,
MarshallingInfoString<CodeGenOpts<"LimitFloatPrecision">>;
-def split_stacks : Flag<["-"], "split-stacks">,
- HelpText<"Try to use a split stack if possible.">,
- MarshallingInfoFlag<CodeGenOpts<"EnableSegmentedStacks">>;
def mregparm : Separate<["-"], "mregparm">,
HelpText<"Limit the number of registers available for integer arguments">,
- MarshallingInfoStringInt<CodeGenOpts<"NumRegisterParameters">>;
+ MarshallingInfoInt<CodeGenOpts<"NumRegisterParameters">>;
def msmall_data_limit : Separate<["-"], "msmall-data-limit">,
HelpText<"Put global and static data smaller than the limit into a special section">,
- MarshallingInfoStringInt<CodeGenOpts<"SmallDataLimit">>;
+ MarshallingInfoInt<CodeGenOpts<"SmallDataLimit">>;
def munwind_tables : Flag<["-"], "munwind-tables">,
HelpText<"Generate unwinding tables for all functions">,
MarshallingInfoFlag<CodeGenOpts<"UnwindTables">>;
@@ -4573,7 +4984,7 @@ def linker_option : Joined<["--"], "linker-option=">,
MarshallingInfoStringVector<CodeGenOpts<"LinkerOptions">>;
def fsanitize_coverage_type : Joined<["-"], "fsanitize-coverage-type=">,
HelpText<"Sanitizer coverage type">,
- MarshallingInfoStringInt<CodeGenOpts<"SanitizeCoverageType">>;
+ MarshallingInfoInt<CodeGenOpts<"SanitizeCoverageType">>;
def fsanitize_coverage_indirect_calls
: Flag<["-"], "fsanitize-coverage-indirect-calls">,
HelpText<"Enable sanitizer coverage for indirect calls">,
@@ -4629,13 +5040,13 @@ def fsanitize_coverage_stack_depth
def fpatchable_function_entry_offset_EQ
: Joined<["-"], "fpatchable-function-entry-offset=">, MetaVarName<"<M>">,
HelpText<"Generate M NOPs before function entry">,
- MarshallingInfoStringInt<CodeGenOpts<"PatchableFunctionEntryOffset">>;
+ MarshallingInfoInt<CodeGenOpts<"PatchableFunctionEntryOffset">>;
def fprofile_instrument_EQ : Joined<["-"], "fprofile-instrument=">,
HelpText<"Enable PGO instrumentation. The accepted value is clang, llvm, "
"or none">, Values<"none,clang,llvm,csllvm">,
NormalizedValuesScope<"CodeGenOptions">,
NormalizedValues<["ProfileNone", "ProfileClangInstr", "ProfileIRInstr", "ProfileCSIRInstr"]>,
- MarshallingInfoString<CodeGenOpts<"ProfileInstr">, "ProfileNone">, AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"ProfileInstr">, "ProfileNone">;
def fprofile_instrument_path_EQ : Joined<["-"], "fprofile-instrument-path=">,
HelpText<"Generate instrumented code to collect execution counts into "
"<file> (overridden by LLVM_PROFILE_FILE env var)">,
@@ -4660,6 +5071,21 @@ def fexperimental_debug_variable_locations : Flag<["-"],
"fexperimental-debug-variable-locations">,
HelpText<"Use experimental new value-tracking variable locations">,
MarshallingInfoFlag<CodeGenOpts<"ValueTrackingVariableLocations">>;
+def fverify_debuginfo_preserve
+ : Flag<["-"], "fverify-debuginfo-preserve">,
+ HelpText<"Enable Debug Info Metadata preservation testing in "
+ "optimizations.">,
+ MarshallingInfoFlag<CodeGenOpts<"EnableDIPreservationVerify">>;
+def fverify_debuginfo_preserve_export
+ : Joined<["-"], "fverify-debuginfo-preserve-export=">,
+ MetaVarName<"<file>">,
+ HelpText<"Export debug info (by testing original Debug Info) failures "
+ "into specified (JSON) file (should be abs path as we use "
+ "append mode to insert new JSON objects).">,
+ MarshallingInfoString<CodeGenOpts<"DIBugsReportFilePath">>;
+def fwarn_stack_size_EQ
+ : Joined<["-"], "fwarn-stack-size=">,
+ MarshallingInfoInt<CodeGenOpts<"WarnStackSize">, "UINT_MAX">;
// The driver option takes the key as a parameter to the -msign-return-address=
// and -mbranch-protection= options, but CC1 has a separate option so we
// don't have to parse the parameter twice.
@@ -4675,6 +5101,9 @@ def cfguard_no_checks : Flag<["-"], "cfguard-no-checks">,
def cfguard : Flag<["-"], "cfguard">,
HelpText<"Emit Windows Control Flow Guard tables and checks">,
MarshallingInfoFlag<CodeGenOpts<"ControlFlowGuard">>;
+def ehcontguard : Flag<["-"], "ehcontguard">,
+ HelpText<"Emit Windows EH Continuation Guard tables">,
+ MarshallingInfoFlag<CodeGenOpts<"EHContGuard">>;
def fdenormal_fp_math_f32_EQ : Joined<["-"], "fdenormal-fp-math-f32=">,
Group<f_Group>;
@@ -4707,43 +5136,42 @@ def diagnostic_serialized_file : Separate<["-"], "serialize-diagnostic-file">,
HelpText<"File for serializing diagnostics in a binary format">;
def fdiagnostics_format : Separate<["-"], "fdiagnostics-format">,
- HelpText<"Change diagnostic formatting to match IDE and command line tools">, Values<"clang,msvc,msvc-fallback,vi">,
- NormalizedValuesScope<"DiagnosticOptions">, NormalizedValues<["Clang", "MSVC", "MSVC", "Vi"]>,
- MarshallingInfoString<DiagnosticOpts<"Format">, "Clang">, AutoNormalizeEnum;
+ HelpText<"Change diagnostic formatting to match IDE and command line tools">, Values<"clang,msvc,vi">,
+ NormalizedValuesScope<"DiagnosticOptions">, NormalizedValues<["Clang", "MSVC", "Vi"]>,
+ MarshallingInfoEnum<DiagnosticOpts<"Format">, "Clang">;
def fdiagnostics_show_category : Separate<["-"], "fdiagnostics-show-category">,
HelpText<"Print diagnostic category">, Values<"none,id,name">,
NormalizedValues<["0", "1", "2"]>,
- MarshallingInfoString<DiagnosticOpts<"ShowCategories">, "0">, AutoNormalizeEnum;
+ MarshallingInfoEnum<DiagnosticOpts<"ShowCategories">, "0">;
def fno_diagnostics_use_presumed_location : Flag<["-"], "fno-diagnostics-use-presumed-location">,
HelpText<"Ignore #line directives when displaying diagnostic locations">,
MarshallingInfoNegativeFlag<DiagnosticOpts<"ShowPresumedLoc">>;
def ftabstop : Separate<["-"], "ftabstop">, MetaVarName<"<N>">,
HelpText<"Set the tab stop distance.">,
- MarshallingInfoStringInt<DiagnosticOpts<"TabStop">, "DiagnosticOptions::DefaultTabStop">;
+ MarshallingInfoInt<DiagnosticOpts<"TabStop">, "DiagnosticOptions::DefaultTabStop">;
def ferror_limit : Separate<["-"], "ferror-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">,
- MarshallingInfoStringInt<DiagnosticOpts<"ErrorLimit">>;
+ MarshallingInfoInt<DiagnosticOpts<"ErrorLimit">>;
def fmacro_backtrace_limit : Separate<["-"], "fmacro-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">,
- MarshallingInfoStringInt<DiagnosticOpts<"MacroBacktraceLimit">, "DiagnosticOptions::DefaultMacroBacktraceLimit">;
+ MarshallingInfoInt<DiagnosticOpts<"MacroBacktraceLimit">, "DiagnosticOptions::DefaultMacroBacktraceLimit">;
def ftemplate_backtrace_limit : Separate<["-"], "ftemplate-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">,
- MarshallingInfoStringInt<DiagnosticOpts<"TemplateBacktraceLimit">, "DiagnosticOptions::DefaultTemplateBacktraceLimit">;
+ MarshallingInfoInt<DiagnosticOpts<"TemplateBacktraceLimit">, "DiagnosticOptions::DefaultTemplateBacktraceLimit">;
def fconstexpr_backtrace_limit : Separate<["-"], "fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">,
- MarshallingInfoStringInt<DiagnosticOpts<"ConstexprBacktraceLimit">, "DiagnosticOptions::DefaultConstexprBacktraceLimit">;
+ MarshallingInfoInt<DiagnosticOpts<"ConstexprBacktraceLimit">, "DiagnosticOptions::DefaultConstexprBacktraceLimit">;
def fspell_checking_limit : Separate<["-"], "fspell-checking-limit">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of times to perform spell checking on unrecognized identifiers (0 = no limit).">,
- MarshallingInfoStringInt<DiagnosticOpts<"SpellCheckingLimit">, "DiagnosticOptions::DefaultSpellCheckingLimit">;
+ MarshallingInfoInt<DiagnosticOpts<"SpellCheckingLimit">, "DiagnosticOptions::DefaultSpellCheckingLimit">;
def fcaret_diagnostics_max_lines :
Separate<["-"], "fcaret-diagnostics-max-lines">, MetaVarName<"<N>">,
HelpText<"Set the maximum number of source lines to show in a caret diagnostic">,
- MarshallingInfoStringInt<DiagnosticOpts<"SnippetLineLimit">, "DiagnosticOptions::DefaultSnippetLineLimit">;
+ MarshallingInfoInt<DiagnosticOpts<"SnippetLineLimit">, "DiagnosticOptions::DefaultSnippetLineLimit">;
def verify_EQ : CommaJoined<["-"], "verify=">,
MetaVarName<"<prefixes>">,
HelpText<"Verify diagnostic output using comment directives that start with"
- " prefixes in the comma-separated sequence <prefixes>">,
- MarshallingInfoStringVector<DiagnosticOpts<"VerifyPrefixes">>;
+ " prefixes in the comma-separated sequence <prefixes>">;
def verify : Flag<["-"], "verify">,
HelpText<"Equivalent to -verify=expected">;
def verify_ignore_unexpected : Flag<["-"], "verify-ignore-unexpected">,
@@ -4803,6 +5231,9 @@ def code_completion_with_fixits : Flag<["-"], "code-completion-with-fixits">,
def disable_free : Flag<["-"], "disable-free">,
HelpText<"Disable freeing of memory on exit">,
MarshallingInfoFlag<FrontendOpts<"DisableFree">>;
+def enable_noundef_analysis : Flag<["-"], "enable-noundef-analysis">, Group<f_Group>,
+ HelpText<"Enable analyzing function argument and return types for mandatory definedness">,
+ MarshallingInfoFlag<CodeGenOpts<"EnableNoundefAttrs">>;
def discard_value_names : Flag<["-"], "discard-value-names">,
HelpText<"Discard value names in LLVM IR">,
MarshallingInfoFlag<CodeGenOpts<"DiscardValueNames">>;
@@ -4845,10 +5276,14 @@ def fmodules_embed_all_files : Joined<["-"], "fmodules-embed-all-files">,
HelpText<"Embed the contents of all files read by this compilation into "
"the produced module file.">,
MarshallingInfoFlag<FrontendOpts<"ModulesEmbedAllFiles">>;
+// FIXME: We only need this in C++ modules / Modules TS if we might textually
+// enter a different module (eg, when building a header unit).
def fmodules_local_submodule_visibility :
Flag<["-"], "fmodules-local-submodule-visibility">,
HelpText<"Enforce name visibility rules across submodules of the same "
- "top-level module.">;
+ "top-level module.">,
+ MarshallingInfoFlag<LangOpts<"ModulesLocalVisibility">>,
+ ImpliedByAnyOf<[fmodules_ts.KeyPath, cpp_modules.KeyPath]>;
def fmodules_codegen :
Flag<["-"], "fmodules-codegen">,
HelpText<"Generate code for uses of this module that assumes an explicit "
@@ -4893,8 +5328,6 @@ def analyze : Flag<["-"], "analyze">,
HelpText<"Run static analysis engine">;
def dump_tokens : Flag<["-"], "dump-tokens">,
HelpText<"Run preprocessor, dump internal rep of tokens">;
-def init_only : Flag<["-"], "init-only">,
- HelpText<"Only execute frontend initialization">;
def fixit : Flag<["-"], "fixit">,
HelpText<"Apply fix-it advice to the input source">;
def fixit_EQ : Joined<["-"], "fixit=">,
@@ -4969,8 +5402,7 @@ def arcmt_action_EQ : Joined<["-"], "arcmt-action=">, Flags<[CC1Option, NoDriver
HelpText<"The ARC migration action to take">, Values<"check,modify,migrate">,
NormalizedValuesScope<"FrontendOptions">,
NormalizedValues<["ARCMT_Check", "ARCMT_Modify", "ARCMT_Migrate"]>,
- MarshallingInfoString<FrontendOpts<"ARCMTAction">, "ARCMT_None">,
- AutoNormalizeEnum;
+ MarshallingInfoEnum<FrontendOpts<"ARCMTAction">, "ARCMT_None">;
def opt_record_file : Separate<["-"], "opt-record-file">,
HelpText<"File name to use for YAML optimization record output">,
@@ -4986,11 +5418,19 @@ def print_stats : Flag<["-"], "print-stats">,
def stats_file : Joined<["-"], "stats-file=">,
HelpText<"Filename to write statistics to">,
MarshallingInfoString<FrontendOpts<"StatsFile">>;
-def fdump_record_layouts : Flag<["-"], "fdump-record-layouts">,
- HelpText<"Dump record layout information">;
def fdump_record_layouts_simple : Flag<["-"], "fdump-record-layouts-simple">,
HelpText<"Dump record layout information in a simple form used for testing">,
MarshallingInfoFlag<LangOpts<"DumpRecordLayoutsSimple">>;
+def fdump_record_layouts_canonical : Flag<["-"], "fdump-record-layouts-canonical">,
+ HelpText<"Dump record layout information with canonical field types">,
+ MarshallingInfoFlag<LangOpts<"DumpRecordLayoutsCanonical">>;
+def fdump_record_layouts_complete : Flag<["-"], "fdump-record-layouts-complete">,
+ HelpText<"Dump record layout information for all complete types">,
+ MarshallingInfoFlag<LangOpts<"DumpRecordLayoutsComplete">>;
+def fdump_record_layouts : Flag<["-"], "fdump-record-layouts">,
+ HelpText<"Dump record layout information">,
+ MarshallingInfoFlag<LangOpts<"DumpRecordLayouts">>,
+ ImpliedByAnyOf<[fdump_record_layouts_simple.KeyPath, fdump_record_layouts_complete.KeyPath, fdump_record_layouts_canonical.KeyPath]>;
def fix_what_you_can : Flag<["-"], "fix-what-you-can">,
HelpText<"Apply fix-it advice even in the presence of unfixable errors">,
MarshallingInfoFlag<FrontendOpts<"FixWhatYouCan">>;
@@ -5024,7 +5464,9 @@ def building_pch_with_obj : Flag<["-"], "building-pch-with-obj">,
MarshallingInfoFlag<LangOpts<"BuildingPCHWithObjectFile">>;
def aligned_alloc_unavailable : Flag<["-"], "faligned-alloc-unavailable">,
- HelpText<"Aligned allocation/deallocation functions are unavailable">;
+ HelpText<"Aligned allocation/deallocation functions are unavailable">,
+ MarshallingInfoFlag<LangOpts<"AlignedAllocationUnavailable">>,
+ ShouldParseIf<faligned_allocation.KeyPath>;
//===----------------------------------------------------------------------===//
// Language Options
@@ -5054,7 +5496,9 @@ def split_dwarf_file : Separate<["-"], "split-dwarf-file">,
HelpText<"Name of the split dwarf debug info file to encode in the object file">,
MarshallingInfoString<CodeGenOpts<"SplitDwarfFile">>;
def fno_wchar : Flag<["-"], "fno-wchar">,
- HelpText<"Disable C++ builtin type wchar_t">;
+ HelpText<"Disable C++ builtin type wchar_t">,
+ MarshallingInfoNegativeFlag<LangOpts<"WChar">, cplusplus.KeyPath>,
+ ShouldParseIf<cplusplus.KeyPath>;
def fconstant_string_class : Separate<["-"], "fconstant-string-class">,
MetaVarName<"<class name>">,
HelpText<"Specify the class to use for constant Objective-C string objects.">,
@@ -5062,13 +5506,13 @@ def fconstant_string_class : Separate<["-"], "fconstant-string-class">,
def fobjc_arc_cxxlib_EQ : Joined<["-"], "fobjc-arc-cxxlib=">,
HelpText<"Objective-C++ Automatic Reference Counting standard library kind">, Values<"libc++,libstdc++,none">,
NormalizedValues<["ARCXX_libcxx", "ARCXX_libstdcxx", "ARCXX_nolib"]>,
- MarshallingInfoString<PreprocessorOpts<"ObjCXXARCStandardLibrary">, "ARCXX_nolib">, AutoNormalizeEnum;
+ MarshallingInfoEnum<PreprocessorOpts<"ObjCXXARCStandardLibrary">, "ARCXX_nolib">;
def fobjc_runtime_has_weak : Flag<["-"], "fobjc-runtime-has-weak">,
HelpText<"The target Objective-C runtime supports ARC weak operations">;
def fobjc_dispatch_method_EQ : Joined<["-"], "fobjc-dispatch-method=">,
HelpText<"Objective-C dispatch method to use">, Values<"legacy,non-legacy,mixed">,
NormalizedValuesScope<"CodeGenOptions">, NormalizedValues<["Legacy", "NonLegacy", "Mixed"]>,
- MarshallingInfoString<CodeGenOpts<"ObjCDispatchMethod">, "Legacy">, AutoNormalizeEnum;
+ MarshallingInfoEnum<CodeGenOpts<"ObjCDispatchMethod">, "Legacy">;
def disable_objc_default_synthesize_properties : Flag<["-"], "disable-objc-default-synthesize-properties">,
HelpText<"disable the default synthesis of Objective-C properties">,
MarshallingInfoNegativeFlag<LangOpts<"ObjCDefaultSynthProperties">>;
@@ -5077,9 +5521,10 @@ def fencode_extended_block_signature : Flag<["-"], "fencode-extended-block-signa
MarshallingInfoFlag<LangOpts<"EncodeExtendedBlockSig">>;
def function_alignment : Separate<["-"], "function-alignment">,
HelpText<"default alignment for functions">,
- MarshallingInfoStringInt<LangOpts<"FunctionAlignment">>;
+ MarshallingInfoInt<LangOpts<"FunctionAlignment">>;
def pic_level : Separate<["-"], "pic-level">,
- HelpText<"Value for __PIC__">;
+ HelpText<"Value for __PIC__">,
+ MarshallingInfoInt<LangOpts<"PICLevel">>;
def pic_is_pie : Flag<["-"], "pic-is-pie">,
HelpText<"File is for a position independent executable">,
MarshallingInfoFlag<LangOpts<"PIE">>;
@@ -5097,6 +5542,10 @@ def fallow_pch_with_errors : Flag<["-"], "fallow-pch-with-compiler-errors">,
HelpText<"Accept a PCH file that was created with compiler errors">,
MarshallingInfoFlag<PreprocessorOpts<"AllowPCHWithCompilerErrors">>,
ImpliedByAnyOf<[fallow_pcm_with_errors.KeyPath]>;
+def fallow_pch_with_different_modules_cache_path :
+ Flag<["-"], "fallow-pch-with-different-modules-cache-path">,
+ HelpText<"Accept a PCH file that was created with a different modules cache path">,
+ MarshallingInfoFlag<PreprocessorOpts<"AllowPCHWithDifferentModulesCachePath">>;
def dump_deserialized_pch_decls : Flag<["-"], "dump-deserialized-decls">,
HelpText<"Dump declarations that are deserialized from PCH, for testing">,
MarshallingInfoFlag<PreprocessorOpts<"DumpDeserializedPCHDecls">>;
@@ -5111,13 +5560,15 @@ def stack_protector : Separate<["-"], "stack-protector">,
HelpText<"Enable stack protectors">, Values<"0,1,2,3">,
NormalizedValuesScope<"LangOptions">,
NormalizedValues<["SSPOff", "SSPOn", "SSPStrong", "SSPReq"]>,
- MarshallingInfoString<LangOpts<"StackProtector">, "SSPOff">, AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"StackProtector">, "SSPOff">;
def stack_protector_buffer_size : Separate<["-"], "stack-protector-buffer-size">,
HelpText<"Lower bound for a buffer to be considered for stack protection">,
- MarshallingInfoStringInt<CodeGenOpts<"SSPBufferSize">, "8">;
+ MarshallingInfoInt<CodeGenOpts<"SSPBufferSize">, "8">;
def fvisibility : Separate<["-"], "fvisibility">,
HelpText<"Default type and symbol visibility">,
- MarshallingInfoVisibility<LangOpts<"ValueVisibilityMode">, "DefaultVisibility">;
+ MarshallingInfoVisibility<LangOpts<"ValueVisibilityMode">, "DefaultVisibility">,
+ // Always emitting because of the relation to `-mignore-xcoff-visibility`.
+ AlwaysEmit;
def ftype_visibility : Separate<["-"], "ftype-visibility">,
HelpText<"Default type visibility">,
MarshallingInfoVisibility<LangOpts<"TypeVisibilityMode">, fvisibility.KeyPath>;
@@ -5126,19 +5577,19 @@ def fapply_global_visibility_to_externs : Flag<["-"], "fapply-global-visibility-
MarshallingInfoFlag<LangOpts<"SetVisibilityForExternDecls">>;
def ftemplate_depth : Separate<["-"], "ftemplate-depth">,
HelpText<"Maximum depth of recursive template instantiation">,
- MarshallingInfoStringInt<LangOpts<"InstantiationDepth">, "1024">;
+ MarshallingInfoInt<LangOpts<"InstantiationDepth">, "1024">;
def foperator_arrow_depth : Separate<["-"], "foperator-arrow-depth">,
HelpText<"Maximum number of 'operator->'s to call for a member access">,
- MarshallingInfoStringInt<LangOpts<"ArrowDepth">, "256">;
+ MarshallingInfoInt<LangOpts<"ArrowDepth">, "256">;
def fconstexpr_depth : Separate<["-"], "fconstexpr-depth">,
HelpText<"Maximum depth of recursive constexpr function calls">,
- MarshallingInfoStringInt<LangOpts<"ConstexprCallDepth">, "512">;
+ MarshallingInfoInt<LangOpts<"ConstexprCallDepth">, "512">;
def fconstexpr_steps : Separate<["-"], "fconstexpr-steps">,
HelpText<"Maximum number of steps in constexpr function evaluation">,
- MarshallingInfoStringInt<LangOpts<"ConstexprStepLimit">, "1048576">;
+ MarshallingInfoInt<LangOpts<"ConstexprStepLimit">, "1048576">;
def fbracket_depth : Separate<["-"], "fbracket-depth">,
HelpText<"Maximum nesting level for parentheses, brackets, and braces">,
- MarshallingInfoStringInt<LangOpts<"BracketDepth">, "256">;
+ MarshallingInfoInt<LangOpts<"BracketDepth">, "256">;
defm const_strings : BoolOption<"f", "const-strings",
LangOpts<"ConstStrings">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
@@ -5153,7 +5604,7 @@ def faddress_space_map_mangling_EQ : Joined<["-"], "faddress-space-map-mangling=
HelpText<"Set the mode for address space map based mangling; OpenCL testing purposes only">,
Values<"target,no,yes">, NormalizedValuesScope<"LangOptions">,
NormalizedValues<["ASMM_Target", "ASMM_Off", "ASMM_On"]>,
- MarshallingInfoString<LangOpts<"AddressSpaceMapMangling">, "ASMM_Target">, AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"AddressSpaceMapMangling">, "ASMM_Target">;
def funknown_anytype : Flag<["-"], "funknown-anytype">,
HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">,
MarshallingInfoFlag<LangOpts<"ParseUnknownAnytype">>;
@@ -5166,16 +5617,16 @@ def fdebugger_cast_result_to_id : Flag<["-"], "fdebugger-cast-result-to-id">,
def fdebugger_objc_literal : Flag<["-"], "fdebugger-objc-literal">,
HelpText<"Enable special debugger support for Objective-C subscripting and literals">,
MarshallingInfoFlag<LangOpts<"DebuggerObjCLiteral">>;
-def fdeprecated_macro : Flag<["-"], "fdeprecated-macro">,
- HelpText<"Defines the __DEPRECATED macro">;
-def fno_deprecated_macro : Flag<["-"], "fno-deprecated-macro">,
- HelpText<"Undefines the __DEPRECATED macro">;
+defm deprecated_macro : BoolOption<"f", "deprecated-macro",
+ LangOpts<"Deprecated">, DefaultFalse,
+ PosFlag<SetTrue, [], "Defines">, NegFlag<SetFalse, [], "Undefines">,
+ BothFlags<[], " the __DEPRECATED macro">>;
def fobjc_subscripting_legacy_runtime : Flag<["-"], "fobjc-subscripting-legacy-runtime">,
HelpText<"Allow Objective-C array and dictionary subscripting in legacy runtime">;
// TODO: Enforce values valid for MSVtorDispMode.
def vtordisp_mode_EQ : Joined<["-"], "vtordisp-mode=">,
HelpText<"Control vtordisp placement on win32 targets">,
- MarshallingInfoStringInt<LangOpts<"VtorDispMode">, "1">;
+ MarshallingInfoInt<LangOpts<"VtorDispMode">, "1">;
def fnative_half_type: Flag<["-"], "fnative-half-type">,
HelpText<"Use the native half type for __fp16 instead of promoting to float">,
MarshallingInfoFlag<LangOpts<"NativeHalfType">>,
@@ -5192,7 +5643,7 @@ def fdefault_calling_conv_EQ : Joined<["-"], "fdefault-calling-conv=">,
HelpText<"Set default calling convention">, Values<"cdecl,fastcall,stdcall,vectorcall,regcall">,
NormalizedValuesScope<"LangOptions">,
NormalizedValues<["DCC_CDecl", "DCC_FastCall", "DCC_StdCall", "DCC_VectorCall", "DCC_RegCall"]>,
- MarshallingInfoString<LangOpts<"DefaultCallingConv">, "DCC_None">, AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"DefaultCallingConv">, "DCC_None">;
// These options cannot be marshalled, because they are used to set up the LangOptions defaults.
def finclude_default_header : Flag<["-"], "finclude-default-header">,
@@ -5206,7 +5657,7 @@ def fpreserve_vec3_type : Flag<["-"], "fpreserve-vec3-type">,
def fwchar_type_EQ : Joined<["-"], "fwchar-type=">,
HelpText<"Select underlying type for wchar_t">, Values<"char,short,int">,
NormalizedValues<["1", "2", "4"]>,
- MarshallingInfoString<LangOpts<"WCharSize">, "0">, AutoNormalizeEnum;
+ MarshallingInfoEnum<LangOpts<"WCharSize">, "0">;
defm signed_wchar : BoolOption<"f", "signed-wchar",
LangOpts<"WCharIsSigned">, DefaultTrue,
NegFlag<SetFalse, [CC1Option], "Use an unsigned">, PosFlag<SetTrue, [], "Use a signed">,
@@ -5322,11 +5773,22 @@ def fopenmp_host_ir_file_path : Separate<["-"], "fopenmp-host-ir-file-path">,
def fsycl_is_device : Flag<["-"], "fsycl-is-device">,
HelpText<"Generate code for SYCL device.">,
- MarshallingInfoFlag<LangOpts<"SYCLIsDevice">>,
- ShouldParseIf<fsycl.KeyPath>;
+ MarshallingInfoFlag<LangOpts<"SYCLIsDevice">>;
+def fsycl_is_host : Flag<["-"], "fsycl-is-host">,
+ HelpText<"SYCL host compilation">,
+ MarshallingInfoFlag<LangOpts<"SYCLIsHost">>;
} // let Flags = [CC1Option, NoDriverOption]
+def sycl_std_EQ : Joined<["-"], "sycl-std=">, Group<sycl_Group>,
+ Flags<[CC1Option, NoArgumentUnused, CoreOption]>,
+ HelpText<"SYCL language standard to compile for.">,
+ Values<"2020,2017,121,1.2.1,sycl-1.2.1">,
+ NormalizedValues<["SYCL_2020", "SYCL_2017", "SYCL_2017", "SYCL_2017", "SYCL_2017"]>,
+ NormalizedValuesScope<"LangOptions">,
+ MarshallingInfoEnum<LangOpts<"SYCLVersion">, "SYCL_None">,
+ ShouldParseIf<!strconcat(fsycl_is_device.KeyPath, "||", fsycl_is_host.KeyPath)>;
+
defm cuda_approx_transcendentals : BoolFOption<"cuda-approx-transcendentals",
LangOpts<"CUDADeviceApproxTranscendentals">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Use">, NegFlag<SetFalse, [], "Don't use">,
@@ -5341,6 +5803,8 @@ let Group = Action_Group in {
def emit_obj : Flag<["-"], "emit-obj">,
HelpText<"Emit native object files">;
+def init_only : Flag<["-"], "init-only">,
+ HelpText<"Only execute frontend initialization">;
} // let Group = Action_Group
} // let Flags = [CC1Option, FC1Option, NoDriverOption]
@@ -5444,6 +5908,9 @@ def _SLASH_diagnostics_classic : CLFlag<"diagnostics:classic">,
def _SLASH_D : CLJoinedOrSeparate<"D">, HelpText<"Define macro">,
MetaVarName<"<macro[=value]>">, Alias<D>;
def _SLASH_E : CLFlag<"E">, HelpText<"Preprocess to stdout">, Alias<E>;
+def _SLASH_external_COLON_I : CLJoinedOrSeparate<"external:I">, Alias<isystem>,
+ HelpText<"Add directory to include search path with warnings suppressed">,
+ MetaVarName<"<dir>">;
def _SLASH_fp_except : CLFlag<"fp:except">, HelpText<"">, Alias<ftrapping_math>;
def _SLASH_fp_except_ : CLFlag<"fp:except-">,
HelpText<"">, Alias<fno_trapping_math>;
@@ -5451,6 +5918,9 @@ def _SLASH_fp_fast : CLFlag<"fp:fast">, HelpText<"">, Alias<ffast_math>;
def _SLASH_fp_precise : CLFlag<"fp:precise">,
HelpText<"">, Alias<fno_fast_math>;
def _SLASH_fp_strict : CLFlag<"fp:strict">, HelpText<"">, Alias<fno_fast_math>;
+def _SLASH_fsanitize_EQ_address : CLFlag<"fsanitize=address">,
+ HelpText<"Enable AddressSanitizer">,
+ Alias<fsanitize_EQ>, AliasArgs<["address"]>;
def _SLASH_GA : CLFlag<"GA">, Alias<ftlsmodel_EQ>, AliasArgs<["local-exec"]>,
HelpText<"Assume thread-local variables are defined in the executable">;
def _SLASH_GR : CLFlag<"GR">, HelpText<"Emit RTTI data (default)">;
@@ -5540,7 +6010,7 @@ def _SLASH_execution_charset : CLCompileJoined<"execution-charset:">,
HelpText<"Set runtime encoding, supports only UTF-8">,
Alias<fexec_charset_EQ>;
def _SLASH_std : CLCompileJoined<"std:">,
- HelpText<"Set C++ version (c++14,c++17,c++latest)">;
+ HelpText<"Set language version (c++14,c++17,c++20,c++latest,c11,c17)">;
def _SLASH_U : CLJoinedOrSeparate<"U">, HelpText<"Undefine macro">,
MetaVarName<"<macro>">, Alias<U>;
def _SLASH_validate_charset : CLFlag<"validate-charset">,
@@ -5633,6 +6103,9 @@ def _SLASH_openmp_experimental : CLFlag<"openmp:experimental">,
def _SLASH_tune : CLCompileJoined<"tune:">,
HelpText<"Set CPU for optimization without affecting instruction set">,
Alias<mtune_EQ>;
+def _SLASH_QIntel_jcc_erratum : CLFlag<"QIntel-jcc-erratum">,
+ HelpText<"Align branches within 32-byte boundaries to mitigate the performance impact of the Intel JCC erratum.">,
+ Alias<mbranches_within_32B_boundaries>;
// Non-aliases:
@@ -5646,13 +6119,14 @@ def _SLASH_volatile_Group : OptionGroup<"</volatile group>">,
def _SLASH_EH : CLJoined<"EH">, HelpText<"Set exception handling model">;
def _SLASH_EP : CLFlag<"EP">,
HelpText<"Disable linemarker output and preprocess to stdout">;
+def _SLASH_external_env : CLJoined<"external:env:">,
+ HelpText<"Add dirs in env var <var> to include search path with warnings suppressed">,
+ MetaVarName<"<var>">;
def _SLASH_FA : CLFlag<"FA">,
HelpText<"Output assembly code file during compilation">;
def _SLASH_Fa : CLJoined<"Fa">,
HelpText<"Set assembly output file name (with /FA)">,
MetaVarName<"<file or dir/>">;
-def _SLASH_fallback : CLCompileFlag<"fallback">,
- HelpText<"Fall back to cl.exe if clang-cl fails to compile">;
def _SLASH_FI : CLJoinedOrSeparate<"FI">,
HelpText<"Include file before parsing">, Alias<include_>;
def _SLASH_Fe : CLJoined<"Fe">,
@@ -5666,7 +6140,8 @@ def _SLASH_Fo : CLCompileJoined<"Fo">,
HelpText<"Set output object file (with /c)">,
MetaVarName<"<file or dir/>">;
def _SLASH_guard : CLJoined<"guard:">,
- HelpText<"Enable Control Flow Guard with /guard:cf, or only the table with /guard:cf,nochecks">;
+ HelpText<"Enable Control Flow Guard with /guard:cf, or only the table with /guard:cf,nochecks. "
+ "Enable EH Continuation Guard with /guard:ehcont">;
def _SLASH_GX : CLFlag<"GX">,
HelpText<"Deprecated; use /EHsc">;
def _SLASH_GX_ : CLFlag<"GX-">,
@@ -5690,6 +6165,10 @@ def _SLASH_o : CLJoinedOrSeparate<"o">,
HelpText<"Deprecated (set output file name); use /Fe or /Fe">,
MetaVarName<"<file or dir/>">;
def _SLASH_P : CLFlag<"P">, HelpText<"Preprocess to file">;
+def _SLASH_permissive : CLFlag<"permissive">,
+ HelpText<"Enable some non conforming code to compile">;
+def _SLASH_permissive_ : CLFlag<"permissive-">,
+ HelpText<"Disable non conforming code from compiling (default)">;
def _SLASH_Tc : CLCompileJoinedOrSeparate<"Tc">,
HelpText<"Treat <file> as C source file">, MetaVarName<"<file>">;
def _SLASH_TC : CLCompileFlag<"TC">, HelpText<"Treat all source files as C">;
@@ -5698,6 +6177,15 @@ def _SLASH_Tp : CLCompileJoinedOrSeparate<"Tp">,
def _SLASH_TP : CLCompileFlag<"TP">, HelpText<"Treat all source files as C++">;
def _SLASH_vctoolsdir : CLJoinedOrSeparate<"vctoolsdir">,
HelpText<"Path to the VCToolChain">, MetaVarName<"<dir>">;
+def _SLASH_vctoolsversion : CLJoinedOrSeparate<"vctoolsversion">,
+ HelpText<"For use with /winsysroot, defaults to newest found">;
+def _SLASH_winsdkdir : CLJoinedOrSeparate<"winsdkdir">,
+ HelpText<"Path to the Windows SDK">, MetaVarName<"<dir>">;
+def _SLASH_winsdkversion : CLJoinedOrSeparate<"winsdkversion">,
+ HelpText<"Full version of the Windows SDK, defaults to newest found">;
+def _SLASH_winsysroot : CLJoinedOrSeparate<"winsysroot">,
+ HelpText<"Same as /vctoolsdir <dir>/VC/Tools/MSVC/<vctoolsversion> /winsdkdir <dir>/Windows Kits/10">,
+ MetaVarName<"<dir>">;
def _SLASH_volatile_iso : Option<["/", "-"], "volatile:iso", KIND_FLAG>,
Group<_SLASH_volatile_Group>, Flags<[CLOption, NoXarchOption]>,
HelpText<"Volatile loads and stores have standard semantics">;
@@ -5762,7 +6250,6 @@ def _SLASH_FS : CLIgnoredFlag<"FS">;
def _SLASH_JMC : CLIgnoredFlag<"JMC">;
def _SLASH_kernel_ : CLIgnoredFlag<"kernel-">;
def _SLASH_nologo : CLIgnoredFlag<"nologo">;
-def _SLASH_permissive_ : CLIgnoredFlag<"permissive-">;
def _SLASH_RTC : CLIgnoredJoined<"RTC">;
def _SLASH_sdl : CLIgnoredFlag<"sdl">;
def _SLASH_sdl_ : CLIgnoredFlag<"sdl-">;
@@ -5787,6 +6274,7 @@ def _SLASH_Zo_ : CLIgnoredFlag<"Zo-">;
// Unsupported:
def _SLASH_await : CLFlag<"await">;
+def _SLASH_await_COLON : CLJoined<"await:">;
def _SLASH_constexpr : CLJoined<"constexpr:">;
def _SLASH_AI : CLJoinedOrSeparate<"AI">;
def _SLASH_Bt : CLFlag<"Bt">;
@@ -5794,8 +6282,13 @@ def _SLASH_Bt_plus : CLFlag<"Bt+">;
def _SLASH_clr : CLJoined<"clr">;
def _SLASH_d2 : CLJoined<"d2">;
def _SLASH_doc : CLJoined<"doc">;
+def _SLASH_experimental : CLJoined<"experimental:">;
+def _SLASH_exportHeader : CLFlag<"exportHeader">;
+def _SLASH_external : CLJoined<"external:">;
def _SLASH_FA_joined : CLJoined<"FA">;
def _SLASH_favor : CLJoined<"favor">;
+def _SLASH_fsanitize_address_use_after_return : CLJoined<"fsanitize-address-use-after-return">;
+def _SLASH_fno_sanitize_address_vcasan_lib : CLJoined<"fno-sanitize-address-vcasan-lib">;
def _SLASH_F : CLJoinedOrSeparate<"F">;
def _SLASH_Fm : CLJoined<"Fm">;
def _SLASH_Fr : CLJoined<"Fr">;
@@ -5814,6 +6307,10 @@ def _SLASH_Gm_ : CLFlag<"Gm-">;
def _SLASH_GT : CLFlag<"GT">;
def _SLASH_GZ : CLFlag<"GZ">;
def _SLASH_H : CLFlag<"H">;
+def _SLASH_headername : CLJoined<"headerName:">;
+def _SLASH_headerUnit : CLJoinedOrSeparate<"headerUnit">;
+def _SLASH_headerUnitAngle : CLJoinedOrSeparate<"headerUnit:angle">;
+def _SLASH_headerUnitQuote : CLJoinedOrSeparate<"headerUnit:quote">;
def _SLASH_homeparams : CLFlag<"homeparams">;
def _SLASH_hotpatch : CLFlag<"hotpatch">;
def _SLASH_kernel : CLFlag<"kernel">;
@@ -5821,7 +6318,6 @@ def _SLASH_LN : CLFlag<"LN">;
def _SLASH_MP : CLJoined<"MP">;
def _SLASH_Qfast_transcendentals : CLFlag<"Qfast_transcendentals">;
def _SLASH_QIfist : CLFlag<"QIfist">;
-def _SLASH_QIntel_jcc_erratum : CLFlag<"QIntel-jcc-erratum">;
def _SLASH_Qimprecise_fwaits : CLFlag<"Qimprecise_fwaits">;
def _SLASH_Qpar : CLFlag<"Qpar">;
def _SLASH_Qpar_report : CLJoined<"Qpar-report">;
@@ -5830,6 +6326,10 @@ def _SLASH_Qspectre : CLFlag<"Qspectre">;
def _SLASH_Qspectre_load : CLFlag<"Qspectre-load">;
def _SLASH_Qspectre_load_cf : CLFlag<"Qspectre-load-cf">;
def _SLASH_Qvec_report : CLJoined<"Qvec-report">;
+def _SLASH_reference : CLJoinedOrSeparate<"reference">;
+def _SLASH_sourceDependencies : CLJoinedOrSeparate<"sourceDependencies">;
+def _SLASH_sourceDependenciesDirectives : CLJoinedOrSeparate<"sourceDependencies:directives">;
+def _SLASH_translateInclude : CLFlag<"translateInclude">;
def _SLASH_u : CLFlag<"u">;
def _SLASH_V : CLFlag<"V">;
def _SLASH_WL : CLFlag<"WL">;
diff --git a/clang/include/clang/Driver/SanitizerArgs.h b/clang/include/clang/Driver/SanitizerArgs.h
index ac2b817be1dc..e9e329e7cb53 100644
--- a/clang/include/clang/Driver/SanitizerArgs.h
+++ b/clang/include/clang/Driver/SanitizerArgs.h
@@ -12,6 +12,7 @@
#include "clang/Driver/Types.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <string>
#include <vector>
@@ -25,10 +26,10 @@ class SanitizerArgs {
SanitizerSet RecoverableSanitizers;
SanitizerSet TrapSanitizers;
- std::vector<std::string> UserBlacklistFiles;
- std::vector<std::string> SystemBlacklistFiles;
+ std::vector<std::string> UserIgnorelistFiles;
+ std::vector<std::string> SystemIgnorelistFiles;
std::vector<std::string> CoverageAllowlistFiles;
- std::vector<std::string> CoverageBlocklistFiles;
+ std::vector<std::string> CoverageIgnorelistFiles;
int CoverageFeatures = 0;
int MsanTrackOrigins = 0;
bool MsanUseAfterDtor = true;
@@ -43,6 +44,8 @@ class SanitizerArgs {
bool AsanUseOdrIndicator = false;
bool AsanInvalidPointerCmp = false;
bool AsanInvalidPointerSub = false;
+ bool AsanOutlineInstrumentation = false;
+ llvm::AsanDtorKind AsanDtorKind = llvm::AsanDtorKind::Invalid;
std::string HwasanAbi;
bool LinkRuntimes = true;
bool LinkCXXRuntimes = false;
@@ -56,6 +59,9 @@ class SanitizerArgs {
// True if cross-dso CFI support if provided by the system (i.e. Android).
bool ImplicitCfiRuntime = false;
bool NeedsMemProfRt = false;
+ bool HwasanUseAliases = false;
+ llvm::AsanDetectStackUseAfterReturnMode AsanUseAfterReturn =
+ llvm::AsanDetectStackUseAfterReturnMode::Invalid;
public:
/// Parses the sanitizer arguments from an argument list.
@@ -68,6 +74,9 @@ public:
bool needsHwasanRt() const {
return Sanitizers.has(SanitizerKind::HWAddress);
}
+ bool needsHwasanAliasesRt() const {
+ return needsHwasanRt() && HwasanUseAliases;
+ }
bool needsTsanRt() const { return Sanitizers.has(SanitizerKind::Thread); }
bool needsMsanRt() const { return Sanitizers.has(SanitizerKind::Memory); }
bool needsFuzzer() const { return Sanitizers.has(SanitizerKind::Fuzzer); }
diff --git a/clang/include/clang/Driver/ToolChain.h b/clang/include/clang/Driver/ToolChain.h
index 28c37a44e1eb..882ae40086ce 100644
--- a/clang/include/clang/Driver/ToolChain.h
+++ b/clang/include/clang/Driver/ToolChain.h
@@ -166,6 +166,10 @@ private:
EffectiveTriple = std::move(ET);
}
+ mutable llvm::Optional<CXXStdlibType> cxxStdlibType;
+ mutable llvm::Optional<RuntimeLibType> runtimeLibType;
+ mutable llvm::Optional<UnwindLibType> unwindLibType;
+
protected:
MultilibSet Multilibs;
Multilib SelectedMultilib;
@@ -180,6 +184,11 @@ protected:
virtual Tool *buildStaticLibTool() const;
virtual Tool *getTool(Action::ActionClass AC) const;
+ virtual std::string buildCompilerRTBasename(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ FileType Type,
+ bool AddArch) const;
+
/// \name Utilities for implementing subclasses.
///@{
static void addSystemInclude(const llvm::opt::ArgList &DriverArgs,
@@ -371,6 +380,10 @@ public:
/// Check if the toolchain should use the integrated assembler.
virtual bool useIntegratedAs() const;
+ /// Check if the toolchain should use AsmParser to parse inlineAsm when
+ /// integrated assembler is not default.
+ virtual bool parseInlineAsmUsingAsmParser() const { return false; }
+
/// IsMathErrnoDefault - Does this tool chain use -fmath-errno by default.
virtual bool IsMathErrnoDefault() const { return true; }
@@ -428,16 +441,15 @@ public:
getCompilerRTArgString(const llvm::opt::ArgList &Args, StringRef Component,
FileType Type = ToolChain::FT_Static) const;
- virtual std::string
- getCompilerRTBasename(const llvm::opt::ArgList &Args, StringRef Component,
- FileType Type = ToolChain::FT_Static,
- bool AddArch = true) const;
+ std::string getCompilerRTBasename(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ FileType Type = ToolChain::FT_Static) const;
// Returns target specific runtime path if it exists.
- virtual Optional<std::string> getRuntimePath() const;
+ virtual std::string getRuntimePath() const;
- // Returns target specific C++ library path if it exists.
- virtual Optional<std::string> getCXXStdlibPath() const;
+ // Returns target specific standard library path if it exists.
+ virtual std::string getStdlibPath() const;
// Returns <ResourceDir>/lib/<OSName>/<arch>. This is used by runtimes (such
// as OpenMP) to find arch-specific libraries.
@@ -456,6 +468,12 @@ public:
/// by default.
virtual bool IsUnwindTablesDefault(const llvm::opt::ArgList &Args) const;
+ /// Test whether this toolchain supports outline atomics by default.
+ virtual bool
+ IsAArch64OutlineAtomicsDefault(const llvm::opt::ArgList &Args) const {
+ return false;
+ }
+
/// Test whether this toolchain defaults to PIC.
virtual bool isPICDefault() const = 0;
@@ -528,6 +546,12 @@ public:
/// isThreadModelSupported() - Does this target support a thread model?
virtual bool isThreadModelSupported(const StringRef Model) const;
+ virtual std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const {
+ return TargetTriple.str();
+ }
+
/// ComputeLLVMTriple - Return the LLVM target triple to use, after taking
/// command line arguments into account.
virtual std::string
@@ -589,6 +613,9 @@ public:
// given compilation arguments.
virtual UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const;
+ // Detect the highest available version of libc++ in include path.
+ virtual std::string detectLibcxxVersion(StringRef IncludePath) const;
+
/// AddClangCXXStdlibIncludeArgs - Add the clang -cc1 level arguments to set
/// the include paths to use for the given C++ standard library type.
virtual void
@@ -653,6 +680,10 @@ public:
virtual VersionTuple computeMSVCVersion(const Driver *D,
const llvm::opt::ArgList &Args) const;
+ /// Get paths of HIP device libraries.
+ virtual llvm::SmallVector<std::string, 12>
+ getHIPDeviceLibs(const llvm::opt::ArgList &Args) const;
+
/// Return sanitizers which are available in this toolchain.
virtual SanitizerMask getSupportedSanitizers() const;
diff --git a/clang/include/clang/Driver/Types.def b/clang/include/clang/Driver/Types.def
index 79e8d109cd97..997eea445c22 100644
--- a/clang/include/clang/Driver/Types.def
+++ b/clang/include/clang/Driver/Types.def
@@ -38,6 +38,7 @@
TYPE("cpp-output", PP_C, INVALID, "i", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("c", C, PP_C, "c", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cl", CL, PP_C, "cl", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
+TYPE("clcpp", CLCXX, PP_CXX, "clcpp", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda-cpp-output", PP_CUDA, INVALID, "cui", phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda", CUDA, PP_CUDA, "cu", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
TYPE("cuda", CUDA_DEVICE, PP_CUDA, "cu", phases::Preprocess, phases::Compile, phases::Backend, phases::Assemble, phases::Link)
diff --git a/clang/include/clang/Driver/Types.h b/clang/include/clang/Driver/Types.h
index 97bf5fd672ab..c9d63551090c 100644
--- a/clang/include/clang/Driver/Types.h
+++ b/clang/include/clang/Driver/Types.h
@@ -66,6 +66,14 @@ namespace types {
/// isAcceptedByClang - Can clang handle this input type.
bool isAcceptedByClang(ID Id);
+ /// isDerivedFromC - Is the input derived from C.
+ ///
+ /// That is, does the lexer follow the rules of
+ /// TokenConcatenation::AvoidConcat. If this is the case, the preprocessor may
+ /// add and remove whitespace between tokens. Used to determine whether the
+ /// input can be processed by -fminimize-whitespace.
+ bool isDerivedFromC(ID Id);
+
/// isCXX - Is this a "C++" input (C++ and Obj-C++ sources and headers).
bool isCXX(ID Id);
@@ -81,6 +89,9 @@ namespace types {
/// isObjC - Is this an "ObjC" input (Obj-C and Obj-C++ sources and headers).
bool isObjC(ID Id);
+ /// isOpenCL - Is this an "OpenCL" input.
+ bool isOpenCL(ID Id);
+
/// isFortran - Is this a Fortran input.
bool isFortran(ID Id);
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 96c2a74e97db..c424e79a971c 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -19,6 +19,7 @@
#include "clang/Tooling/Inclusions/IncludeStyle.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Regex.h"
+#include "llvm/Support/SourceMgr.h"
#include <system_error>
namespace llvm {
@@ -52,6 +53,11 @@ std::error_code make_error_code(ParseError e);
/// The ``FormatStyle`` is used to configure the formatting to follow
/// specific guidelines.
struct FormatStyle {
+ // If the BasedOn: was InheritParentConfig and this style needs the file from
+ // the parent directories. It is not part of the actual style for formatting.
+ // Thus the // instead of ///.
+ bool InheritsParentConfig;
+
/// The extra indent or outdent of access modifiers, e.g. ``public:``.
int AccessModifierOffset;
@@ -84,6 +90,35 @@ struct FormatStyle {
/// brackets.
BracketAlignmentStyle AlignAfterOpenBracket;
+ /// Different style for aligning array initializers.
+ enum ArrayInitializerAlignmentStyle {
+ /// Align array column and left justify the columns e.g.:
+ /// \code
+ /// struct test demo[] =
+ /// {
+ /// {56, 23, "hello"},
+ /// {-1, 93463, "world"},
+ /// {7, 5, "!!" }
+ /// };
+ /// \endcode
+ AIAS_Left,
+ /// Align array column and right justify the columns e.g.:
+ /// \code
+ /// struct test demo[] =
+ /// {
+ /// {56, 23, "hello"},
+ /// {-1, 93463, "world"},
+ /// { 7, 5, "!!"}
+ /// };
+ /// \endcode
+ AIAS_Right,
+ /// Don't align array initializer columns.
+ AIAS_None
+ };
+ /// if not ``None``, when using initialization for an array of structs
+ /// aligns the fields into columns.
+ ArrayInitializerAlignmentStyle AlignArrayOfStructures;
+
/// Styles for alignment of consecutive tokens. Tokens can be assignment signs
/// (see
/// ``AlignConsecutiveAssignments``), bitfield member separators (see
@@ -614,37 +649,74 @@ struct FormatStyle {
/// single line.
ShortFunctionStyle AllowShortFunctionsOnASingleLine;
- /// Different styles for handling short if lines
+ /// Different styles for handling short if statements.
enum ShortIfStyle : unsigned char {
/// Never put short ifs on the same line.
/// \code
/// if (a)
- /// return ;
+ /// return;
+ ///
+ /// if (b)
+ /// return;
+ /// else
+ /// return;
+ ///
+ /// if (c)
+ /// return;
/// else {
/// return;
/// }
/// \endcode
SIS_Never,
- /// Without else put short ifs on the same line only if
- /// the else is not a compound statement.
+ /// Put short ifs on the same line only if there is no else statement.
/// \code
/// if (a) return;
+ ///
+ /// if (b)
+ /// return;
/// else
/// return;
+ ///
+ /// if (c)
+ /// return;
+ /// else {
+ /// return;
+ /// }
/// \endcode
SIS_WithoutElse,
- /// Always put short ifs on the same line if
- /// the else is not a compound statement or not.
+ /// Put short ifs, but not else ifs nor else statements, on the same line.
/// \code
/// if (a) return;
+ ///
+ /// if (b) return;
+ /// else if (b)
+ /// return;
+ /// else
+ /// return;
+ ///
+ /// if (c) return;
/// else {
/// return;
/// }
/// \endcode
- SIS_Always,
+ SIS_OnlyFirstIf,
+ /// Always put short ifs, else ifs and else statements on the same
+ /// line.
+ /// \code
+ /// if (a) return;
+ ///
+ /// if (b) return;
+ /// else return;
+ ///
+ /// if (c) return;
+ /// else {
+ /// return;
+ /// }
+ /// \endcode
+ SIS_AllIfsAndElse,
};
- /// If ``true``, ``if (a) return;`` can be put on a single line.
+ /// Dependent on the value, ``if (a) return;`` can be put on a single line.
ShortIfStyle AllowShortIfStatementsOnASingleLine;
/// Different styles for merging short lambdas containing at most one
@@ -1786,7 +1858,14 @@ struct FormatStyle {
/// Base2
/// {};
/// \endcode
- BILS_AfterColon
+ BILS_AfterColon,
+ /// Break inheritance list only after the commas.
+ /// \code
+ /// class Foo : Base1,
+ /// Base2
+ /// {};
+ /// \endcode
+ BILS_AfterComma,
};
/// The inheritance list style to use.
@@ -1885,6 +1964,56 @@ struct FormatStyle {
/// Disables formatting completely.
bool DisableFormat;
+ /// Different styles for empty line after access modifiers.
+ /// ``EmptyLineBeforeAccessModifier`` configuration handles the number of
+ /// empty lines between two access modifiers.
+ enum EmptyLineAfterAccessModifierStyle : unsigned char {
+ /// Remove all empty lines after access modifiers.
+ /// \code
+ /// struct foo {
+ /// private:
+ /// int i;
+ /// protected:
+ /// int j;
+ /// /* comment */
+ /// public:
+ /// foo() {}
+ /// private:
+ /// protected:
+ /// };
+ /// \endcode
+ ELAAMS_Never,
+ /// Keep existing empty lines after access modifiers.
+ /// MaxEmptyLinesToKeep is applied instead.
+ ELAAMS_Leave,
+ /// Always add empty line after access modifiers if there are none.
+ /// MaxEmptyLinesToKeep is applied also.
+ /// \code
+ /// struct foo {
+ /// private:
+ ///
+ /// int i;
+ /// protected:
+ ///
+ /// int j;
+ /// /* comment */
+ /// public:
+ ///
+ /// foo() {}
+ /// private:
+ ///
+ /// protected:
+ ///
+ /// };
+ /// \endcode
+ ELAAMS_Always,
+ };
+
+ /// Defines when to put an empty line after access modifiers.
+ /// ``EmptyLineBeforeAccessModifier`` configuration handles the number of
+ /// empty lines between two access modifiers.
+ EmptyLineAfterAccessModifierStyle EmptyLineAfterAccessModifier;
+
/// Different styles for empty line before access modifiers.
enum EmptyLineBeforeAccessModifierStyle : unsigned char {
/// Remove all empty lines before access modifiers.
@@ -1959,12 +2088,14 @@ struct FormatStyle {
/// not use this in config files, etc. Use at your own risk.
bool ExperimentalAutoDetectBinPacking;
- /// If ``true``, clang-format adds missing namespace end comments and
- /// fixes invalid existing ones.
+ /// If ``true``, clang-format adds missing namespace end comments for
+ /// short namespaces and fixes invalid existing ones. Short ones are
+ /// controlled by "ShortNamespaceLines".
/// \code
/// true: false:
/// namespace a { vs. namespace a {
/// foo(); foo();
+ /// bar(); bar();
/// } // namespace a }
/// \endcode
bool FixNamespaceComments;
@@ -1986,6 +2117,26 @@ struct FormatStyle {
/// For example: BOOST_FOREACH.
std::vector<std::string> ForEachMacros;
+ /// A vector of macros that should be interpreted as conditionals
+ /// instead of as function calls.
+ ///
+ /// These are expected to be macros of the form:
+ /// \code
+ /// IF(...)
+ /// <conditional-body>
+ /// else IF(...)
+ /// <conditional-body>
+ /// \endcode
+ ///
+ /// In the .clang-format configuration file, this can be configured like:
+ /// \code{.yaml}
+ /// IfMacros: ['IF']
+ /// \endcode
+ ///
+ /// For example: `KJ_IF_MAYBE
+ /// <https://github.com/capnproto/capnproto/blob/master/kjdoc/tour.md#maybes>`_
+ std::vector<std::string> IfMacros;
+
/// \brief A vector of macros that should be interpreted as type declarations
/// instead of as function calls.
///
@@ -2042,6 +2193,32 @@ struct FormatStyle {
tooling::IncludeStyle IncludeStyle;
+ /// Specify whether access modifiers should have their own indentation level.
+ ///
+ /// When ``false``, access modifiers are indented (or outdented) relative to
+ /// the record members, respecting the ``AccessModifierOffset``. Record
+ /// members are indented one level below the record.
+ /// When ``true``, access modifiers get their own indentation level. As a
+ /// consequence, record members are always indented 2 levels below the record,
+ /// regardless of the access modifier presence. Value of the
+ /// ``AccessModifierOffset`` is ignored.
+ /// \code
+ /// false: true:
+ /// class C { vs. class C {
+ /// class D { class D {
+ /// void bar(); void bar();
+ /// protected: protected:
+ /// D(); D();
+ /// }; };
+ /// public: public:
+ /// C(); C();
+ /// }; };
+ /// void foo() { void foo() {
+ /// return 1; return 1;
+ /// } }
+ /// \endcode
+ bool IndentAccessModifiers;
+
/// Indent case labels one level from the switch statement.
///
/// When ``false``, use the same indentation level as for the switch
@@ -2316,6 +2493,8 @@ struct FormatStyle {
LK_Java,
/// Should be used for JavaScript.
LK_JavaScript,
+ /// Should be used for JSON.
+ LK_Json,
/// Should be used for Objective-C, Objective-C++.
LK_ObjC,
/// Should be used for Protocol Buffers
@@ -2329,10 +2508,43 @@ struct FormatStyle {
};
bool isCpp() const { return Language == LK_Cpp || Language == LK_ObjC; }
bool isCSharp() const { return Language == LK_CSharp; }
+ bool isJson() const { return Language == LK_Json; }
/// Language, this format style is targeted at.
LanguageKind Language;
+ /// Indentation logic for lambda bodies.
+ enum LambdaBodyIndentationKind : unsigned char {
+ /// Align lambda body relative to the lambda signature. This is the default.
+ /// \code
+ /// someMethod(
+ /// [](SomeReallyLongLambdaSignatureArgument foo) {
+ /// return;
+ /// });
+ /// \endcode
+ LBI_Signature,
+ /// Align lambda body relative to the indentation level of the outer scope
+ /// the lambda signature resides in.
+ /// \code
+ /// someMethod(
+ /// [](SomeReallyLongLambdaSignatureArgument foo) {
+ /// return;
+ /// });
+ /// \endcode
+ LBI_OuterScope,
+ };
+
+ /// The indentation style of lambda bodies. ``Signature`` (the default)
+ /// causes the lambda body to be indented one additional level relative to
+ /// the indentation level of the signature. ``OuterScope`` forces the lambda
+ /// body to be indented one additional level relative to the parent scope
+ /// containing the lambda signature. For callback-heavy code, it may improve
+ /// readability to have the signature indented two levels and to use
+ /// ``OuterScope``. The KJ style guide requires ``OuterScope``.
+ /// `KJ style guide
+ /// <https://github.com/capnproto/capnproto/blob/master/kjdoc/style-guide.md>`_
+ LambdaBodyIndentationKind LambdaBodyIndentation;
+
/// A regular expression matching macros that start a block.
/// \code
/// # With:
@@ -2516,7 +2728,7 @@ struct FormatStyle {
/// (counted relative to leading non-whitespace column).
unsigned PenaltyIndentedWhitespace;
- /// The ``&`` and ``*`` alignment style.
+ /// The ``&``, ``&&`` and ``*`` alignment style.
enum PointerAlignmentStyle : unsigned char {
/// Align pointer to the left.
/// \code
@@ -2538,6 +2750,20 @@ struct FormatStyle {
/// Pointer and reference alignment style.
PointerAlignmentStyle PointerAlignment;
+ /// The number of columns to use for indentation of preprocessor statements.
+ /// When set to -1 (default) ``IndentWidth`` is used also for preprocessor
+ /// statements.
+ /// \code
+ /// PPIndentWidth: 1
+ ///
+ /// #ifdef __linux__
+ /// # define FOO
+ /// #else
+ /// # define BAR
+ /// #endif
+ /// \endcode
+ int PPIndentWidth;
+
/// See documentation of ``RawStringFormats``.
struct RawStringFormat {
/// The language of this raw string.
@@ -2597,6 +2823,31 @@ struct FormatStyle {
/// \endcode
std::vector<RawStringFormat> RawStringFormats;
+ /// \brief The ``&`` and ``&&`` alignment style.
+ enum ReferenceAlignmentStyle {
+ /// Align reference like ``PointerAlignment``.
+ RAS_Pointer,
+ /// Align reference to the left.
+ /// \code
+ /// int& a;
+ /// \endcode
+ RAS_Left,
+ /// Align reference to the right.
+ /// \code
+ /// int &a;
+ /// \endcode
+ RAS_Right,
+ /// Align reference in the middle.
+ /// \code
+ /// int & a;
+ /// \endcode
+ RAS_Middle
+ };
+
+ /// \brief Reference alignment style (overrides ``PointerAlignment`` for
+ /// references).
+ ReferenceAlignmentStyle ReferenceAlignment;
+
// clang-format off
/// If ``true``, clang-format will attempt to re-flow comments.
/// \code
@@ -2613,13 +2864,65 @@ struct FormatStyle {
bool ReflowComments;
// clang-format on
- /// If ``true``, clang-format will sort ``#includes``.
+ /// The maximal number of unwrapped lines that a short namespace spans.
+ /// Defaults to 1.
+ ///
+ /// This determines the maximum length of short namespaces by counting
+ /// unwrapped lines (i.e. containing neither opening nor closing
+ /// namespace brace) and makes "FixNamespaceComments" omit adding
+ /// end comments for those.
/// \code
- /// false: true:
- /// #include "b.h" vs. #include "a.h"
- /// #include "a.h" #include "b.h"
+ /// ShortNamespaceLines: 1 vs. ShortNamespaceLines: 0
+ /// namespace a { namespace a {
+ /// int foo; int foo;
+ /// } } // namespace a
+ ///
+ /// ShortNamespaceLines: 1 vs. ShortNamespaceLines: 0
+ /// namespace b { namespace b {
+ /// int foo; int foo;
+ /// int bar; int bar;
+ /// } // namespace b } // namespace b
/// \endcode
- bool SortIncludes;
+ unsigned ShortNamespaceLines;
+
+ /// Include sorting options.
+ enum SortIncludesOptions : unsigned char {
+ /// Includes are never sorted.
+ /// \code
+ /// #include "B/A.h"
+ /// #include "A/B.h"
+ /// #include "a/b.h"
+ /// #include "A/b.h"
+ /// #include "B/a.h"
+ /// \endcode
+ SI_Never,
+ /// Includes are sorted in an ASCIIbetical or case sensitive fashion.
+ /// \code
+ /// #include "A/B.h"
+ /// #include "A/b.h"
+ /// #include "B/A.h"
+ /// #include "B/a.h"
+ /// #include "a/b.h"
+ /// \endcode
+ SI_CaseSensitive,
+ /// Includes are sorted in an alphabetical or case insensitive fashion.
+ /// \code
+ /// #include "A/B.h"
+ /// #include "A/b.h"
+ /// #include "a/b.h"
+ /// #include "B/A.h"
+ /// #include "B/a.h"
+ /// \endcode
+ SI_CaseInsensitive,
+ };
+
+ /// Controls if and how clang-format will sort ``#includes``.
+ /// If ``Never``, includes are never sorted.
+ /// If ``CaseInsensitive``, includes are sorted in an ASCIIbetical or case
+ /// insensitive fashion.
+ /// If ``CaseSensitive``, includes are sorted in an alphabetical or case
+ /// sensitive fashion.
+ SortIncludesOptions SortIncludes;
/// Position for Java Static imports.
enum SortJavaStaticImportOptions : unsigned char {
@@ -2778,8 +3081,10 @@ struct FormatStyle {
/// \endcode
SBPO_ControlStatements,
/// Same as ``SBPO_ControlStatements`` except this option doesn't apply to
- /// ForEach macros. This is useful in projects where ForEach macros are
- /// treated as function calls instead of control statements.
+ /// ForEach and If macros. This is useful in projects where ForEach/If
+ /// macros are treated as function calls instead of control statements.
+ /// ``SBPO_ControlStatementsExceptForEachMacros`` remains an alias for
+ /// backward compatibility.
/// \code
/// void f() {
/// Q_FOREACH(...) {
@@ -2787,7 +3092,7 @@ struct FormatStyle {
/// }
/// }
/// \endcode
- SBPO_ControlStatementsExceptForEachMacros,
+ SBPO_ControlStatementsExceptControlMacros,
/// Put a space before opening parentheses only if the parentheses are not
/// empty i.e. '()'
/// \code
@@ -2860,14 +3165,27 @@ struct FormatStyle {
/// \endcode
unsigned SpacesBeforeTrailingComments;
- /// If ``true``, spaces will be inserted after ``<`` and before ``>``
- /// in template argument lists.
- /// \code
- /// true: false:
- /// static_cast< int >(arg); vs. static_cast<int>(arg);
- /// std::function< void(int) > fct; std::function<void(int)> fct;
- /// \endcode
- bool SpacesInAngles;
+ /// Styles for adding spacing after ``<`` and before ``>``
+ /// in template argument lists.
+ enum SpacesInAnglesStyle : unsigned char {
+ /// Remove spaces after ``<`` and before ``>``.
+ /// \code
+ /// static_cast<int>(arg);
+ /// std::function<void(int)> fct;
+ /// \endcode
+ SIAS_Never,
+ /// Add spaces after ``<`` and before ``>``.
+ /// \code
+ /// static_cast< int >(arg);
+ /// std::function< void(int) > fct;
+ /// \endcode
+ SIAS_Always,
+ /// Keep a single space after ``<`` and before ``>`` if any spaces were
+ /// present. Option ``Standard: Cpp03`` takes precedence.
+ SIAS_Leave
+ };
+ /// The SpacesInAnglesStyle to use for template argument lists.
+ SpacesInAnglesStyle SpacesInAngles;
/// If ``true``, spaces will be inserted around if/for/switch/while
/// conditions.
@@ -2894,6 +3212,43 @@ struct FormatStyle {
/// \endcode
bool SpacesInCStyleCastParentheses;
+ /// Control of spaces within a single line comment
+ struct SpacesInLineComment {
+ /// The minimum number of spaces at the start of the comment.
+ unsigned Minimum;
+ /// The maximum number of spaces at the start of the comment.
+ unsigned Maximum;
+ };
+
+ /// How many spaces are allowed at the start of a line comment. To disable the
+ /// maximum set it to ``-1``, apart from that the maximum takes precedence
+ /// over the minimum.
+ /// \code Minimum = 1 Maximum = -1
+ /// // One space is forced
+ ///
+ /// // but more spaces are possible
+ ///
+ /// Minimum = 0
+ /// Maximum = 0
+ /// //Forces to start every comment directly after the slashes
+ /// \endcode
+ ///
+ /// Note that in line comment sections the relative indent of the subsequent
+ /// lines is kept, that means the following:
+ /// \code
+ /// before: after:
+ /// Minimum: 1
+ /// //if (b) { // if (b) {
+ /// // return true; // return true;
+ /// //} // }
+ ///
+ /// Maximum: 0
+ /// /// List: ///List:
+ /// /// - Foo /// - Foo
+ /// /// - Bar /// - Bar
+ /// \endcode
+ SpacesInLineComment SpacesInLineCommentPrefix;
+
/// If ``true``, spaces will be inserted after ``(`` and before ``)``.
/// \code
/// true: false:
@@ -3028,6 +3383,7 @@ struct FormatStyle {
bool operator==(const FormatStyle &R) const {
return AccessModifierOffset == R.AccessModifierOffset &&
AlignAfterOpenBracket == R.AlignAfterOpenBracket &&
+ AlignArrayOfStructures == R.AlignArrayOfStructures &&
AlignConsecutiveAssignments == R.AlignConsecutiveAssignments &&
AlignConsecutiveBitFields == R.AlignConsecutiveBitFields &&
AlignConsecutiveDeclarations == R.AlignConsecutiveDeclarations &&
@@ -3077,6 +3433,7 @@ struct FormatStyle {
DeriveLineEnding == R.DeriveLineEnding &&
DerivePointerAlignment == R.DerivePointerAlignment &&
DisableFormat == R.DisableFormat &&
+ EmptyLineAfterAccessModifier == R.EmptyLineAfterAccessModifier &&
EmptyLineBeforeAccessModifier == R.EmptyLineBeforeAccessModifier &&
ExperimentalAutoDetectBinPacking ==
R.ExperimentalAutoDetectBinPacking &&
@@ -3088,6 +3445,7 @@ struct FormatStyle {
R.IncludeStyle.IncludeIsMainRegex &&
IncludeStyle.IncludeIsMainSourceRegex ==
R.IncludeStyle.IncludeIsMainSourceRegex &&
+ IndentAccessModifiers == R.IndentAccessModifiers &&
IndentCaseLabels == R.IndentCaseLabels &&
IndentCaseBlocks == R.IndentCaseBlocks &&
IndentGotoLabels == R.IndentGotoLabels &&
@@ -3101,6 +3459,7 @@ struct FormatStyle {
JavaScriptWrapImports == R.JavaScriptWrapImports &&
KeepEmptyLinesAtTheStartOfBlocks ==
R.KeepEmptyLinesAtTheStartOfBlocks &&
+ LambdaBodyIndentation == R.LambdaBodyIndentation &&
MacroBlockBegin == R.MacroBlockBegin &&
MacroBlockEnd == R.MacroBlockEnd &&
MaxEmptyLinesToKeep == R.MaxEmptyLinesToKeep &&
@@ -3124,6 +3483,9 @@ struct FormatStyle {
R.PenaltyBreakTemplateDeclaration &&
PointerAlignment == R.PointerAlignment &&
RawStringFormats == R.RawStringFormats &&
+ ReferenceAlignment == R.ReferenceAlignment &&
+ ShortNamespaceLines == R.ShortNamespaceLines &&
+ SortIncludes == R.SortIncludes &&
SortJavaStaticImport == R.SortJavaStaticImport &&
SpaceAfterCStyleCast == R.SpaceAfterCStyleCast &&
SpaceAfterLogicalNot == R.SpaceAfterLogicalNot &&
@@ -3145,6 +3507,10 @@ struct FormatStyle {
SpacesInConditionalStatement == R.SpacesInConditionalStatement &&
SpacesInContainerLiterals == R.SpacesInContainerLiterals &&
SpacesInCStyleCastParentheses == R.SpacesInCStyleCastParentheses &&
+ SpacesInLineCommentPrefix.Minimum ==
+ R.SpacesInLineCommentPrefix.Minimum &&
+ SpacesInLineCommentPrefix.Maximum ==
+ R.SpacesInLineCommentPrefix.Maximum &&
SpacesInParentheses == R.SpacesInParentheses &&
SpacesInSquareBrackets == R.SpacesInSquareBrackets &&
SpaceBeforeSquareBrackets == R.SpaceBeforeSquareBrackets &&
@@ -3191,9 +3557,11 @@ struct FormatStyle {
private:
FormatStyleSet StyleSet;
- friend std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
- FormatStyle *Style,
- bool AllowUnknownOptions);
+ friend std::error_code
+ parseConfiguration(llvm::MemoryBufferRef Config, FormatStyle *Style,
+ bool AllowUnknownOptions,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt);
};
/// Returns a format style complying with the LLVM coding standards:
@@ -3251,9 +3619,13 @@ bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
///
/// If AllowUnknownOptions is true, no errors are emitted if unknown
/// format options are occured.
-std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
- FormatStyle *Style,
- bool AllowUnknownOptions = false);
+///
+/// If set all diagnostics are emitted through the DiagHandler.
+std::error_code
+parseConfiguration(llvm::MemoryBufferRef Config, FormatStyle *Style,
+ bool AllowUnknownOptions = false,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler = nullptr,
+ void *DiagHandlerCtx = nullptr);
/// Like above but accepts an unnamed buffer.
inline std::error_code parseConfiguration(StringRef Config, FormatStyle *Style,
@@ -3427,6 +3799,8 @@ inline StringRef getLanguageName(FormatStyle::LanguageKind Language) {
return "Java";
case FormatStyle::LK_JavaScript:
return "JavaScript";
+ case FormatStyle::LK_Json:
+ return "Json";
case FormatStyle::LK_Proto:
return "Proto";
case FormatStyle::LK_TableGen:
diff --git a/clang/include/clang/Frontend/CommandLineSourceLoc.h b/clang/include/clang/Frontend/CommandLineSourceLoc.h
index 0827433462e1..dfc4454b4baf 100644
--- a/clang/include/clang/Frontend/CommandLineSourceLoc.h
+++ b/clang/include/clang/Frontend/CommandLineSourceLoc.h
@@ -48,6 +48,13 @@ public:
return PSL;
}
+
+ /// Serialize ParsedSourceLocation back to a string.
+ std::string ToString() const {
+ return (llvm::Twine(FileName == "<stdin>" ? "-" : FileName) + ":" +
+ Twine(Line) + ":" + Twine(Column))
+ .str();
+ }
};
/// A source range that has been parsed on the command line.
diff --git a/clang/include/clang/Frontend/CompilerInstance.h b/clang/include/clang/Frontend/CompilerInstance.h
index 57632f8770f0..861b15020329 100644
--- a/clang/include/clang/Frontend/CompilerInstance.h
+++ b/clang/include/clang/Frontend/CompilerInstance.h
@@ -22,6 +22,7 @@
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/BuryPointer.h"
+#include "llvm/Support/FileSystem.h"
#include <cassert>
#include <list>
#include <memory>
@@ -150,7 +151,7 @@ class CompilerInstance : public ModuleLoader {
bool HaveFullGlobalModuleIndex = false;
/// One or more modules failed to build.
- bool ModuleBuildFailed = false;
+ bool DisableGeneratingGlobalModuleIndex = false;
/// The stream for verbose output if owned, otherwise nullptr.
std::unique_ptr<raw_ostream> OwnedVerboseOutputStream;
@@ -165,11 +166,10 @@ class CompilerInstance : public ModuleLoader {
/// failed.
struct OutputFile {
std::string Filename;
- std::string TempFilename;
+ Optional<llvm::sys::fs::TempFile> File;
- OutputFile(std::string filename, std::string tempFilename)
- : Filename(std::move(filename)), TempFilename(std::move(tempFilename)) {
- }
+ OutputFile(std::string filename, Optional<llvm::sys::fs::TempFile> file)
+ : Filename(std::move(filename)), File(std::move(file)) {}
};
/// The list of active output files.
@@ -382,6 +382,9 @@ public:
/// Replace the current AuxTarget.
void setAuxTarget(TargetInfo *Value);
+ // Create Target and AuxTarget based on current options
+ bool createTarget();
+
/// }
/// @name Virtual File System
/// {
@@ -693,15 +696,13 @@ public:
/// The files created by this are usually removed on signal, and, depending
/// on FrontendOptions, may also use a temporary file (that is, the data is
/// written to a temporary file which will atomically replace the target
- /// output on success). If a client (like libclang) needs to disable
- /// RemoveFileOnSignal, temporary files will be forced on.
+ /// output on success).
///
/// \return - Null on error.
- std::unique_ptr<raw_pwrite_stream>
- createDefaultOutputFile(bool Binary = true, StringRef BaseInput = "",
- StringRef Extension = "",
- bool RemoveFileOnSignal = true,
- bool CreateMissingDirectories = false);
+ std::unique_ptr<raw_pwrite_stream> createDefaultOutputFile(
+ bool Binary = true, StringRef BaseInput = "", StringRef Extension = "",
+ bool RemoveFileOnSignal = true, bool CreateMissingDirectories = false,
+ bool ForceUseTemporary = false);
/// Create a new output file, optionally deriving the output path name, and
/// add it to the list of tracked output files.
diff --git a/clang/include/clang/Frontend/CompilerInvocation.h b/clang/include/clang/Frontend/CompilerInvocation.h
index 0d83a228c301..2245439d0632 100644
--- a/clang/include/clang/Frontend/CompilerInvocation.h
+++ b/clang/include/clang/Frontend/CompilerInvocation.h
@@ -61,7 +61,15 @@ bool ParseDiagnosticArgs(DiagnosticOptions &Opts, llvm::opt::ArgList &Args,
DiagnosticsEngine *Diags = nullptr,
bool DefaultDiagColor = true);
-class CompilerInvocationBase {
+/// The base class of CompilerInvocation with reference semantics.
+///
+/// This class stores option objects behind reference-counted pointers. This is
+/// useful for clients that want to keep some option object around even after
+/// CompilerInvocation gets destroyed, without making a copy.
+///
+/// This is a separate class so that we can implement the copy constructor and
+/// assignment here and leave them defaulted in the rest of CompilerInvocation.
+class CompilerInvocationRefBase {
public:
/// Options controlling the language variant.
std::shared_ptr<LangOptions> LangOpts;
@@ -78,10 +86,15 @@ public:
/// Options controlling the preprocessor (aside from \#include handling).
std::shared_ptr<PreprocessorOptions> PreprocessorOpts;
- CompilerInvocationBase();
- CompilerInvocationBase(const CompilerInvocationBase &X);
- CompilerInvocationBase &operator=(const CompilerInvocationBase &) = delete;
- ~CompilerInvocationBase();
+ /// Options controlling the static analyzer.
+ AnalyzerOptionsRef AnalyzerOpts;
+
+ CompilerInvocationRefBase();
+ CompilerInvocationRefBase(const CompilerInvocationRefBase &X);
+ CompilerInvocationRefBase(CompilerInvocationRefBase &&X);
+ CompilerInvocationRefBase &operator=(CompilerInvocationRefBase X);
+ CompilerInvocationRefBase &operator=(CompilerInvocationRefBase &&X);
+ ~CompilerInvocationRefBase();
LangOptions *getLangOpts() { return LangOpts.get(); }
const LangOptions *getLangOpts() const { return LangOpts.get(); }
@@ -110,17 +123,13 @@ public:
const PreprocessorOptions &getPreprocessorOpts() const {
return *PreprocessorOpts;
}
-};
-/// Helper class for holding the data necessary to invoke the compiler.
-///
-/// This class is designed to represent an abstract "invocation" of the
-/// compiler, including data such as the include paths, the code generation
-/// options, the warning flags, and so on.
-class CompilerInvocation : public CompilerInvocationBase {
- /// Options controlling the static analyzer.
- AnalyzerOptionsRef AnalyzerOpts;
+ AnalyzerOptionsRef getAnalyzerOpts() const { return AnalyzerOpts; }
+};
+/// The base class of CompilerInvocation with value semantics.
+class CompilerInvocationValueBase {
+protected:
MigratorOptions MigratorOpts;
/// Options controlling IRgen and the backend.
@@ -139,11 +148,46 @@ class CompilerInvocation : public CompilerInvocationBase {
PreprocessorOutputOptions PreprocessorOutputOpts;
public:
- CompilerInvocation() : AnalyzerOpts(new AnalyzerOptions()) {}
+ MigratorOptions &getMigratorOpts() { return MigratorOpts; }
+ const MigratorOptions &getMigratorOpts() const { return MigratorOpts; }
- /// @name Utility Methods
- /// @{
+ CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+
+ DependencyOutputOptions &getDependencyOutputOpts() {
+ return DependencyOutputOpts;
+ }
+
+ const DependencyOutputOptions &getDependencyOutputOpts() const {
+ return DependencyOutputOpts;
+ }
+
+ FileSystemOptions &getFileSystemOpts() { return FileSystemOpts; }
+
+ const FileSystemOptions &getFileSystemOpts() const {
+ return FileSystemOpts;
+ }
+
+ FrontendOptions &getFrontendOpts() { return FrontendOpts; }
+ const FrontendOptions &getFrontendOpts() const { return FrontendOpts; }
+
+ PreprocessorOutputOptions &getPreprocessorOutputOpts() {
+ return PreprocessorOutputOpts;
+ }
+
+ const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
+ return PreprocessorOutputOpts;
+ }
+};
+/// Helper class for holding the data necessary to invoke the compiler.
+///
+/// This class is designed to represent an abstract "invocation" of the
+/// compiler, including data such as the include paths, the code generation
+/// options, the warning flags, and so on.
+class CompilerInvocation : public CompilerInvocationRefBase,
+ public CompilerInvocationValueBase {
+public:
/// Create a compiler invocation from a list of input options.
/// \returns true on success.
///
@@ -199,67 +243,41 @@ public:
void generateCC1CommandLine(llvm::SmallVectorImpl<const char *> &Args,
StringAllocator SA) const;
- /// @}
- /// @name Option Subgroups
- /// @{
-
- AnalyzerOptionsRef getAnalyzerOpts() const { return AnalyzerOpts; }
-
- MigratorOptions &getMigratorOpts() { return MigratorOpts; }
- const MigratorOptions &getMigratorOpts() const { return MigratorOpts; }
-
- CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; }
- const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
-
- DependencyOutputOptions &getDependencyOutputOpts() {
- return DependencyOutputOpts;
- }
-
- const DependencyOutputOptions &getDependencyOutputOpts() const {
- return DependencyOutputOpts;
- }
-
- FileSystemOptions &getFileSystemOpts() { return FileSystemOpts; }
-
- const FileSystemOptions &getFileSystemOpts() const {
- return FileSystemOpts;
- }
-
- FrontendOptions &getFrontendOpts() { return FrontendOpts; }
- const FrontendOptions &getFrontendOpts() const { return FrontendOpts; }
-
- PreprocessorOutputOptions &getPreprocessorOutputOpts() {
- return PreprocessorOutputOpts;
- }
-
- const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
- return PreprocessorOutputOpts;
- }
-
- /// @}
-
private:
- /// Parse options for flags that expose marshalling information in their
- /// table-gen definition
- ///
- /// \param Args - The argument list containing the arguments to parse
- /// \param Diags - The DiagnosticsEngine associated with CreateFromArgs
- /// \returns - True if parsing was successful, false otherwise
- bool parseSimpleArgs(const llvm::opt::ArgList &Args,
- DiagnosticsEngine &Diags);
+ static bool CreateFromArgsImpl(CompilerInvocation &Res,
+ ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags, const char *Argv0);
+
+ /// Generate command line options from DiagnosticOptions.
+ static void GenerateDiagnosticArgs(const DiagnosticOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ StringAllocator SA, bool DefaultDiagColor);
/// Parse command line options that map to LangOptions.
- static void ParseLangArgs(LangOptions &Opts, llvm::opt::ArgList &Args,
+ static bool ParseLangArgs(LangOptions &Opts, llvm::opt::ArgList &Args,
InputKind IK, const llvm::Triple &T,
std::vector<std::string> &Includes,
DiagnosticsEngine &Diags);
+ /// Generate command line options from LangOptions.
+ static void GenerateLangArgs(const LangOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ StringAllocator SA, const llvm::Triple &T,
+ InputKind IK);
+
/// Parse command line options that map to CodeGenOptions.
static bool ParseCodeGenArgs(CodeGenOptions &Opts, llvm::opt::ArgList &Args,
InputKind IK, DiagnosticsEngine &Diags,
const llvm::Triple &T,
const std::string &OutputFile,
const LangOptions &LangOptsRef);
+
+ // Generate command line options from CodeGenOptions.
+ static void GenerateCodeGenArgs(const CodeGenOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ StringAllocator SA, const llvm::Triple &T,
+ const std::string &OutputFile,
+ const LangOptions *LangOpts);
};
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
diff --git a/clang/include/clang/Frontend/DependencyOutputOptions.h b/clang/include/clang/Frontend/DependencyOutputOptions.h
index 7a4f3337936f..78a2841d1e10 100644
--- a/clang/include/clang/Frontend/DependencyOutputOptions.h
+++ b/clang/include/clang/Frontend/DependencyOutputOptions.h
@@ -20,6 +20,14 @@ enum class ShowIncludesDestination { None, Stdout, Stderr };
/// DependencyOutputFormat - Format for the compiler dependency file.
enum class DependencyOutputFormat { Make, NMake };
+/// ExtraDepKind - The kind of extra dependency file.
+enum ExtraDepKind {
+ EDK_SanitizeIgnorelist,
+ EDK_ProfileList,
+ EDK_ModuleFile,
+ EDK_DepFileEntry,
+};
+
/// DependencyOutputOptions - Options for controlling the compiler dependency
/// file generation.
class DependencyOutputOptions {
@@ -31,6 +39,10 @@ public:
/// problems.
unsigned AddMissingHeaderDeps : 1; ///< Add missing headers to dependency list
unsigned IncludeModuleFiles : 1; ///< Include module file dependencies.
+ unsigned ShowSkippedHeaderIncludes : 1; ///< With ShowHeaderIncludes, show
+ /// also includes that were skipped
+ /// due to the "include guard
+ /// optimization" or #pragma once.
/// Destination of cl.exe style /showIncludes info.
ShowIncludesDestination ShowIncludesDest = ShowIncludesDestination::None;
@@ -51,8 +63,9 @@ public:
/// must contain at least one entry.
std::vector<std::string> Targets;
- /// A list of filenames to be used as extra dependencies for every target.
- std::vector<std::string> ExtraDeps;
+ /// A list of extra dependencies (filename and kind) to be used for every
+ /// target.
+ std::vector<std::pair<std::string, ExtraDepKind>> ExtraDeps;
/// In /showIncludes mode, pretend the main TU is a header with this name.
std::string ShowIncludesPretendHeader;
@@ -66,7 +79,8 @@ public:
public:
DependencyOutputOptions()
: IncludeSystemHeaders(0), ShowHeaderIncludes(0), UsePhonyTargets(0),
- AddMissingHeaderDeps(0), IncludeModuleFiles(0) {}
+ AddMissingHeaderDeps(0), IncludeModuleFiles(0),
+ ShowSkippedHeaderIncludes(0) {}
};
} // end namespace clang
diff --git a/clang/include/clang/Frontend/FrontendAction.h b/clang/include/clang/Frontend/FrontendAction.h
index 319b3bc62cc4..dfefddfb4527 100644
--- a/clang/include/clang/Frontend/FrontendAction.h
+++ b/clang/include/clang/Frontend/FrontendAction.h
@@ -234,7 +234,7 @@ public:
/// Perform any per-file post processing, deallocate per-file
/// objects, and run statistics and output file cleanup code.
- void EndSourceFile();
+ virtual void EndSourceFile();
/// @}
};
@@ -302,15 +302,16 @@ public:
/// some existing action's behavior. It implements every virtual method in
/// the FrontendAction interface by forwarding to the wrapped action.
class WrapperFrontendAction : public FrontendAction {
+protected:
std::unique_ptr<FrontendAction> WrappedAction;
-protected:
bool PrepareToExecuteAction(CompilerInstance &CI) override;
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
bool BeginInvocation(CompilerInstance &CI) override;
bool BeginSourceFileAction(CompilerInstance &CI) override;
void ExecuteAction() override;
+ void EndSourceFile() override;
void EndSourceFileAction() override;
bool shouldEraseOutputFiles() override;
diff --git a/clang/include/clang/Frontend/FrontendActions.h b/clang/include/clang/Frontend/FrontendActions.h
index 25ca95980806..ff8d4417eaa4 100644
--- a/clang/include/clang/Frontend/FrontendActions.h
+++ b/clang/include/clang/Frontend/FrontendActions.h
@@ -34,6 +34,17 @@ public:
bool usesPreprocessorOnly() const override { return false; }
};
+/// Preprocessor-based frontend action that also loads PCH files.
+class ReadPCHAndPreprocessAction : public FrontendAction {
+ void ExecuteAction() override;
+
+ std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) override;
+
+public:
+ bool usesPreprocessorOnly() const override { return false; }
+};
+
class DumpCompilerOptionsAction : public FrontendAction {
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override {
diff --git a/clang/include/clang/Frontend/FrontendOptions.h b/clang/include/clang/Frontend/FrontendOptions.h
index 223c1e05d053..15c905d712a3 100644
--- a/clang/include/clang/Frontend/FrontendOptions.h
+++ b/clang/include/clang/Frontend/FrontendOptions.h
@@ -15,10 +15,11 @@
#include "clang/Sema/CodeCompleteOptions.h"
#include "clang/Serialization/ModuleFileExtension.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MemoryBuffer.h"
#include <cassert>
+#include <map>
#include <memory>
#include <string>
-#include <unordered_map>
#include <vector>
namespace llvm {
@@ -374,6 +375,10 @@ public:
std::string MTMigrateDir;
std::string ARCMTMigrateReportOut;
+ /// The input kind, either specified via -x argument or deduced from the input
+ /// file name.
+ InputKind DashX;
+
/// The input files and their types.
SmallVector<FrontendInputFile, 0> Inputs;
@@ -400,7 +405,7 @@ public:
std::string ActionName;
/// Args to pass to the plugins
- std::unordered_map<std::string,std::vector<std::string>> PluginArgs;
+ std::map<std::string, std::vector<std::string>> PluginArgs;
/// The list of plugin actions to run in addition to the normal action.
std::vector<std::string> AddPluginActions;
diff --git a/clang/include/clang/Frontend/PreprocessorOutputOptions.h b/clang/include/clang/Frontend/PreprocessorOutputOptions.h
index 72e5ad1137fb..257538ee0606 100644
--- a/clang/include/clang/Frontend/PreprocessorOutputOptions.h
+++ b/clang/include/clang/Frontend/PreprocessorOutputOptions.h
@@ -24,6 +24,7 @@ public:
unsigned ShowIncludeDirectives : 1; ///< Print includes, imports etc. within preprocessed output.
unsigned RewriteIncludes : 1; ///< Preprocess include directives only.
unsigned RewriteImports : 1; ///< Include contents of transitively-imported modules.
+ unsigned MinimizeWhitespace : 1; ///< Ignore whitespace from input.
public:
PreprocessorOutputOptions() {
@@ -36,6 +37,7 @@ public:
ShowIncludeDirectives = 0;
RewriteIncludes = 0;
RewriteImports = 0;
+ MinimizeWhitespace = 0;
}
};
diff --git a/clang/include/clang/Frontend/TextDiagnostic.h b/clang/include/clang/Frontend/TextDiagnostic.h
index 7cf54839afbe..a2eec46beccd 100644
--- a/clang/include/clang/Frontend/TextDiagnostic.h
+++ b/clang/include/clang/Frontend/TextDiagnostic.h
@@ -50,8 +50,7 @@ public:
/// TextDiagnostic logic requires.
static void printDiagnosticLevel(raw_ostream &OS,
DiagnosticsEngine::Level Level,
- bool ShowColors,
- bool CLFallbackMode = false);
+ bool ShowColors);
/// Pretty-print a diagnostic message to a raw_ostream.
///
diff --git a/clang/include/clang/Index/CommentToXML.h b/clang/include/clang/Index/CommentToXML.h
index 66b8650c5efb..29904f163dc7 100644
--- a/clang/include/clang/Index/CommentToXML.h
+++ b/clang/include/clang/Index/CommentToXML.h
@@ -10,7 +10,6 @@
#define LLVM_CLANG_INDEX_COMMENTTOXML_H
#include "clang/Basic/LLVM.h"
-#include <memory>
namespace clang {
class ASTContext;
diff --git a/clang/include/clang/Index/DeclOccurrence.h b/clang/include/clang/Index/DeclOccurrence.h
index 16f03a84579e..72f5799466bd 100644
--- a/clang/include/clang/Index/DeclOccurrence.h
+++ b/clang/include/clang/Index/DeclOccurrence.h
@@ -9,26 +9,31 @@
#ifndef LLVM_CLANG_INDEX_DECLOCCURRENCE_H
#define LLVM_CLANG_INDEX_DECLOCCURRENCE_H
+#include "clang/AST/DeclBase.h"
#include "clang/Basic/LLVM.h"
#include "clang/Index/IndexSymbol.h"
+#include "clang/Lex/MacroInfo.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
namespace clang {
-class Decl;
-
namespace index {
struct DeclOccurrence {
SymbolRoleSet Roles;
unsigned Offset;
- const Decl *Dcl;
+ llvm::PointerUnion<const Decl *, const MacroInfo *> DeclOrMacro;
+ const IdentifierInfo *MacroName = nullptr;
SmallVector<SymbolRelation, 3> Relations;
DeclOccurrence(SymbolRoleSet R, unsigned Offset, const Decl *D,
ArrayRef<SymbolRelation> Relations)
- : Roles(R), Offset(Offset), Dcl(D),
+ : Roles(R), Offset(Offset), DeclOrMacro(D),
Relations(Relations.begin(), Relations.end()) {}
+ DeclOccurrence(SymbolRoleSet R, unsigned Offset, const IdentifierInfo *Name,
+ const MacroInfo *MI)
+ : Roles(R), Offset(Offset), DeclOrMacro(MI), MacroName(Name) {}
friend bool operator<(const DeclOccurrence &LHS, const DeclOccurrence &RHS) {
return LHS.Offset < RHS.Offset;
diff --git a/clang/include/clang/Index/IndexSymbol.h b/clang/include/clang/Index/IndexSymbol.h
index de98b8147e8a..2ba81986c2fe 100644
--- a/clang/include/clang/Index/IndexSymbol.h
+++ b/clang/include/clang/Index/IndexSymbol.h
@@ -75,6 +75,7 @@ enum class SymbolSubKind : uint8_t {
AccessorSetter,
UsingTypename,
UsingValue,
+ UsingEnum,
};
typedef uint16_t SymbolPropertySet;
diff --git a/clang/include/clang/Index/IndexingOptions.h b/clang/include/clang/Index/IndexingOptions.h
index 9f5c03d1b3b9..d19653848d59 100644
--- a/clang/include/clang/Index/IndexingOptions.h
+++ b/clang/include/clang/Index/IndexingOptions.h
@@ -28,6 +28,7 @@ struct IndexingOptions {
SystemSymbolFilterKind::DeclarationsOnly;
bool IndexFunctionLocals = false;
bool IndexImplicitInstantiation = false;
+ bool IndexMacros = true;
// Whether to index macro definitions in the Preprocesor when preprocessor
// callback is not available (e.g. after parsing has finished). Note that
// macro references are not available in Proprocessor.
diff --git a/clang/include/clang/Interpreter/Interpreter.h b/clang/include/clang/Interpreter/Interpreter.h
new file mode 100644
index 000000000000..020cbe2db3d0
--- /dev/null
+++ b/clang/include/clang/Interpreter/Interpreter.h
@@ -0,0 +1,71 @@
+//===--- Interpreter.h - Incremental Compilation and Execution---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the component which performs incremental code
+// compilation and execution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_INTERPRETER_H
+#define LLVM_CLANG_INTERPRETER_INTERPRETER_H
+
+#include "clang/Interpreter/PartialTranslationUnit.h"
+
+#include "llvm/Support/Error.h"
+
+#include <memory>
+#include <vector>
+
+namespace llvm {
+namespace orc {
+class ThreadSafeContext;
+}
+class Module;
+} // namespace llvm
+
+namespace clang {
+
+class CompilerInstance;
+class DeclGroupRef;
+class IncrementalExecutor;
+class IncrementalParser;
+
+/// Create a pre-configured \c CompilerInstance for incremental processing.
+class IncrementalCompilerBuilder {
+public:
+ static llvm::Expected<std::unique_ptr<CompilerInstance>>
+ create(std::vector<const char *> &ClangArgv);
+};
+
+/// Provides top-level interfaces for incremental compilation and execution.
+class Interpreter {
+ std::unique_ptr<llvm::orc::ThreadSafeContext> TSCtx;
+ std::unique_ptr<IncrementalParser> IncrParser;
+ std::unique_ptr<IncrementalExecutor> IncrExecutor;
+
+ Interpreter(std::unique_ptr<CompilerInstance> CI, llvm::Error &Err);
+
+public:
+ ~Interpreter();
+ static llvm::Expected<std::unique_ptr<Interpreter>>
+ create(std::unique_ptr<CompilerInstance> CI);
+ const CompilerInstance *getCompilerInstance() const;
+ llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Code);
+ llvm::Error Execute(PartialTranslationUnit &T);
+ llvm::Error ParseAndExecute(llvm::StringRef Code) {
+ auto PTU = Parse(Code);
+ if (!PTU)
+ return PTU.takeError();
+ if (PTU->TheModule)
+ return Execute(*PTU);
+ return llvm::Error::success();
+ }
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_INTERPRETER_INTERPRETER_H
diff --git a/clang/include/clang/Interpreter/PartialTranslationUnit.h b/clang/include/clang/Interpreter/PartialTranslationUnit.h
new file mode 100644
index 000000000000..bf91d559452b
--- /dev/null
+++ b/clang/include/clang/Interpreter/PartialTranslationUnit.h
@@ -0,0 +1,37 @@
+//===--- PartialTranslationUnit.h - Incremental Compilation -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utilities tracking the incrementally processed pieces of
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INTERPRETER_PARTIALTRANSLATIONUNIT_H
+#define LLVM_CLANG_INTERPRETER_PARTIALTRANSLATIONUNIT_H
+
+#include <memory>
+
+namespace llvm {
+class Module;
+}
+
+namespace clang {
+
+class TranslationUnitDecl;
+
+/// The class keeps track of various objects created as part of processing
+/// incremental inputs.
+struct PartialTranslationUnit {
+ TranslationUnitDecl *TUPart = nullptr;
+
+ /// The llvm IR produced for the input.
+ std::unique_ptr<llvm::Module> TheModule;
+};
+} // namespace clang
+
+#endif // LLVM_CLANG_INTERPRETER_PARTIALTRANSLATIONUNIT_H
diff --git a/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h b/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
index d832df6b6146..9bb820156c25 100644
--- a/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
+++ b/clang/include/clang/Lex/DependencyDirectivesSourceMinimizer.h
@@ -44,6 +44,8 @@ enum TokenKind {
pp_ifdef,
pp_ifndef,
pp_elif,
+ pp_elifdef,
+ pp_elifndef,
pp_else,
pp_endif,
decl_at_import,
diff --git a/clang/include/clang/Lex/HeaderMap.h b/clang/include/clang/Lex/HeaderMap.h
index accb061e51ba..53108b00bd16 100644
--- a/clang/include/clang/Lex/HeaderMap.h
+++ b/clang/include/clang/Lex/HeaderMap.h
@@ -16,6 +16,7 @@
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MemoryBuffer.h"
#include <memory>
@@ -29,6 +30,7 @@ struct HMapHeader;
class HeaderMapImpl {
std::unique_ptr<const llvm::MemoryBuffer> FileBuffer;
bool NeedsBSwap;
+ mutable llvm::StringMap<StringRef> ReverseMap;
public:
HeaderMapImpl(std::unique_ptr<const llvm::MemoryBuffer> File, bool NeedsBSwap)
@@ -48,6 +50,9 @@ public:
/// Print the contents of this headermap to stderr.
void dump() const;
+ /// Return key for specified path.
+ StringRef reverseLookupFilename(StringRef DestPath) const;
+
private:
unsigned getEndianAdjustedWord(unsigned X) const;
const HMapHeader &getHeader() const;
@@ -79,9 +84,10 @@ public:
/// ".." and a filename "../file.h" this would be "../../file.h".
Optional<FileEntryRef> LookupFile(StringRef Filename, FileManager &FM) const;
- using HeaderMapImpl::lookupFilename;
- using HeaderMapImpl::getFileName;
using HeaderMapImpl::dump;
+ using HeaderMapImpl::getFileName;
+ using HeaderMapImpl::lookupFilename;
+ using HeaderMapImpl::reverseLookupFilename;
};
} // end namespace clang.
diff --git a/clang/include/clang/Lex/LiteralSupport.h b/clang/include/clang/Lex/LiteralSupport.h
index 0c4f0fe277b7..f131f045a73e 100644
--- a/clang/include/clang/Lex/LiteralSupport.h
+++ b/clang/include/clang/Lex/LiteralSupport.h
@@ -63,6 +63,7 @@ public:
bool isUnsigned : 1;
bool isLong : 1; // This is *not* set for long long.
bool isLongLong : 1;
+ bool isSizeT : 1; // 1z, 1uz (C++2b)
bool isHalf : 1; // 1.0h
bool isFloat : 1; // 1.0f
bool isImaginary : 1; // 1.0i
diff --git a/clang/include/clang/Lex/MacroInfo.h b/clang/include/clang/Lex/MacroInfo.h
index 550abf35c841..0347a7a37186 100644
--- a/clang/include/clang/Lex/MacroInfo.h
+++ b/clang/include/clang/Lex/MacroInfo.h
@@ -521,7 +521,7 @@ public:
}
static void Profile(llvm::FoldingSetNodeID &ID, Module *OwningModule,
- IdentifierInfo *II) {
+ const IdentifierInfo *II) {
ID.AddPointer(OwningModule);
ID.AddPointer(II);
}
diff --git a/clang/include/clang/Lex/ModuleLoader.h b/clang/include/clang/Lex/ModuleLoader.h
index c1f7f068c0f1..bf044e0e5f50 100644
--- a/clang/include/clang/Lex/ModuleLoader.h
+++ b/clang/include/clang/Lex/ModuleLoader.h
@@ -45,9 +45,6 @@ public:
// The module exists but cannot be imported due to a configuration mismatch.
ConfigMismatch,
-
- // We failed to load the module, but we shouldn't cache the failure.
- OtherUncachedFailure,
};
llvm::PointerIntPair<Module *, 2, LoadResultKind> Storage;
diff --git a/clang/include/clang/Lex/ModuleMap.h b/clang/include/clang/Lex/ModuleMap.h
index 6827408f10a3..41f85a1f572d 100644
--- a/clang/include/clang/Lex/ModuleMap.h
+++ b/clang/include/clang/Lex/ModuleMap.h
@@ -14,7 +14,6 @@
#ifndef LLVM_CLANG_LEX_MODULEMAP_H
#define LLVM_CLANG_LEX_MODULEMAP_H
-#include "clang/Basic/FileEntry.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
@@ -38,6 +37,7 @@ namespace clang {
class DiagnosticsEngine;
class DirectoryEntry;
+class FileEntry;
class FileManager;
class HeaderSearch;
class SourceManager;
@@ -648,13 +648,15 @@ public:
/// Sets the umbrella header of the given module to the given
/// header.
- void setUmbrellaHeader(Module *Mod, FileEntryRef UmbrellaHeader,
- Twine NameAsWritten);
+ void setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory);
/// Sets the umbrella directory of the given module to the given
/// directory.
- void setUmbrellaDir(Module *Mod, DirectoryEntryRef UmbrellaDir,
- Twine NameAsWritten);
+ void setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory);
/// Adds this header to the given module.
/// \param Role The role of the header wrt the module.
@@ -696,6 +698,9 @@ public:
module_iterator module_begin() const { return Modules.begin(); }
module_iterator module_end() const { return Modules.end(); }
+ llvm::iterator_range<module_iterator> modules() const {
+ return {module_begin(), module_end()};
+ }
/// Cache a module load. M might be nullptr.
void cacheModuleLoad(const IdentifierInfo &II, Module *M) {
diff --git a/clang/include/clang/Lex/PPCallbacks.h b/clang/include/clang/Lex/PPCallbacks.h
index de5e8eb2ca22..bcf49c577735 100644
--- a/clang/include/clang/Lex/PPCallbacks.h
+++ b/clang/include/clang/Lex/PPCallbacks.h
@@ -191,6 +191,10 @@ public:
StringRef Str) {
}
+ /// Callback invoked when a \#pragma mark comment is read.
+ virtual void PragmaMark(SourceLocation Loc, StringRef Trivia) {
+ }
+
/// Callback invoked when a \#pragma detect_mismatch directive is
/// read.
virtual void PragmaDetectMismatch(SourceLocation Loc, StringRef Name,
@@ -351,6 +355,22 @@ public:
const MacroDefinition &MD) {
}
+ /// Hook called whenever an \#elifdef branch is taken.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ /// \param MD The MacroDefinition if the name was a macro, null otherwise.
+ virtual void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ }
+ /// Hook called whenever an \#elifdef is skipped.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ virtual void Elifdef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ }
+
/// Hook called whenever an \#ifndef is seen.
/// \param Loc the source location of the directive.
/// \param MacroNameTok Information on the token being tested.
@@ -359,6 +379,22 @@ public:
const MacroDefinition &MD) {
}
+ /// Hook called whenever an \#elifndef branch is taken.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ /// \param MD The MacroDefinition if the name was a macro, null otherwise.
+ virtual void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ }
+ /// Hook called whenever an \#elifndef is skipped.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ virtual void Elifndef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ }
+
/// Hook called whenever an \#else is seen.
/// \param Loc the source location of the directive.
/// \param IfLoc the source location of the \#if/\#ifdef/\#ifndef directive.
@@ -586,6 +622,19 @@ public:
Second->Ifdef(Loc, MacroNameTok, MD);
}
+ /// Hook called whenever an \#elifdef is taken.
+ void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ First->Elifdef(Loc, MacroNameTok, MD);
+ Second->Elifdef(Loc, MacroNameTok, MD);
+ }
+ /// Hook called whenever an \#elifdef is skipped.
+ void Elifdef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) override {
+ First->Elifdef(Loc, ConditionRange, IfLoc);
+ Second->Elifdef(Loc, ConditionRange, IfLoc);
+ }
+
/// Hook called whenever an \#ifndef is seen.
void Ifndef(SourceLocation Loc, const Token &MacroNameTok,
const MacroDefinition &MD) override {
@@ -593,6 +642,19 @@ public:
Second->Ifndef(Loc, MacroNameTok, MD);
}
+ /// Hook called whenever an \#elifndef is taken.
+ void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ First->Elifndef(Loc, MacroNameTok, MD);
+ Second->Elifndef(Loc, MacroNameTok, MD);
+ }
+ /// Hook called whenever an \#elifndef is skipped.
+ void Elifndef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) override {
+ First->Elifndef(Loc, ConditionRange, IfLoc);
+ Second->Elifndef(Loc, ConditionRange, IfLoc);
+ }
+
/// Hook called whenever an \#else is seen.
void Else(SourceLocation Loc, SourceLocation IfLoc) override {
First->Else(Loc, IfLoc);
diff --git a/clang/include/clang/Lex/PPConditionalDirectiveRecord.h b/clang/include/clang/Lex/PPConditionalDirectiveRecord.h
index 077437435303..d8c556ae2531 100644
--- a/clang/include/clang/Lex/PPConditionalDirectiveRecord.h
+++ b/clang/include/clang/Lex/PPConditionalDirectiveRecord.h
@@ -93,6 +93,14 @@ private:
const MacroDefinition &MD) override;
void Ifndef(SourceLocation Loc, const Token &MacroNameTok,
const MacroDefinition &MD) override;
+ void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override;
+ void Elifdef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) override;
+ void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override;
+ void Elifndef(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) override;
void Else(SourceLocation Loc, SourceLocation IfLoc) override;
void Endif(SourceLocation Loc, SourceLocation IfLoc) override;
};
diff --git a/clang/include/clang/Lex/PreprocessingRecord.h b/clang/include/clang/Lex/PreprocessingRecord.h
index 11607811dc8f..0137d871e916 100644
--- a/clang/include/clang/Lex/PreprocessingRecord.h
+++ b/clang/include/clang/Lex/PreprocessingRecord.h
@@ -539,6 +539,13 @@ class Token;
void Ifndef(SourceLocation Loc, const Token &MacroNameTok,
const MacroDefinition &MD) override;
+ using PPCallbacks::Elifdef;
+ using PPCallbacks::Elifndef;
+ void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override;
+ void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override;
+
/// Hook called whenever the 'defined' operator is seen.
void Defined(const Token &MacroNameTok, const MacroDefinition &MD,
SourceRange Range) override;
diff --git a/clang/include/clang/Lex/Preprocessor.h b/clang/include/clang/Lex/Preprocessor.h
index 68139cb24b31..7ab13640ce2c 100644
--- a/clang/include/clang/Lex/Preprocessor.h
+++ b/clang/include/clang/Lex/Preprocessor.h
@@ -264,9 +264,11 @@ class Preprocessor {
/// avoid tearing the Lexer and etc. down).
bool IncrementalProcessing = false;
+public:
/// The kind of translation unit we are processing.
- TranslationUnitKind TUKind;
+ const TranslationUnitKind TUKind;
+private:
/// The code-completion handler.
CodeCompletionHandler *CodeComplete = nullptr;
@@ -781,8 +783,7 @@ private:
/// deserializing from PCH, we don't need to deserialize identifier & macros
/// just so that we can report that they are unused, we just warn using
/// the SourceLocations of this set (that will be filled by the ASTReader).
- /// We are using SmallPtrSet instead of a vector for faster removal.
- using WarnUnusedMacroLocsTy = llvm::SmallPtrSet<SourceLocation, 32>;
+ using WarnUnusedMacroLocsTy = llvm::SmallDenseSet<SourceLocation, 32>;
WarnUnusedMacroLocsTy WarnUnusedMacroLocs;
/// A "freelist" of MacroArg objects that can be
@@ -1151,7 +1152,7 @@ public:
/// Register an exported macro for a module and identifier.
ModuleMacro *addModuleMacro(Module *Mod, IdentifierInfo *II, MacroInfo *Macro,
ArrayRef<ModuleMacro *> Overrides, bool &IsNew);
- ModuleMacro *getModuleMacro(Module *Mod, IdentifierInfo *II);
+ ModuleMacro *getModuleMacro(Module *Mod, const IdentifierInfo *II);
/// Get the list of leaf (non-overridden) module macros for a name.
ArrayRef<ModuleMacro*> getLeafModuleMacros(const IdentifierInfo *II) const {
@@ -1163,6 +1164,11 @@ public:
return None;
}
+ /// Get the list of submodules that we're currently building.
+ ArrayRef<BuildingSubmoduleInfo> getBuildingSubmodules() const {
+ return BuildingSubmoduleStack;
+ }
+
/// \{
/// Iterators for the macro history table. Currently defined macros have
/// IdentifierInfo::hasMacroDefinition() set and an empty
@@ -2352,14 +2358,15 @@ private:
bool ReadAnyTokensBeforeDirective);
void HandleEndifDirective(Token &EndifToken);
void HandleElseDirective(Token &Result, const Token &HashToken);
- void HandleElifDirective(Token &ElifToken, const Token &HashToken);
+ void HandleElifFamilyDirective(Token &ElifToken, const Token &HashToken,
+ tok::PPKeywordKind Kind);
// Pragmas.
void HandlePragmaDirective(PragmaIntroducer Introducer);
public:
void HandlePragmaOnce(Token &OnceTok);
- void HandlePragmaMark();
+ void HandlePragmaMark(Token &MarkTok);
void HandlePragmaPoison();
void HandlePragmaSystemHeader(Token &SysHeaderTok);
void HandlePragmaDependency(Token &DependencyTok);
diff --git a/clang/include/clang/Lex/PreprocessorOptions.h b/clang/include/clang/Lex/PreprocessorOptions.h
index 7f024989bf9b..99085b98fc7a 100644
--- a/clang/include/clang/Lex/PreprocessorOptions.h
+++ b/clang/include/clang/Lex/PreprocessorOptions.h
@@ -106,6 +106,10 @@ public:
/// When true, a PCH with compiler errors will not be rejected.
bool AllowPCHWithCompilerErrors = false;
+ /// When true, a PCH with modules cache path different to the current
+ /// compilation will not be rejected.
+ bool AllowPCHWithDifferentModulesCachePath = false;
+
/// Dump declarations that are deserialized from PCH, for testing.
bool DumpDeserializedPCHDecls = false;
diff --git a/clang/include/clang/Lex/Token.h b/clang/include/clang/Lex/Token.h
index 89042a674fec..00fbe6d18f72 100644
--- a/clang/include/clang/Lex/Token.h
+++ b/clang/include/clang/Lex/Token.h
@@ -33,7 +33,7 @@ class IdentifierInfo;
/// information about the SourceRange of the tokens and the type object.
class Token {
/// The location of the token. This is actually a SourceLocation.
- unsigned Loc;
+ SourceLocation::UIntTy Loc;
// Conceptually these next two fields could be in a union. However, this
// causes gcc 4.2 to pessimize LexTokenInternal, a very performance critical
@@ -43,7 +43,7 @@ class Token {
/// UintData - This holds either the length of the token text, when
/// a normal token, or the end of the SourceRange when an annotation
/// token.
- unsigned UintData;
+ SourceLocation::UIntTy UintData;
/// PtrData - This is a union of four different pointer types, which depends
/// on what type of token this is:
diff --git a/clang/include/clang/Lex/VariadicMacroSupport.h b/clang/include/clang/Lex/VariadicMacroSupport.h
index 989e0ac703c9..119f02201fc6 100644
--- a/clang/include/clang/Lex/VariadicMacroSupport.h
+++ b/clang/include/clang/Lex/VariadicMacroSupport.h
@@ -39,17 +39,14 @@ namespace clang {
assert(Ident__VA_ARGS__->isPoisoned() && "__VA_ARGS__ should be poisoned "
"outside an ISO C/C++ variadic "
"macro definition!");
- assert(
- !Ident__VA_OPT__ ||
- (Ident__VA_OPT__->isPoisoned() && "__VA_OPT__ should be poisoned!"));
+ assert(Ident__VA_OPT__->isPoisoned() && "__VA_OPT__ should be poisoned!");
}
/// Client code should call this function just before the Preprocessor is
/// about to Lex tokens from the definition of a variadic (ISO C/C++) macro.
void enterScope() {
Ident__VA_ARGS__->setIsPoisoned(false);
- if (Ident__VA_OPT__)
- Ident__VA_OPT__->setIsPoisoned(false);
+ Ident__VA_OPT__->setIsPoisoned(false);
}
/// Client code should call this function as soon as the Preprocessor has
@@ -58,8 +55,7 @@ namespace clang {
/// (might be explicitly called, and then reinvoked via the destructor).
void exitScope() {
Ident__VA_ARGS__->setIsPoisoned(true);
- if (Ident__VA_OPT__)
- Ident__VA_OPT__->setIsPoisoned(true);
+ Ident__VA_OPT__->setIsPoisoned(true);
}
~VariadicMacroScopeGuard() { exitScope(); }
diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h
index 02aab3b43be0..8eb3f9029d9d 100644
--- a/clang/include/clang/Parse/Parser.h
+++ b/clang/include/clang/Parse/Parser.h
@@ -114,14 +114,17 @@ class Parser : public CodeCompletionHandler {
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
+ mutable IdentifierInfo *Ident_abstract;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
- /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
- /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
+ /// Ident_vector, Ident_bool, Ident_Bool - cached IdentifierInfos for "vector"
+ /// and "bool" fast comparison. Only present if AltiVec or ZVector are
+ /// enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
+ IdentifierInfo *Ident_Bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
@@ -879,6 +882,7 @@ private:
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
+ Tok.getIdentifierInfo() != Ident_Bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
@@ -941,8 +945,8 @@ private:
bool isActive;
public:
- explicit TentativeParsingAction(Parser& p) : P(p) {
- PrevPreferredType = P.PreferredType;
+ explicit TentativeParsingAction(Parser &p)
+ : P(p), PrevPreferredType(P.PreferredType) {
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
@@ -1572,27 +1576,6 @@ private:
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
- struct ParsedAttributesWithRange : ParsedAttributes {
- ParsedAttributesWithRange(AttributeFactory &factory)
- : ParsedAttributes(factory) {}
-
- void clear() {
- ParsedAttributes::clear();
- Range = SourceRange();
- }
-
- SourceRange Range;
- };
- struct ParsedAttributesViewWithRange : ParsedAttributesView {
- ParsedAttributesViewWithRange() : ParsedAttributesView() {}
- void clearListOnly() {
- ParsedAttributesView::clearListOnly();
- Range = SourceRange();
- }
-
- SourceRange Range;
- };
-
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
@@ -1818,6 +1801,7 @@ private:
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
+ ExprResult ParseSYCLUniqueStableNameExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
@@ -1991,7 +1975,8 @@ private:
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
- ForRangeInfo *FRI = nullptr);
+ ForRangeInfo *FRI = nullptr,
+ bool EnterForConditionScope = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines
@@ -2018,8 +2003,11 @@ private:
}
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
- ExprResult ParseInitializerWithPotentialDesignator(
- llvm::function_ref<void(const Designation &)> CodeCompleteCB);
+ struct DesignatorCompletionInfo {
+ SmallVectorImpl<Expr *> &InitExprs;
+ QualType PreferredBaseType;
+ };
+ ExprResult ParseInitializerWithPotentialDesignator(DesignatorCompletionInfo);
//===--------------------------------------------------------------------===//
// clang Expressions
@@ -2635,7 +2623,8 @@ private:
// which standard permits but we don't supported yet, for example, attributes
// appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
- unsigned DiagID);
+ unsigned DiagID,
+ bool DiagnoseEmptyAttrs = false);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
@@ -2646,6 +2635,10 @@ private:
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
+ /// Emit warnings for C++11 and C2x attributes that are in a position that
+ /// clang accepts as an extension.
+ void DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs);
+
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
@@ -2656,6 +2649,61 @@ private:
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
+ enum ParseAttrKindMask {
+ PAKM_GNU = 1 << 0,
+ PAKM_Declspec = 1 << 1,
+ PAKM_CXX11 = 1 << 2,
+ };
+
+ /// \brief Parse attributes based on what syntaxes are desired, allowing for
+ /// the order to vary. e.g. with PAKM_GNU | PAKM_Declspec:
+ /// __attribute__((...)) __declspec(...) __attribute__((...)))
+ /// Note that Microsoft attributes (spelled with single square brackets) are
+ /// not supported by this because of parsing ambiguities with other
+ /// constructs.
+ ///
+ /// There are some attribute parse orderings that should not be allowed in
+ /// arbitrary order. e.g.,
+ ///
+ /// [[]] __attribute__(()) int i; // OK
+ /// __attribute__(()) [[]] int i; // Not OK
+ ///
+ /// Such situations should use the specific attribute parsing functionality.
+ void ParseAttributes(unsigned WhichAttrKinds,
+ ParsedAttributesWithRange &Attrs,
+ SourceLocation *End = nullptr,
+ LateParsedAttrList *LateAttrs = nullptr);
+ void ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
+ SourceLocation *End = nullptr,
+ LateParsedAttrList *LateAttrs = nullptr) {
+ ParsedAttributesWithRange AttrsWithRange(AttrFactory);
+ ParseAttributes(WhichAttrKinds, AttrsWithRange, End, LateAttrs);
+ Attrs.takeAllFrom(AttrsWithRange);
+ }
+ /// \brief Possibly parse attributes based on what syntaxes are desired,
+ /// allowing for the order to vary.
+ bool MaybeParseAttributes(unsigned WhichAttrKinds,
+ ParsedAttributesWithRange &Attrs,
+ SourceLocation *End = nullptr,
+ LateParsedAttrList *LateAttrs = nullptr) {
+ if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
+ (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
+ ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
+ return true;
+ }
+ return false;
+ }
+ bool MaybeParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
+ SourceLocation *End = nullptr,
+ LateParsedAttrList *LateAttrs = nullptr) {
+ if (Tok.isOneOf(tok::kw___attribute, tok::kw___declspec) ||
+ (standardAttributesAllowed() && isCXX11AttributeSpecifier())) {
+ ParseAttributes(WhichAttrKinds, Attrs, End, LateAttrs);
+ return true;
+ }
+ return false;
+ }
+
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
@@ -2665,14 +2713,50 @@ private:
D.takeAttributes(attrs, endLoc);
}
}
- void MaybeParseGNUAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc = nullptr,
+
+ /// Parses GNU-style attributes and returns them without source range
+ /// information.
+ ///
+ /// This API is discouraged. Use the version that takes a
+ /// ParsedAttributesWithRange instead.
+ bool MaybeParseGNUAttributes(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
- if (Tok.is(tok::kw___attribute))
- ParseGNUAttributes(attrs, endLoc, LateAttrs);
+ if (Tok.is(tok::kw___attribute)) {
+ ParsedAttributesWithRange AttrsWithRange(AttrFactory);
+ ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
+ Attrs.takeAllFrom(AttrsWithRange);
+ return true;
+ }
+ return false;
}
- void ParseGNUAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc = nullptr,
+
+ bool MaybeParseGNUAttributes(ParsedAttributesWithRange &Attrs,
+ SourceLocation *EndLoc = nullptr,
+ LateParsedAttrList *LateAttrs = nullptr) {
+ if (Tok.is(tok::kw___attribute)) {
+ ParseGNUAttributes(Attrs, EndLoc, LateAttrs);
+ return true;
+ }
+ return false;
+ }
+
+ /// Parses GNU-style attributes and returns them without source range
+ /// information.
+ ///
+ /// This API is discouraged. Use the version that takes a
+ /// ParsedAttributesWithRange instead.
+ void ParseGNUAttributes(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc = nullptr,
+ LateParsedAttrList *LateAttrs = nullptr,
+ Declarator *D = nullptr) {
+ ParsedAttributesWithRange AttrsWithRange(AttrFactory);
+ ParseGNUAttributes(AttrsWithRange, EndLoc, LateAttrs, D);
+ Attrs.takeAllFrom(AttrsWithRange);
+ }
+
+ void ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
+ SourceLocation *EndLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
@@ -2688,6 +2772,16 @@ private:
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
+ void ReplayOpenMPAttributeTokens(CachedTokens &OpenMPTokens) {
+ // If parsing the attributes found an OpenMP directive, emit those tokens
+ // to the parse stream now.
+ if (!OpenMPTokens.empty()) {
+ PP.EnterToken(Tok, /*IsReinject*/ true);
+ PP.EnterTokenStream(OpenMPTokens, /*DisableMacroExpansion*/ true,
+ /*IsReinject*/ true);
+ ConsumeAnyToken(/*ConsumeCodeCompletionTok*/ true);
+ }
+ }
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
@@ -2706,16 +2800,29 @@ private:
}
return false;
}
- void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
+ bool MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
- isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
+ isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) {
ParseCXX11Attributes(attrs, endLoc);
+ return true;
+ }
+ return false;
}
- void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
- SourceLocation *EndLoc = nullptr);
+ void ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
+ CachedTokens &OpenMPTokens);
+
+ void ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
+ CachedTokens &OpenMPTokens,
+ SourceLocation *EndLoc = nullptr);
+ void ParseCXX11AttributeSpecifier(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc = nullptr) {
+ CachedTokens OpenMPTokens;
+ ParseCXX11AttributeSpecifierInternal(Attrs, OpenMPTokens, EndLoc);
+ ReplayOpenMPAttributeTokens(OpenMPTokens);
+ }
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
@@ -2724,7 +2831,8 @@ private:
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc);
+ SourceLocation ScopeLoc,
+ CachedTokens &OpenMPTokens);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
@@ -2736,11 +2844,14 @@ private:
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
- void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
+ bool MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
- if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
+ if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) {
ParseMicrosoftDeclSpecs(Attrs, End);
+ return true;
+ }
+ return false;
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
@@ -2754,17 +2865,6 @@ private:
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
- /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0
- /// or higher.
- /// \return false if error happens.
- bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
- if (getLangOpts().OpenCL)
- return ParseOpenCLUnrollHintAttribute(Attrs);
- return true;
- }
- /// Parses opencl_unroll_hint attribute.
- /// \return false if error happens.
- bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
@@ -2839,6 +2939,7 @@ private:
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
+ bool isClassCompatibleKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
@@ -2985,6 +3086,7 @@ private:
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &Attrs,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
@@ -3111,10 +3213,12 @@ private:
/// Parse 'omp end assumes' directive.
void ParseOpenMPEndAssumesDirective(SourceLocation Loc);
- /// Parse clauses for '#pragma omp declare target'.
- DeclGroupPtrTy ParseOMPDeclareTargetClauses();
+ /// Parse clauses for '#pragma omp [begin] declare target'.
+ void ParseOMPDeclareTargetClauses(Sema::DeclareTargetContextInfo &DTCI);
+
/// Parse '#pragma omp end declare target'.
- void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
+ void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
+ OpenMPDirectiveKind EndDKind,
SourceLocation Loc);
/// Skip tokens until a `annot_pragma_openmp_end` was found. Emit a warning if
@@ -3206,6 +3310,10 @@ private:
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
bool ParseOnly);
+
+ /// Parses the 'sizes' clause of a '#pragma omp tile' directive.
+ OMPClause *ParseOpenMPSizesClause();
+
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
@@ -3232,6 +3340,14 @@ private:
/// '(' { <allocator> [ '(' <allocator_traits> ')' ] }+ ')'
OMPClause *ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind);
+ /// Parses clause with an interop variable of kind \a Kind.
+ ///
+ /// \param Kind Kind of current clause.
+ /// \param ParseOnly true to skip the clause's semantic actions and return
+ /// nullptr.
+  ///
+ OMPClause *ParseOpenMPInteropClause(OpenMPClauseKind Kind, bool ParseOnly);
+
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
diff --git a/clang/include/clang/Rewrite/Core/RewriteRope.h b/clang/include/clang/Rewrite/Core/RewriteRope.h
index 039927c48b08..8fa7af245eb8 100644
--- a/clang/include/clang/Rewrite/Core/RewriteRope.h
+++ b/clang/include/clang/Rewrite/Core/RewriteRope.h
@@ -83,8 +83,7 @@ namespace clang {
/// over bytes that are in a RopePieceBTree. This first iterates over bytes
/// in a RopePiece, then iterates over RopePiece's in a RopePieceBTreeLeaf,
/// then iterates over RopePieceBTreeLeaf's in a RopePieceBTree.
- class RopePieceBTreeIterator :
- public std::iterator<std::forward_iterator_tag, const char, ptrdiff_t> {
+ class RopePieceBTreeIterator {
/// CurNode - The current B+Tree node that we are inspecting.
const void /*RopePieceBTreeLeaf*/ *CurNode = nullptr;
@@ -96,6 +95,12 @@ namespace clang {
unsigned CurChar = 0;
public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = const char;
+ using difference_type = std::ptrdiff_t;
+ using pointer = value_type *;
+ using reference = value_type &;
+
RopePieceBTreeIterator() = default;
RopePieceBTreeIterator(const void /*RopePieceBTreeNode*/ *N);
diff --git a/clang/include/clang/Sema/AnalysisBasedWarnings.h b/clang/include/clang/Sema/AnalysisBasedWarnings.h
index e13fe955eaf4..49b69c585ff7 100644
--- a/clang/include/clang/Sema/AnalysisBasedWarnings.h
+++ b/clang/include/clang/Sema/AnalysisBasedWarnings.h
@@ -14,6 +14,7 @@
#define LLVM_CLANG_SEMA_ANALYSISBASEDWARNINGS_H
#include "llvm/ADT/DenseMap.h"
+#include <memory>
namespace clang {
@@ -47,6 +48,9 @@ private:
Sema &S;
Policy DefaultPolicy;
+ class InterProceduralData;
+ std::unique_ptr<InterProceduralData> IPData;
+
enum VisitFlag { NotVisited = 0, Visited = 1, Pending = 2 };
llvm::DenseMap<const FunctionDecl*, VisitFlag> VisitedFD;
@@ -88,6 +92,7 @@ private:
public:
AnalysisBasedWarnings(Sema &s);
+ ~AnalysisBasedWarnings();
void IssueWarnings(Policy P, FunctionScopeInfo *fscope,
const Decl *D, QualType BlockType);
@@ -97,6 +102,7 @@ public:
void PrintStats() const;
};
-}} // end namespace clang::sema
+} // namespace sema
+} // namespace clang
#endif
diff --git a/clang/include/clang/Sema/DeclSpec.h b/clang/include/clang/Sema/DeclSpec.h
index 3b16295941e4..423f4f4ee7b7 100644
--- a/clang/include/clang/Sema/DeclSpec.h
+++ b/clang/include/clang/Sema/DeclSpec.h
@@ -2620,7 +2620,8 @@ public:
VS_Final = 2,
VS_Sealed = 4,
// Represents the __final keyword, which is legal for gcc in pre-C++11 mode.
- VS_GNU_Final = 8
+ VS_GNU_Final = 8,
+ VS_Abstract = 16
};
VirtSpecifiers() : Specifiers(0), LastSpecifier(VS_None) { }
@@ -2636,6 +2637,7 @@ public:
bool isFinalSpecified() const { return Specifiers & (VS_Final | VS_Sealed | VS_GNU_Final); }
bool isFinalSpelledSealed() const { return Specifiers & VS_Sealed; }
SourceLocation getFinalLoc() const { return VS_finalLoc; }
+ SourceLocation getAbstractLoc() const { return VS_abstractLoc; }
void clear() { Specifiers = 0; }
@@ -2649,7 +2651,7 @@ private:
unsigned Specifiers;
Specifier LastSpecifier;
- SourceLocation VS_overrideLoc, VS_finalLoc;
+ SourceLocation VS_overrideLoc, VS_finalLoc, VS_abstractLoc;
SourceLocation FirstLocation;
SourceLocation LastLocation;
};
diff --git a/clang/include/clang/Sema/ExternalSemaSource.h b/clang/include/clang/Sema/ExternalSemaSource.h
index 2854b4893484..9c18aa1398d3 100644
--- a/clang/include/clang/Sema/ExternalSemaSource.h
+++ b/clang/include/clang/Sema/ExternalSemaSource.h
@@ -199,8 +199,8 @@ public:
/// and variable decls which may cause deferred diags. Note that this routine
/// may be invoked multiple times; the external source should take care not to
/// introduce the same declarations repeatedly.
- virtual void ReadDeclsToCheckForDeferredDiags(
- llvm::SmallVector<Decl *, 4> &Decls) {}
+ virtual void
+ ReadDeclsToCheckForDeferredDiags(llvm::SmallSetVector<Decl *, 4> &Decls) {}
/// \copydoc Sema::CorrectTypo
/// \note LookupKind must correspond to a valid Sema::LookupNameKind
diff --git a/clang/include/clang/Sema/Initialization.h b/clang/include/clang/Sema/Initialization.h
index dcdfa3c3cf64..8feb66995f95 100644
--- a/clang/include/clang/Sema/Initialization.h
+++ b/clang/include/clang/Sema/Initialization.h
@@ -187,8 +187,8 @@ private:
ObjCMethodDecl *MethodDecl;
/// When Kind == EK_Parameter, the ParmVarDecl, with the
- /// low bit indicating whether the parameter is "consumed".
- uintptr_t Parameter;
+ /// integer indicating whether the parameter is "consumed".
+ llvm::PointerIntPair<ParmVarDecl *, 1> Parameter;
/// When Kind == EK_Temporary or EK_CompoundLiteralInit, the type
/// source information for the temporary.
@@ -197,9 +197,9 @@ private:
struct LN LocAndNRVO;
/// When Kind == EK_Base, the base specifier that provides the
- /// base class. The lower bit specifies whether the base is an inherited
+ /// base class. The integer specifies whether the base is an inherited
/// virtual base.
- uintptr_t Base;
+ llvm::PointerIntPair<const CXXBaseSpecifier *, 1> Base;
/// When Kind == EK_ArrayElement, EK_VectorElement, or
/// EK_ComplexElement, the index of the array or vector element being
@@ -252,15 +252,14 @@ public:
/// Create the initialization entity for a parameter.
static InitializedEntity InitializeParameter(ASTContext &Context,
- const ParmVarDecl *Parm) {
+ ParmVarDecl *Parm) {
return InitializeParameter(Context, Parm, Parm->getType());
}
/// Create the initialization entity for a parameter, but use
/// another type.
- static InitializedEntity InitializeParameter(ASTContext &Context,
- const ParmVarDecl *Parm,
- QualType Type) {
+ static InitializedEntity
+ InitializeParameter(ASTContext &Context, ParmVarDecl *Parm, QualType Type) {
bool Consumed = (Context.getLangOpts().ObjCAutoRefCount &&
Parm->hasAttr<NSConsumedAttr>());
@@ -269,8 +268,7 @@ public:
Entity.Type =
Context.getVariableArrayDecayedType(Type.getUnqualifiedType());
Entity.Parent = nullptr;
- Entity.Parameter
- = (static_cast<uintptr_t>(Consumed) | reinterpret_cast<uintptr_t>(Parm));
+ Entity.Parameter = {Parm, Consumed};
return Entity;
}
@@ -283,7 +281,7 @@ public:
Entity.Kind = EK_Parameter;
Entity.Type = Context.getVariableArrayDecayedType(Type);
Entity.Parent = nullptr;
- Entity.Parameter = (Consumed);
+ Entity.Parameter = {nullptr, Consumed};
return Entity;
}
@@ -466,19 +464,19 @@ public:
/// parameter.
bool isParameterConsumed() const {
assert(isParameterKind() && "Not a parameter");
- return (Parameter & 1);
+ return Parameter.getInt();
}
/// Retrieve the base specifier.
const CXXBaseSpecifier *getBaseSpecifier() const {
assert(getKind() == EK_Base && "Not a base specifier");
- return reinterpret_cast<const CXXBaseSpecifier *>(Base & ~0x1);
+ return Base.getPointer();
}
/// Return whether the base is an inherited virtual base.
bool isInheritedVirtualBase() const {
assert(getKind() == EK_Base && "Not a base specifier");
- return Base & 0x1;
+ return Base.getInt();
}
/// Determine whether this is an array new with an unknown bound.
@@ -806,7 +804,7 @@ public:
SK_ResolveAddressOfOverloadedFunction,
/// Perform a derived-to-base cast, producing an rvalue.
- SK_CastDerivedToBaseRValue,
+ SK_CastDerivedToBasePRValue,
/// Perform a derived-to-base cast, producing an xvalue.
SK_CastDerivedToBaseXValue,
@@ -833,8 +831,8 @@ public:
/// function or via a constructor.
SK_UserConversion,
- /// Perform a qualification conversion, producing an rvalue.
- SK_QualificationConversionRValue,
+ /// Perform a qualification conversion, producing a prvalue.
+ SK_QualificationConversionPRValue,
/// Perform a qualification conversion, producing an xvalue.
SK_QualificationConversionXValue,
diff --git a/clang/include/clang/Sema/MultiplexExternalSemaSource.h b/clang/include/clang/Sema/MultiplexExternalSemaSource.h
index b54a6283d640..78658dcf990c 100644
--- a/clang/include/clang/Sema/MultiplexExternalSemaSource.h
+++ b/clang/include/clang/Sema/MultiplexExternalSemaSource.h
@@ -337,7 +337,7 @@ public:
/// may be invoked multiple times; the external source should take care not to
/// introduce the same declarations repeatedly.
void ReadDeclsToCheckForDeferredDiags(
- llvm::SmallVector<Decl *, 4> &Decls) override;
+ llvm::SmallSetVector<Decl *, 4> &Decls) override;
/// \copydoc ExternalSemaSource::CorrectTypo
/// \note Returns the first nonempty correction.
diff --git a/clang/include/clang/Sema/Overload.h b/clang/include/clang/Sema/Overload.h
index 5be6a618711c..82661cb3d12a 100644
--- a/clang/include/clang/Sema/Overload.h
+++ b/clang/include/clang/Sema/Overload.h
@@ -760,9 +760,6 @@ class Sema;
/// This candidate was not viable because its address could not be taken.
ovl_fail_addr_not_available,
- /// This candidate was not viable because its OpenCL extension is disabled.
- ovl_fail_ext_disabled,
-
/// This inherited constructor is not viable because it would slice the
/// argument.
ovl_fail_inhctor_slice,
@@ -1051,9 +1048,6 @@ class Sema;
void destroyCandidates();
- /// Whether diagnostics should be deferred.
- bool shouldDeferDiags(Sema &S, ArrayRef<Expr *> Args, SourceLocation OpLoc);
-
public:
OverloadCandidateSet(SourceLocation Loc, CandidateSetKind CSK,
OperatorRewriteInfo RewriteInfo = {})
@@ -1066,6 +1060,9 @@ class Sema;
CandidateSetKind getKind() const { return Kind; }
OperatorRewriteInfo getRewriteInfo() const { return RewriteInfo; }
+ /// Whether diagnostics should be deferred.
+ bool shouldDeferDiags(Sema &S, ArrayRef<Expr *> Args, SourceLocation OpLoc);
+
/// Determine when this overload candidate will be new to the
/// overload set.
bool isNewCandidate(Decl *F, OverloadCandidateParamOrder PO =
diff --git a/clang/include/clang/Sema/ParsedAttr.h b/clang/include/clang/Sema/ParsedAttr.h
index 43c21faaece9..f47f557adeb1 100644
--- a/clang/include/clang/Sema/ParsedAttr.h
+++ b/clang/include/clang/Sema/ParsedAttr.h
@@ -39,6 +39,7 @@ class IdentifierInfo;
class LangOptions;
class ParsedAttr;
class Sema;
+class Stmt;
class TargetInfo;
struct ParsedAttrInfo {
@@ -80,6 +81,17 @@ struct ParsedAttrInfo {
const Decl *D) const {
return true;
}
+ /// Check if this attribute appertains to St, and issue a diagnostic if not.
+ virtual bool diagAppertainsToStmt(Sema &S, const ParsedAttr &Attr,
+ const Stmt *St) const {
+ return true;
+ }
+ /// Check if the given attribute is mutually exclusive with other attributes
+ /// already applied to the given declaration.
+ virtual bool diagMutualExclusion(Sema &S, const ParsedAttr &A,
+ const Decl *D) const {
+ return true;
+ }
/// Check if this attribute is allowed by the language we are compiling, and
/// issue a diagnostic if not.
virtual bool diagLangOpts(Sema &S, const ParsedAttr &Attr) const {
@@ -573,6 +585,16 @@ public:
return MacroExpansionLoc;
}
+ /// Check if the attribute has exactly as many args as Num. May output an
+ /// error. Returns false if a diagnostic is produced.
+ bool checkExactlyNumArgs(class Sema &S, unsigned Num) const;
+ /// Check if the attribute has at least as many args as Num. May output an
+ /// error. Returns false if a diagnostic is produced.
+ bool checkAtLeastNumArgs(class Sema &S, unsigned Num) const;
+ /// Check if the attribute has at most as many args as Num. May output an
+ /// error. Returns false if a diagnostic is produced.
+ bool checkAtMostNumArgs(class Sema &S, unsigned Num) const;
+
bool isTargetSpecificAttr() const;
bool isTypeAttr() const;
bool isStmtAttr() const;
@@ -582,6 +604,14 @@ public:
unsigned getMaxArgs() const;
bool hasVariadicArg() const;
bool diagnoseAppertainsTo(class Sema &S, const Decl *D) const;
+ bool diagnoseAppertainsTo(class Sema &S, const Stmt *St) const;
+ bool diagnoseMutualExclusion(class Sema &S, const Decl *D) const;
+ // This function stub exists for parity with the declaration checking code so
+ // that checkCommonAttributeFeatures() can work generically on declarations
+ // or statements.
+ bool diagnoseMutualExclusion(class Sema &S, const Stmt *St) const {
+ return true;
+ }
bool appliesToDecl(const Decl *D, attr::SubjectMatchRule MatchRule) const;
void getMatchRules(const LangOptions &LangOpts,
SmallVectorImpl<std::pair<attr::SubjectMatchRule, bool>>
@@ -598,8 +628,8 @@ public:
/// a Spelling enumeration, the value UINT_MAX is returned.
unsigned getSemanticSpelling() const;
- /// If this is an OpenCL addr space attribute returns its representation
- /// in LangAS, otherwise returns default addr space.
+ /// If this is an OpenCL address space attribute returns its representation
+ /// in LangAS, otherwise returns default address space.
LangAS asOpenCLLangAS() const {
switch (getParsedKind()) {
case ParsedAttr::AT_OpenCLConstantAddressSpace:
@@ -621,6 +651,26 @@ public:
}
}
+ /// If this is an OpenCL address space attribute returns its SYCL
+ /// representation in LangAS, otherwise returns default address space.
+ LangAS asSYCLLangAS() const {
+ switch (getKind()) {
+ case ParsedAttr::AT_OpenCLGlobalAddressSpace:
+ return LangAS::sycl_global;
+ case ParsedAttr::AT_OpenCLGlobalDeviceAddressSpace:
+ return LangAS::sycl_global_device;
+ case ParsedAttr::AT_OpenCLGlobalHostAddressSpace:
+ return LangAS::sycl_global_host;
+ case ParsedAttr::AT_OpenCLLocalAddressSpace:
+ return LangAS::sycl_local;
+ case ParsedAttr::AT_OpenCLPrivateAddressSpace:
+ return LangAS::sycl_private;
+ case ParsedAttr::AT_OpenCLGenericAddressSpace:
+ default:
+ return LangAS::Default;
+ }
+ }
+
AttributeCommonInfo::Kind getKind() const {
return AttributeCommonInfo::Kind(Info.AttrKind);
}
@@ -1017,6 +1067,27 @@ private:
mutable AttributePool pool;
};
+struct ParsedAttributesWithRange : ParsedAttributes {
+ ParsedAttributesWithRange(AttributeFactory &factory)
+ : ParsedAttributes(factory) {}
+
+ void clear() {
+ ParsedAttributes::clear();
+ Range = SourceRange();
+ }
+
+ SourceRange Range;
+};
+struct ParsedAttributesViewWithRange : ParsedAttributesView {
+ ParsedAttributesViewWithRange() : ParsedAttributesView() {}
+ void clearListOnly() {
+ ParsedAttributesView::clearListOnly();
+ Range = SourceRange();
+ }
+
+ SourceRange Range;
+};
+
/// These constants match the enumerated choices of
/// err_attribute_argument_n_type and err_attribute_argument_type.
enum AttributeArgumentNType {
diff --git a/clang/include/clang/Sema/Scope.h b/clang/include/clang/Sema/Scope.h
index b7260f15fe1b..b499ba1e7c2a 100644
--- a/clang/include/clang/Sema/Scope.h
+++ b/clang/include/clang/Sema/Scope.h
@@ -129,11 +129,17 @@ public:
/// This is a compound statement scope.
CompoundStmtScope = 0x400000,
- /// We are between inheritance colon and the real class/struct definition scope.
+ /// We are between inheritance colon and the real class/struct definition
+ /// scope.
ClassInheritanceScope = 0x800000,
/// This is the scope of a C++ catch statement.
CatchScope = 0x1000000,
+
+ /// This is a scope in which a condition variable is currently being
+ /// parsed. If such a scope is a ContinueScope, it's invalid to jump to the
+ /// continue block from here.
+ ConditionVarScope = 0x2000000,
};
private:
@@ -247,6 +253,17 @@ public:
return const_cast<Scope*>(this)->getContinueParent();
}
+ // Set whether we're in the scope of a condition variable, where 'continue'
+ // is disallowed despite being a continue scope.
+ void setIsConditionVarScope(bool InConditionVarScope) {
+ Flags = (Flags & ~ConditionVarScope) |
+ (InConditionVarScope ? ConditionVarScope : 0);
+ }
+
+ bool isConditionVarScope() const {
+ return Flags & ConditionVarScope;
+ }
+
/// getBreakParent - Return the closest scope that a break statement
/// would be affected by.
Scope *getBreakParent() {
diff --git a/clang/include/clang/Sema/ScopeInfo.h b/clang/include/clang/Sema/ScopeInfo.h
index 8ec74cafeeca..98ed75acd9d2 100644
--- a/clang/include/clang/Sema/ScopeInfo.h
+++ b/clang/include/clang/Sema/ScopeInfo.h
@@ -118,6 +118,10 @@ public:
/// Whether this function contains any indirect gotos.
bool HasIndirectGoto : 1;
+ /// Whether this function contains any statement marked with
+ /// \c [[clang::musttail]].
+ bool HasMustTail : 1;
+
/// Whether a statement was dropped because it was invalid.
bool HasDroppedStmt : 1;
@@ -370,14 +374,13 @@ protected:
public:
FunctionScopeInfo(DiagnosticsEngine &Diag)
: Kind(SK_Function), HasBranchProtectedScope(false),
- HasBranchIntoScope(false), HasIndirectGoto(false),
+ HasBranchIntoScope(false), HasIndirectGoto(false), HasMustTail(false),
HasDroppedStmt(false), HasOMPDeclareReductionCombiner(false),
HasFallthroughStmt(false), UsesFPIntrin(false),
- HasPotentialAvailabilityViolations(false),
- ObjCShouldCallSuper(false), ObjCIsDesignatedInit(false),
- ObjCWarnForNoDesignatedInitChain(false), ObjCIsSecondaryInit(false),
- ObjCWarnForNoInitDelegation(false), NeedsCoroutineSuspends(true),
- ErrorTrap(Diag) {}
+ HasPotentialAvailabilityViolations(false), ObjCShouldCallSuper(false),
+ ObjCIsDesignatedInit(false), ObjCWarnForNoDesignatedInitChain(false),
+ ObjCIsSecondaryInit(false), ObjCWarnForNoInitDelegation(false),
+ NeedsCoroutineSuspends(true), ErrorTrap(Diag) {}
virtual ~FunctionScopeInfo();
@@ -423,6 +426,8 @@ public:
HasIndirectGoto = true;
}
+ void setHasMustTail() { HasMustTail = true; }
+
void setHasDroppedStmt() {
HasDroppedStmt = true;
}
@@ -450,9 +455,8 @@ public:
}
bool NeedsScopeChecking() const {
- return !HasDroppedStmt &&
- (HasIndirectGoto ||
- (HasBranchProtectedScope && HasBranchIntoScope));
+ return !HasDroppedStmt && (HasIndirectGoto || HasMustTail ||
+ (HasBranchProtectedScope && HasBranchIntoScope));
}
// Add a block introduced in this function.
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 7f7c84eb1b1d..83a2d132bf6a 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -32,9 +32,12 @@
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
@@ -286,24 +289,29 @@ public:
}
};
-/// Keeps track of expected type during expression parsing. The type is tied to
-/// a particular token, all functions that update or consume the type take a
-/// start location of the token they are looking at as a parameter. This allows
-/// to avoid updating the type on hot paths in the parser.
+/// Tracks expected type during expression parsing, for use in code completion.
+/// The type is tied to a particular token, all functions that update or consume
+/// the type take a start location of the token they are looking at as a
+/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
- PreferredTypeBuilder() = default;
- explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
+ PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
+ /// Handles e.g. BaseType{ .D = Tok...
+ void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
+ const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this funciton, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
+ ///
+ /// The callback should also emit signature help as a side-effect, but only
+ /// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
@@ -316,8 +324,14 @@ public:
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
+ /// Get the expected type associated with this location, if any.
+ ///
+ /// If the location is a function argument, determining the expected type
+ /// involves considering all function overloads and the arguments so far.
+ /// In this case, signature help for these function overloads will be reported
+ /// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
- if (Tok != ExpectedLoc)
+ if (!Enabled || Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
@@ -327,6 +341,7 @@ public:
}
private:
+ bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
@@ -341,9 +356,6 @@ class Sema final {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
- /// A key method to reduce duplicate debug info from Sema.
- virtual void anchor();
-
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
@@ -902,6 +914,10 @@ public:
OpaqueParser = P;
}
+ // Does the work necessary to deal with a SYCL kernel lambda. At the moment,
+ // this just marks the list of lambdas required to name the kernel.
+ void AddSYCLKernelLambda(const FunctionDecl *FD);
+
class DelayedDiagnostics;
class DelayedDiagnosticsState {
@@ -1362,7 +1378,7 @@ public:
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
- TranslationUnitKind TUKind;
+ const TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
@@ -1505,6 +1521,13 @@ public:
bool WarnedStackExhausted = false;
+ /// Increment when we find a reference; decrement when we find an ignored
+ /// assignment. Ultimately the value is 0 if every reference is an ignored
+ /// assignment.
+ llvm::DenseMap<const VarDecl *, int> RefsMinusAssignments;
+
+ Optional<std::unique_ptr<DarwinSDKInfo>> CachedDarwinSDKInfo;
+
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
@@ -1515,6 +1538,13 @@ public:
/// initialized but before it parses anything.
void Initialize();
+ /// This virtual key function only exists to limit the emission of debug info
+ /// describing the Sema class. GCC and Clang only emit debug info for a class
+ /// with a vtable when the vtable is emitted. Sema is final and not
+ /// polymorphic, but the debug info size savings are so significant that it is
+ /// worth adding a vtable just to take advantage of this optimization.
+ virtual void anchor();
+
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getCurFPFeatures() { return CurFPFeatures; }
@@ -1526,6 +1556,8 @@ public:
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
+ DarwinSDKInfo *getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
+ StringRef Platform);
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
@@ -1743,6 +1775,22 @@ public:
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
+ /// Whether deferrable diagnostics should be deferred.
+ bool DeferDiags = false;
+
+ /// RAII class to control scope of DeferDiags.
+ class DeferDiagsRAII {
+ Sema &S;
+ bool SavedDeferDiags = false;
+
+ public:
+ DeferDiagsRAII(Sema &S, bool DeferDiags)
+ : S(S), SavedDeferDiags(S.DeferDiags) {
+ S.DeferDiags = DeferDiags;
+ }
+ ~DeferDiagsRAII() { S.DeferDiags = SavedDeferDiags; }
+ };
+
/// Whether uncompilable error has occurred. This includes error happens
/// in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
@@ -1770,7 +1818,7 @@ public:
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
- SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
+ llvm::SmallSetVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
@@ -1836,6 +1884,7 @@ public:
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
+ void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
@@ -1865,6 +1914,10 @@ public:
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
+ /// Retrieve the current function, if any, that should be analyzed for
+ /// potential availability violations.
+ sema::FunctionScopeInfo *getCurFunctionAvailabilityContext();
+
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
@@ -2281,6 +2334,7 @@ public:
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
+ QualType getDecltypeForParenthesizedExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
@@ -2572,10 +2626,15 @@ public:
SourceLocation Less,
SourceLocation Greater);
+ void warnOnReservedIdentifier(const NamedDecl *D);
+
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
+ bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
+ QualType &T, SourceLocation Loc,
+ unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
@@ -2595,6 +2654,8 @@ public:
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
+ NamedDecl *getShadowedDeclaration(const BindingDecl *D,
+ const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
@@ -2688,8 +2749,7 @@ public:
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
- ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param,
- Expr *DefaultArg,
+ ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
@@ -3115,6 +3175,7 @@ public:
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
+ bool IsAbstract,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
@@ -3231,6 +3292,9 @@ public:
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
+ /// Merge availability attributes for an implementation of
+ /// an optional protocol requirement.
+ AMK_OptionalProtocolImplementation
};
/// Describes the kind of priority given to an availability attribute.
@@ -3290,12 +3354,6 @@ public:
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
- NoSpeculativeLoadHardeningAttr *
- mergeNoSpeculativeLoadHardeningAttr(Decl *D,
- const NoSpeculativeLoadHardeningAttr &AL);
- SpeculativeLoadHardeningAttr *
- mergeSpeculativeLoadHardeningAttr(Decl *D,
- const SpeculativeLoadHardeningAttr &AL);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
@@ -3303,8 +3361,6 @@ public:
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
- CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
- CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
@@ -3421,12 +3477,6 @@ public:
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
- ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
- const VarDecl *NRVOCandidate,
- QualType ResultType,
- Expr *Value,
- bool AllowNRVO = true);
-
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
@@ -3458,7 +3508,6 @@ public:
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
- CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
@@ -4094,7 +4143,8 @@ public:
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
- bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
+ bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id,
+ bool IsUDSuffix);
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
@@ -4246,6 +4296,13 @@ public:
void checkUnusedDeclAttributes(Declarator &D);
+ /// Handles semantic checking for features that are common to all attributes,
+ /// such as checking whether a parameter was properly specified, or the
+ /// correct number of arguments were passed, etc. Returns true if the
+ /// attribute has been diagnosed.
+ bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
+ bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
+
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
@@ -4260,6 +4317,7 @@ public:
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
+ llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
@@ -4283,10 +4341,11 @@ public:
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
- /// Stmt attributes - this routine is the top level dispatcher.
- StmtResult ProcessStmtAttributes(Stmt *Stmt,
- const ParsedAttributesView &Attrs,
- SourceRange Range);
+ /// Process the attributes before creating an attributed statement. Returns
+ /// the semantic attributes that have been processed.
+ void ProcessStmtAttributes(Stmt *Stmt,
+ const ParsedAttributesWithRange &InAttrs,
+ SmallVectorImpl<const Attr *> &OutAttrs);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
@@ -4625,8 +4684,9 @@ public:
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
- StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
- ArrayRef<const Attr*> Attrs,
+ StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
+ ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
+ StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
class ConditionResult;
@@ -4716,26 +4776,33 @@ public:
SourceLocation Loc,
unsigned NumParams);
- enum CopyElisionSemanticsKind {
- CES_Strict = 0,
- CES_AllowParameters = 1,
- CES_AllowDifferentTypes = 2,
- CES_AllowExceptionVariables = 4,
- CES_FormerDefault = (CES_AllowParameters),
- CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
- CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
- CES_AllowExceptionVariables),
+ struct NamedReturnInfo {
+ const VarDecl *Candidate;
+
+ enum Status : uint8_t { None, MoveEligible, MoveEligibleAndCopyElidable };
+ Status S;
+
+ bool isMoveEligible() const { return S != None; };
+ bool isCopyElidable() const { return S == MoveEligibleAndCopyElidable; }
};
+ enum class SimplerImplicitMoveMode { ForceOff, Normal, ForceOn };
+ NamedReturnInfo getNamedReturnInfo(
+ Expr *&E, SimplerImplicitMoveMode Mode = SimplerImplicitMoveMode::Normal);
+ NamedReturnInfo getNamedReturnInfo(const VarDecl *VD);
+ const VarDecl *getCopyElisionCandidate(NamedReturnInfo &Info,
+ QualType ReturnType);
- VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
- CopyElisionSemanticsKind CESK);
- bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
- CopyElisionSemanticsKind CESK);
+ ExprResult
+ PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
+ const NamedReturnInfo &NRInfo, Expr *Value,
+ bool SupressSimplerImplicitMoves = false);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
- StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
+ StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
+ NamedReturnInfo &NRInfo,
+ bool SupressSimplerImplicitMoves);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
@@ -4830,6 +4897,10 @@ public:
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
+ /// If VD is set but not otherwise used, diagnose, for a parameter or a
+ /// variable.
+ void DiagnoseUnusedButSetDecl(const VarDecl *VD);
+
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
@@ -5155,6 +5226,15 @@ public:
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
+ ExprResult BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ TypeSourceInfo *TSI);
+ ExprResult ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ ParsedType ParsedTy);
+
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
@@ -5352,6 +5432,8 @@ public:
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
+ Expr *BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
+ MultiExprArg CallArgs);
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
@@ -5558,6 +5640,9 @@ public:
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
+ ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
@@ -5640,11 +5725,12 @@ public:
SourceLocation IdentLoc,
IdentifierInfo *Ident);
+ void FilterUsingLookup(Scope *S, LookupResult &lookup);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
- bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
+ bool CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
- UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
+ UsingShadowDecl *BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
@@ -5653,17 +5739,23 @@ public:
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
- bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
- bool HasTypename,
+ bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
- SourceLocation NameLoc);
+ SourceLocation NameLoc,
+ const LookupResult *R = nullptr,
+ const UsingDecl *UD = nullptr);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
- const ParsedAttributesView &AttrList, bool IsInstantiation);
+ const ParsedAttributesView &AttrList, bool IsInstantiation,
+ bool IsUsingIfExists);
+ NamedDecl *BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ SourceLocation EnumLoc,
+ SourceLocation NameLoc, EnumDecl *ED);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
@@ -5681,6 +5773,9 @@ public:
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
+ Decl *ActOnUsingEnumDeclaration(Scope *CurScope, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ SourceLocation EnumLoc, const DeclSpec &);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
@@ -5978,9 +6073,9 @@ public:
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
- MultiExprArg ArgsPtr,
+ QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
- SmallVectorImpl<Expr*> &ConvertedArgs,
+ SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
@@ -6004,6 +6099,12 @@ public:
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
+ // Checks that the vector type should be initialized from a scalar
+ // by splatting the value rather than populating a single element.
+ // This is the case for AltiVecVector types as well as with
+ // AltiVecPixel and AltiVecBool when -faltivec-src-compat=xl is specified.
+ bool ShouldSplatAltivecScalarInCast(const VectorType *VecTy);
+
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
@@ -6335,6 +6436,9 @@ public:
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
+ // Complete an enum decl, maybe without a scope spec.
+ bool RequireCompleteEnumDecl(EnumDecl *D, SourceLocation L,
+ CXXScopeSpec *SS = nullptr);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
@@ -6558,7 +6662,7 @@ public:
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
- Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None);
+ Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
@@ -7424,6 +7528,11 @@ public:
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
+ bool BuildTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstraint,
+ TemplateTypeParmDecl *ConstrainedParameter,
+ SourceLocation EllipsisLoc,
+ bool AllowUnexpandedPack);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
@@ -10107,85 +10216,42 @@ public:
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
- // OpenCL extensions.
+ // OpenMP directives and clauses.
//
private:
- std::string CurrOpenCLExtension;
- /// Extensions required by an OpenCL type.
- llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
- /// Extensions required by an OpenCL declaration.
- llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
-public:
- llvm::StringRef getCurrentOpenCLExtension() const {
- return CurrOpenCLExtension;
- }
+ void *VarDataSharingAttributesStack;
- /// Check if a function declaration \p FD associates with any
- /// extensions present in OpenCLDeclExtMap and if so return the
- /// extension(s) name(s).
- std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
+ struct DeclareTargetContextInfo {
+ struct MapInfo {
+ OMPDeclareTargetDeclAttr::MapTypeTy MT;
+ SourceLocation Loc;
+ };
+ /// Explicitly listed variables and functions in a 'to' or 'link' clause.
+ llvm::DenseMap<NamedDecl *, MapInfo> ExplicitlyMapped;
- /// Check if a function type \p FT associates with any
- /// extensions present in OpenCLTypeExtMap and if so return the
- /// extension(s) name(s).
- std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
+ /// The 'device_type' as parsed from the clause.
+ OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
- /// Find an extension in an appropriate extension map and return its name
- template<typename T, typename MapT>
- std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
+ /// The directive kind, `begin declare target` or `declare target`.
+ OpenMPDirectiveKind Kind;
- void setCurrentOpenCLExtension(llvm::StringRef Ext) {
- CurrOpenCLExtension = std::string(Ext);
- }
+ /// The directive location.
+ SourceLocation Loc;
- /// Set OpenCL extensions for a type which can only be used when these
- /// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
- /// \param Exts A space separated list of OpenCL extensions.
- void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
-
- /// Set OpenCL extensions for a declaration which can only be
- /// used when these OpenCL extensions are enabled. If \p Exts is empty, do
- /// nothing.
- /// \param Exts A space separated list of OpenCL extensions.
- void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
-
- /// Set current OpenCL extensions for a type which can only be used
- /// when these OpenCL extensions are enabled. If current OpenCL extension is
- /// empty, do nothing.
- void setCurrentOpenCLExtensionForType(QualType T);
-
- /// Set current OpenCL extensions for a declaration which
- /// can only be used when these OpenCL extensions are enabled. If current
- /// OpenCL extension is empty, do nothing.
- void setCurrentOpenCLExtensionForDecl(Decl *FD);
-
- bool isOpenCLDisabledDecl(Decl *FD);
-
- /// Check if type \p T corresponding to declaration specifier \p DS
- /// is disabled due to required OpenCL extensions being disabled. If so,
- /// emit diagnostics.
- /// \return true if type is disabled.
- bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
-
- /// Check if declaration \p D used by expression \p E
- /// is disabled due to required OpenCL extensions being disabled. If so,
- /// emit diagnostics.
- /// \return true if type is disabled.
- bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
+ DeclareTargetContextInfo(OpenMPDirectiveKind Kind, SourceLocation Loc)
+ : Kind(Kind), Loc(Loc) {}
+ };
- //===--------------------------------------------------------------------===//
- // OpenMP directives and clauses.
- //
-private:
- void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
- SmallVector<SourceLocation, 4> DeclareTargetNesting;
+ SmallVector<DeclareTargetContextInfo, 4> DeclareTargetNesting;
+
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
- bool StrictlyPositive = true);
+ bool StrictlyPositive = true,
+ bool SuppressExprDiags = false);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
@@ -10203,20 +10269,24 @@ private:
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
- /// Checks if a type or a declaration is disabled due to the owning extension
- /// being disabled, and emits diagnostic messages if it is disabled.
- /// \param D type or declaration to be checked.
- /// \param DiagLoc source location for the diagnostic message.
- /// \param DiagInfo information to be emitted for the diagnostic message.
- /// \param SrcRange source range of the declaration.
- /// \param Map maps type or declaration to the extensions.
- /// \param Selector selects diagnostic message: 0 for type and 1 for
- /// declaration.
- /// \return true if the type or declaration is disabled.
- template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
- bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
- MapT &Map, unsigned Selector = 0,
- SourceRange SrcRange = SourceRange());
+ /// Analyzes and checks a loop nest for use by a loop transformation.
+ ///
+ /// \param Kind The loop transformation directive kind.
+ /// \param NumLoops How many nested loops the directive is expecting.
+ /// \param AStmt Associated statement of the transformation directive.
+ /// \param LoopHelpers [out] The loop analysis result.
+ /// \param Body [out] The body code nested in \p NumLoops loop.
+ /// \param OriginalInits [out] Collection of statements and declarations that
+ /// must have been executed/declared before entering the
+ /// loop.
+ ///
+ /// \return Whether there was any error.
+ bool checkTransformableLoopNest(
+ OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
+ SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
+ Stmt *&Body,
+ SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
+ &OriginalInits);
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
@@ -10445,19 +10515,28 @@ public:
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
- bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
- /// Called at the end of target region i.e. '#pragme omp end declare target'.
- void ActOnFinishOpenMPDeclareTargetDirective();
+ bool ActOnStartOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
+
+ /// Called at the end of target region i.e. '#pragma omp end declare target'.
+ const DeclareTargetContextInfo ActOnOpenMPEndDeclareTargetDirective();
+
+ /// Called once a target context is completed, that can be when a
+ /// '#pragma omp end declare target' was encountered or when a
+ /// '#pragma omp declare target' without declaration-definition-seq was
+ /// encountered.
+ void ActOnFinishedOpenMPDeclareTargetContext(DeclareTargetContextInfo &DTCI);
+
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
- NamedDecl *
- lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id,
- NamedDeclSetType &SameDirectiveDecls);
+ NamedDecl *lookupOpenMPDeclareTargetName(Scope *CurScope,
+ CXXScopeSpec &ScopeSpec,
+ const DeclarationNameInfo &Id);
+
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
+
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
@@ -10479,6 +10558,11 @@ public:
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
+
+ /// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
+ /// an OpenMP loop directive.
+ StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
+
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
@@ -10504,6 +10588,16 @@ public:
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '#pragma omp tile' after parsing of its clauses and
+ /// the associated statement.
+ StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed '#pragma omp unroll' after parsing of its clauses
+ /// and the associated statement.
+ StmtResult ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
@@ -10764,6 +10858,20 @@ public:
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
+ /// Called on well-formed '\#pragma omp interop'.
+ StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp dispatch' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed '\#pragma omp masked' after parsing of the
+ /// associated statement.
+ StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
@@ -10840,6 +10948,18 @@ public:
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'sizes' clause.
+ OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'full' clause.
+ OMPClause *ActOnOpenMPFullClause(SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'partial' clause.
+ OMPClause *ActOnOpenMPPartialClause(Expr *FactorExpr, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
@@ -10948,9 +11068,39 @@ public:
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
+
+ /// Called on well-formed 'init' clause.
+ OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
+ bool IsTarget, bool IsTargetSync,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc);
+
+ /// Called on well-formed 'use' clause.
+ OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc, SourceLocation EndLoc);
+
/// Called on well-formed 'destroy' clause.
- OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
+ OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
SourceLocation EndLoc);
+ /// Called on well-formed 'novariants' clause.
+ OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'nocontext' clause.
+ OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
+ /// Called on well-formed 'filter' clause.
+ OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
@@ -11199,11 +11349,11 @@ public:
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
- ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
- ExprValueKind VK = VK_RValue,
- const CXXCastPath *BasePath = nullptr,
- CheckedConversionKind CCK
- = CCK_ImplicitConversion);
+ ExprResult
+ ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
+ ExprValueKind VK = VK_PRValue,
+ const CXXCastPath *BasePath = nullptr,
+ CheckedConversionKind CCK = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
@@ -11277,6 +11427,18 @@ public:
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
+ /// Check whether the given statement can have musttail applied to it,
+ /// issuing a diagnostic and returning false if not. In the success case,
+ /// the statement is rewritten to remove implicit nodes from the return
+ /// value.
+ bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
+
+private:
+ /// Check whether the given statement can have musttail applied to it,
+ /// issuing a diagnostic and returning false if not.
+ bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
+
+public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
@@ -11476,7 +11638,7 @@ public:
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
- Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
+ Expr *E, QualType Ty, ExprValueKind VK = VK_PRValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
@@ -11535,9 +11697,9 @@ public:
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
- QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
- ExprResult &RHS,
- SourceLocation QuestionLoc);
+ QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation QuestionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
@@ -11581,6 +11743,9 @@ public:
bool isValidSveBitcast(QualType srcType, QualType destType);
+ bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
+
+ bool areVectorTypesSameSize(QualType srcType, QualType destType);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
@@ -11639,6 +11804,13 @@ public:
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
+ // CheckMatrixCast - Check type constraints for matrix casts.
+ // We allow casting between matrixes of the same dimensions i.e. when they
+ // have the same number of rows and column. Returns true if the cast is
+ // invalid.
+ bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
+ CastKind &Kind);
+
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
@@ -11948,8 +12120,8 @@ public:
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
- SemaDiagnosticBuilder diagIfOpenMPDeviceCode(SourceLocation Loc,
- unsigned DiagID);
+ SemaDiagnosticBuilder
+ diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
@@ -11965,17 +12137,19 @@ public:
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
- unsigned DiagID);
+ unsigned DiagID, FunctionDecl *FD);
- SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
+ SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
+ FunctionDecl *FD = nullptr);
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
- const PartialDiagnostic &PD) {
- return targetDiag(Loc, PD.getDiagID()) << PD;
+ const PartialDiagnostic &PD,
+ FunctionDecl *FD = nullptr) {
+ return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the expression is allowed to be used in expressions for the
/// offloading devices.
- void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc);
+ void checkDeviceDecl(ValueDecl *D, SourceLocation Loc);
enum CUDAFunctionTarget {
CFT_Device,
@@ -11994,6 +12168,15 @@ public:
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
+ enum CUDAVariableTarget {
+ CVT_Device, /// Emitted on device side with a shadow variable on host side
+ CVT_Host, /// Emitted on host side only
+ CVT_Both, /// Emitted on both sides with different addresses
+ CVT_Unified, /// Emitted as a unified address, e.g. managed variables
+ };
+ /// Determines whether the given variable is emitted on host or device side.
+ CUDAVariableTarget IdentifyCUDATarget(const VarDecl *D);
+
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
@@ -12192,8 +12375,14 @@ public:
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
- /// Reports signatures for a call to CodeCompleteConsumer and returns the
- /// preferred type for the current argument. Returned type can be null.
+ /// Determines the preferred type of the current function argument, by
+ /// examining the signatures of all possible overloads.
+ /// Returns null if unknown or ambiguous, or if code completion is off.
+ ///
+ /// If the code completion point has been reached, also reports the function
+ /// signatures that were considered.
+ ///
+ /// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
@@ -12328,10 +12517,12 @@ private:
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
- void CheckConstructorCall(FunctionDecl *FDecl,
+ void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
- const FunctionProtoType *Proto,
- SourceLocation Loc);
+ const FunctionProtoType *Proto, SourceLocation Loc);
+
+ void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
+ StringRef ParamName, QualType ArgTy, QualType ParamTy);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
@@ -12386,6 +12577,9 @@ private:
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum);
+ bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
+ CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
@@ -12394,6 +12588,7 @@ private:
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
+ bool SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum);
public:
// Used by C++ template instantiation.
@@ -12405,6 +12600,7 @@ public:
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
+ bool SemaBuiltinArithmeticFence(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
diff --git a/clang/include/clang/Sema/Template.h b/clang/include/clang/Sema/Template.h
index 0dcaf565591b..540d2c9aa87e 100644
--- a/clang/include/clang/Sema/Template.h
+++ b/clang/include/clang/Sema/Template.h
@@ -537,6 +537,8 @@ enum class TemplateSubstitutionKind : char {
Decl *VisitDecl(Decl *D);
Decl *VisitVarDecl(VarDecl *D, bool InstantiatingVarTemplate,
ArrayRef<BindingDecl *> *Bindings = nullptr);
+ Decl *VisitBaseUsingDecls(BaseUsingDecl *D, BaseUsingDecl *Inst,
+ LookupResult *Lookup);
// Enable late instantiation of attributes. Late instantiated attributes
// will be stored in LA.
diff --git a/clang/include/clang/Serialization/ASTBitCodes.h b/clang/include/clang/Serialization/ASTBitCodes.h
index e9fc202f8d1d..027a981df22c 100644
--- a/clang/include/clang/Serialization/ASTBitCodes.h
+++ b/clang/include/clang/Serialization/ASTBitCodes.h
@@ -41,7 +41,7 @@ namespace serialization {
/// Version 4 of AST files also requires that the version control branch and
/// revision match exactly, since there is no backward compatibility of
/// AST files at this time.
-const unsigned VERSION_MAJOR = 11;
+const unsigned VERSION_MAJOR = 15;
/// AST file minor version number supported by this version of
/// Clang.
@@ -109,2017 +109,2028 @@ public:
}
};
- /// A structure for putting "fast"-unqualified QualTypes into a
- /// DenseMap. This uses the standard pointer hash function.
- struct UnsafeQualTypeDenseMapInfo {
- static bool isEqual(QualType A, QualType B) { return A == B; }
+/// A structure for putting "fast"-unqualified QualTypes into a
+/// DenseMap. This uses the standard pointer hash function.
+struct UnsafeQualTypeDenseMapInfo {
+ static bool isEqual(QualType A, QualType B) { return A == B; }
- static QualType getEmptyKey() {
- return QualType::getFromOpaquePtr((void*) 1);
- }
+ static QualType getEmptyKey() {
+ return QualType::getFromOpaquePtr((void *)1);
+ }
+
+ static QualType getTombstoneKey() {
+ return QualType::getFromOpaquePtr((void *)2);
+ }
+
+ static unsigned getHashValue(QualType T) {
+ assert(!T.getLocalFastQualifiers() &&
+ "hash invalid for types with fast quals");
+ uintptr_t v = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ return (unsigned(v) >> 4) ^ (unsigned(v) >> 9);
+ }
+};
+
+/// An ID number that refers to an identifier in an AST file.
+using IdentID = uint32_t;
+
+/// The number of predefined identifier IDs.
+const unsigned int NUM_PREDEF_IDENT_IDS = 1;
+
+/// An ID number that refers to a macro in an AST file.
+using MacroID = uint32_t;
- static QualType getTombstoneKey() {
- return QualType::getFromOpaquePtr((void*) 2);
- }
-
- static unsigned getHashValue(QualType T) {
- assert(!T.getLocalFastQualifiers() &&
- "hash invalid for types with fast quals");
- uintptr_t v = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
- return (unsigned(v) >> 4) ^ (unsigned(v) >> 9);
- }
- };
-
- /// An ID number that refers to an identifier in an AST file.
- using IdentID = uint32_t;
-
- /// The number of predefined identifier IDs.
- const unsigned int NUM_PREDEF_IDENT_IDS = 1;
-
- /// An ID number that refers to a macro in an AST file.
- using MacroID = uint32_t;
-
- /// A global ID number that refers to a macro in an AST file.
- using GlobalMacroID = uint32_t;
-
- /// A local to a module ID number that refers to a macro in an
- /// AST file.
- using LocalMacroID = uint32_t;
-
- /// The number of predefined macro IDs.
- const unsigned int NUM_PREDEF_MACRO_IDS = 1;
-
- /// An ID number that refers to an ObjC selector in an AST file.
- using SelectorID = uint32_t;
-
- /// The number of predefined selector IDs.
- const unsigned int NUM_PREDEF_SELECTOR_IDS = 1;
-
- /// An ID number that refers to a set of CXXBaseSpecifiers in an
- /// AST file.
- using CXXBaseSpecifiersID = uint32_t;
-
- /// An ID number that refers to a list of CXXCtorInitializers in an
- /// AST file.
- using CXXCtorInitializersID = uint32_t;
-
- /// An ID number that refers to an entity in the detailed
- /// preprocessing record.
- using PreprocessedEntityID = uint32_t;
-
- /// An ID number that refers to a submodule in a module file.
- using SubmoduleID = uint32_t;
-
- /// The number of predefined submodule IDs.
- const unsigned int NUM_PREDEF_SUBMODULE_IDS = 1;
-
- /// Source range/offset of a preprocessed entity.
- struct PPEntityOffset {
- /// Raw source location of beginning of range.
- unsigned Begin;
-
- /// Raw source location of end of range.
- unsigned End;
-
- /// Offset in the AST file relative to ModuleFile::MacroOffsetsBase.
- uint32_t BitOffset;
-
- PPEntityOffset(SourceRange R, uint32_t BitOffset)
- : Begin(R.getBegin().getRawEncoding()),
- End(R.getEnd().getRawEncoding()), BitOffset(BitOffset) {}
-
- SourceLocation getBegin() const {
- return SourceLocation::getFromRawEncoding(Begin);
- }
-
- SourceLocation getEnd() const {
- return SourceLocation::getFromRawEncoding(End);
- }
- };
-
- /// Source range of a skipped preprocessor region
- struct PPSkippedRange {
- /// Raw source location of beginning of range.
- unsigned Begin;
- /// Raw source location of end of range.
- unsigned End;
-
- PPSkippedRange(SourceRange R)
- : Begin(R.getBegin().getRawEncoding()),
- End(R.getEnd().getRawEncoding()) { }
-
- SourceLocation getBegin() const {
- return SourceLocation::getFromRawEncoding(Begin);
- }
- SourceLocation getEnd() const {
- return SourceLocation::getFromRawEncoding(End);
- }
- };
-
- /// Offset in the AST file. Use splitted 64-bit integer into low/high
- /// parts to keep structure alignment 32-bit (it is important because
- /// blobs in bitstream are 32-bit aligned). This structure is serialized
- /// "as is" to the AST file.
- struct UnderalignedInt64 {
- uint32_t BitOffsetLow = 0;
- uint32_t BitOffsetHigh = 0;
-
- UnderalignedInt64() = default;
- UnderalignedInt64(uint64_t BitOffset) { setBitOffset(BitOffset); }
-
- void setBitOffset(uint64_t Offset) {
- BitOffsetLow = Offset;
- BitOffsetHigh = Offset >> 32;
- }
-
- uint64_t getBitOffset() const {
- return BitOffsetLow | (uint64_t(BitOffsetHigh) << 32);
- }
- };
-
- /// Source location and bit offset of a declaration.
- struct DeclOffset {
- /// Raw source location.
- unsigned Loc = 0;
-
- /// Offset relative to the start of the DECLTYPES_BLOCK block. Keep
- /// structure alignment 32-bit and avoid padding gap because undefined
- /// value in the padding affects AST hash.
- UnderalignedInt64 BitOffset;
-
- DeclOffset() = default;
- DeclOffset(SourceLocation Loc, uint64_t BitOffset,
- uint64_t DeclTypesBlockStartOffset) {
- setLocation(Loc);
- setBitOffset(BitOffset, DeclTypesBlockStartOffset);
- }
-
- void setLocation(SourceLocation L) {
- Loc = L.getRawEncoding();
- }
-
- SourceLocation getLocation() const {
- return SourceLocation::getFromRawEncoding(Loc);
- }
-
- void setBitOffset(uint64_t Offset,
- const uint64_t DeclTypesBlockStartOffset) {
- BitOffset.setBitOffset(Offset - DeclTypesBlockStartOffset);
- }
-
- uint64_t getBitOffset(const uint64_t DeclTypesBlockStartOffset) const {
- return BitOffset.getBitOffset() + DeclTypesBlockStartOffset;
- }
- };
-
- /// The number of predefined preprocessed entity IDs.
- const unsigned int NUM_PREDEF_PP_ENTITY_IDS = 1;
-
- /// Describes the various kinds of blocks that occur within
- /// an AST file.
- enum BlockIDs {
- /// The AST block, which acts as a container around the
- /// full AST block.
- AST_BLOCK_ID = llvm::bitc::FIRST_APPLICATION_BLOCKID,
-
- /// The block containing information about the source
- /// manager.
- SOURCE_MANAGER_BLOCK_ID,
-
- /// The block containing information about the
- /// preprocessor.
- PREPROCESSOR_BLOCK_ID,
-
- /// The block containing the definitions of all of the
- /// types and decls used within the AST file.
- DECLTYPES_BLOCK_ID,
-
- /// The block containing the detailed preprocessing record.
- PREPROCESSOR_DETAIL_BLOCK_ID,
-
- /// The block containing the submodule structure.
- SUBMODULE_BLOCK_ID,
-
- /// The block containing comments.
- COMMENTS_BLOCK_ID,
-
- /// The control block, which contains all of the
- /// information that needs to be validated prior to committing
- /// to loading the AST file.
- CONTROL_BLOCK_ID,
-
- /// The block of input files, which were used as inputs
- /// to create this AST file.
- ///
- /// This block is part of the control block.
- INPUT_FILES_BLOCK_ID,
-
- /// The block of configuration options, used to check that
- /// a module is being used in a configuration compatible with the
- /// configuration in which it was built.
- ///
- /// This block is part of the control block.
- OPTIONS_BLOCK_ID,
-
- /// A block containing a module file extension.
- EXTENSION_BLOCK_ID,
-
- /// A block with unhashed content.
- ///
- /// These records should not change the \a ASTFileSignature. See \a
- /// UnhashedControlBlockRecordTypes for the list of records.
- UNHASHED_CONTROL_BLOCK_ID,
- };
-
- /// Record types that occur within the control block.
- enum ControlRecordTypes {
- /// AST file metadata, including the AST file version number
- /// and information about the compiler used to build this AST file.
- METADATA = 1,
-
- /// Record code for the list of other AST files imported by
- /// this AST file.
- IMPORTS,
-
- /// Record code for the original file that was used to
- /// generate the AST file, including both its file ID and its
- /// name.
- ORIGINAL_FILE,
-
- /// The directory that the PCH was originally created in.
- ORIGINAL_PCH_DIR,
-
- /// Record code for file ID of the file or buffer that was used to
- /// generate the AST file.
- ORIGINAL_FILE_ID,
-
- /// Offsets into the input-files block where input files
- /// reside.
- INPUT_FILE_OFFSETS,
-
- /// Record code for the module name.
- MODULE_NAME,
-
- /// Record code for the module map file that was used to build this
- /// AST file.
- MODULE_MAP_FILE,
-
- /// Record code for the module build directory.
- MODULE_DIRECTORY,
- };
-
- /// Record types that occur within the options block inside
- /// the control block.
- enum OptionsRecordTypes {
- /// Record code for the language options table.
- ///
- /// The record with this code contains the contents of the
- /// LangOptions structure. We serialize the entire contents of
- /// the structure, and let the reader decide which options are
- /// actually important to check.
- LANGUAGE_OPTIONS = 1,
-
- /// Record code for the target options table.
- TARGET_OPTIONS,
-
- /// Record code for the filesystem options table.
- FILE_SYSTEM_OPTIONS,
-
- /// Record code for the headers search options table.
- HEADER_SEARCH_OPTIONS,
-
- /// Record code for the preprocessor options table.
- PREPROCESSOR_OPTIONS,
- };
-
- /// Record codes for the unhashed control block.
- enum UnhashedControlBlockRecordTypes {
- /// Record code for the signature that identifiers this AST file.
- SIGNATURE = 1,
-
- /// Record code for the content hash of the AST block.
- AST_BLOCK_HASH,
-
- /// Record code for the diagnostic options table.
- DIAGNOSTIC_OPTIONS,
-
- /// Record code for \#pragma diagnostic mappings.
- DIAG_PRAGMA_MAPPINGS,
- };
-
- /// Record code for extension blocks.
- enum ExtensionBlockRecordTypes {
- /// Metadata describing this particular extension.
- EXTENSION_METADATA = 1,
-
- /// The first record ID allocated to the extensions themselves.
- FIRST_EXTENSION_RECORD_ID = 4
- };
-
- /// Record types that occur within the input-files block
- /// inside the control block.
- enum InputFileRecordTypes {
- /// An input file.
- INPUT_FILE = 1,
-
- /// The input file content hash
- INPUT_FILE_HASH
- };
-
- /// Record types that occur within the AST block itself.
- enum ASTRecordTypes {
- /// Record code for the offsets of each type.
- ///
- /// The TYPE_OFFSET constant describes the record that occurs
- /// within the AST block. The record itself is an array of offsets that
- /// point into the declarations and types block (identified by
- /// DECLTYPES_BLOCK_ID). The index into the array is based on the ID
- /// of a type. For a given type ID @c T, the lower three bits of
- /// @c T are its qualifiers (const, volatile, restrict), as in
- /// the QualType class. The upper bits, after being shifted and
- /// subtracting NUM_PREDEF_TYPE_IDS, are used to index into the
- /// TYPE_OFFSET block to determine the offset of that type's
- /// corresponding record within the DECLTYPES_BLOCK_ID block.
- TYPE_OFFSET = 1,
-
- /// Record code for the offsets of each decl.
- ///
- /// The DECL_OFFSET constant describes the record that occurs
- /// within the block identified by DECL_OFFSETS_BLOCK_ID within
- /// the AST block. The record itself is an array of offsets that
- /// point into the declarations and types block (identified by
- /// DECLTYPES_BLOCK_ID). The declaration ID is an index into this
- /// record, after subtracting one to account for the use of
- /// declaration ID 0 for a NULL declaration pointer. Index 0 is
- /// reserved for the translation unit declaration.
- DECL_OFFSET = 2,
-
- /// Record code for the table of offsets of each
- /// identifier ID.
- ///
- /// The offset table contains offsets into the blob stored in
- /// the IDENTIFIER_TABLE record. Each offset points to the
- /// NULL-terminated string that corresponds to that identifier.
- IDENTIFIER_OFFSET = 3,
-
- /// This is so that older clang versions, before the introduction
- /// of the control block, can read and reject the newer PCH format.
- /// *DON'T CHANGE THIS NUMBER*.
- METADATA_OLD_FORMAT = 4,
-
- /// Record code for the identifier table.
- ///
- /// The identifier table is a simple blob that contains
- /// NULL-terminated strings for all of the identifiers
- /// referenced by the AST file. The IDENTIFIER_OFFSET table
- /// contains the mapping from identifier IDs to the characters
- /// in this blob. Note that the starting offsets of all of the
- /// identifiers are odd, so that, when the identifier offset
- /// table is loaded in, we can use the low bit to distinguish
- /// between offsets (for unresolved identifier IDs) and
- /// IdentifierInfo pointers (for already-resolved identifier
- /// IDs).
- IDENTIFIER_TABLE = 5,
-
- /// Record code for the array of eagerly deserialized decls.
- ///
- /// The AST file contains a list of all of the declarations that should be
- /// eagerly deserialized present within the parsed headers, stored as an
- /// array of declaration IDs. These declarations will be
- /// reported to the AST consumer after the AST file has been
- /// read, since their presence can affect the semantics of the
- /// program (e.g., for code generation).
- EAGERLY_DESERIALIZED_DECLS = 6,
-
- /// Record code for the set of non-builtin, special
- /// types.
- ///
- /// This record contains the type IDs for the various type nodes
- /// that are constructed during semantic analysis (e.g.,
- /// __builtin_va_list). The SPECIAL_TYPE_* constants provide
- /// offsets into this record.
- SPECIAL_TYPES = 7,
-
- /// Record code for the extra statistics we gather while
- /// generating an AST file.
- STATISTICS = 8,
-
- /// Record code for the array of tentative definitions.
- TENTATIVE_DEFINITIONS = 9,
-
- // ID 10 used to be for a list of extern "C" declarations.
-
- /// Record code for the table of offsets into the
- /// Objective-C method pool.
- SELECTOR_OFFSETS = 11,
-
- /// Record code for the Objective-C method pool,
- METHOD_POOL = 12,
-
- /// The value of the next __COUNTER__ to dispense.
- /// [PP_COUNTER_VALUE, Val]
- PP_COUNTER_VALUE = 13,
-
- /// Record code for the table of offsets into the block
- /// of source-location information.
- SOURCE_LOCATION_OFFSETS = 14,
-
- /// Record code for the set of source location entries
- /// that need to be preloaded by the AST reader.
- ///
- /// This set contains the source location entry for the
- /// predefines buffer and for any file entries that need to be
- /// preloaded.
- SOURCE_LOCATION_PRELOADS = 15,
-
- /// Record code for the set of ext_vector type names.
- EXT_VECTOR_DECLS = 16,
-
- /// Record code for the array of unused file scoped decls.
- UNUSED_FILESCOPED_DECLS = 17,
-
- /// Record code for the table of offsets to entries in the
- /// preprocessing record.
- PPD_ENTITIES_OFFSETS = 18,
-
- /// Record code for the array of VTable uses.
- VTABLE_USES = 19,
-
- // ID 20 used to be for a list of dynamic classes.
-
- /// Record code for referenced selector pool.
- REFERENCED_SELECTOR_POOL = 21,
-
- /// Record code for an update to the TU's lexically contained
- /// declarations.
- TU_UPDATE_LEXICAL = 22,
-
- // ID 23 used to be for a list of local redeclarations.
-
- /// Record code for declarations that Sema keeps references of.
- SEMA_DECL_REFS = 24,
-
- /// Record code for weak undeclared identifiers.
- WEAK_UNDECLARED_IDENTIFIERS = 25,
+/// A global ID number that refers to a macro in an AST file.
+using GlobalMacroID = uint32_t;
- /// Record code for pending implicit instantiations.
- PENDING_IMPLICIT_INSTANTIATIONS = 26,
+/// A local to a module ID number that refers to a macro in an
+/// AST file.
+using LocalMacroID = uint32_t;
- // ID 27 used to be for a list of replacement decls.
+/// The number of predefined macro IDs.
+const unsigned int NUM_PREDEF_MACRO_IDS = 1;
- /// Record code for an update to a decl context's lookup table.
- ///
- /// In practice, this should only be used for the TU and namespaces.
- UPDATE_VISIBLE = 28,
+/// An ID number that refers to an ObjC selector in an AST file.
+using SelectorID = uint32_t;
- /// Record for offsets of DECL_UPDATES records for declarations
- /// that were modified after being deserialized and need updates.
- DECL_UPDATE_OFFSETS = 29,
+/// The number of predefined selector IDs.
+const unsigned int NUM_PREDEF_SELECTOR_IDS = 1;
- // ID 30 used to be a decl update record. These are now in the DECLTYPES
- // block.
+/// An ID number that refers to a set of CXXBaseSpecifiers in an
+/// AST file.
+using CXXBaseSpecifiersID = uint32_t;
+
+/// An ID number that refers to a list of CXXCtorInitializers in an
+/// AST file.
+using CXXCtorInitializersID = uint32_t;
+
+/// An ID number that refers to an entity in the detailed
+/// preprocessing record.
+using PreprocessedEntityID = uint32_t;
+
+/// An ID number that refers to a submodule in a module file.
+using SubmoduleID = uint32_t;
+
+/// The number of predefined submodule IDs.
+const unsigned int NUM_PREDEF_SUBMODULE_IDS = 1;
+
+/// Source range/offset of a preprocessed entity.
+struct PPEntityOffset {
+ /// Raw source location of beginning of range.
+ SourceLocation::UIntTy Begin;
+
+ /// Raw source location of end of range.
+ SourceLocation::UIntTy End;
+
+ /// Offset in the AST file relative to ModuleFile::MacroOffsetsBase.
+ uint32_t BitOffset;
+
+ PPEntityOffset(SourceRange R, uint32_t BitOffset)
+ : Begin(R.getBegin().getRawEncoding()), End(R.getEnd().getRawEncoding()),
+ BitOffset(BitOffset) {}
+
+ SourceLocation getBegin() const {
+ return SourceLocation::getFromRawEncoding(Begin);
+ }
- // ID 31 used to be a list of offsets to DECL_CXX_BASE_SPECIFIERS records.
+ SourceLocation getEnd() const {
+ return SourceLocation::getFromRawEncoding(End);
+ }
+};
- // ID 32 used to be the code for \#pragma diagnostic mappings.
+/// Source range of a skipped preprocessor region
+struct PPSkippedRange {
+ /// Raw source location of beginning of range.
+ SourceLocation::UIntTy Begin;
+ /// Raw source location of end of range.
+ SourceLocation::UIntTy End;
- /// Record code for special CUDA declarations.
- CUDA_SPECIAL_DECL_REFS = 33,
+ PPSkippedRange(SourceRange R)
+ : Begin(R.getBegin().getRawEncoding()), End(R.getEnd().getRawEncoding()) {
+ }
- /// Record code for header search information.
- HEADER_SEARCH_TABLE = 34,
+ SourceLocation getBegin() const {
+ return SourceLocation::getFromRawEncoding(Begin);
+ }
+ SourceLocation getEnd() const {
+ return SourceLocation::getFromRawEncoding(End);
+ }
+};
- /// Record code for floating point \#pragma options.
- FP_PRAGMA_OPTIONS = 35,
+/// Offset in the AST file. Use splitted 64-bit integer into low/high
+/// parts to keep structure alignment 32-bit (it is important because
+/// blobs in bitstream are 32-bit aligned). This structure is serialized
+/// "as is" to the AST file.
+struct UnderalignedInt64 {
+ uint32_t BitOffsetLow = 0;
+ uint32_t BitOffsetHigh = 0;
- /// Record code for enabled OpenCL extensions.
- OPENCL_EXTENSIONS = 36,
+ UnderalignedInt64() = default;
+ UnderalignedInt64(uint64_t BitOffset) { setBitOffset(BitOffset); }
- /// The list of delegating constructor declarations.
- DELEGATING_CTORS = 37,
+ void setBitOffset(uint64_t Offset) {
+ BitOffsetLow = Offset;
+ BitOffsetHigh = Offset >> 32;
+ }
- /// Record code for the set of known namespaces, which are used
- /// for typo correction.
- KNOWN_NAMESPACES = 38,
+ uint64_t getBitOffset() const {
+ return BitOffsetLow | (uint64_t(BitOffsetHigh) << 32);
+ }
+};
- /// Record code for the remapping information used to relate
- /// loaded modules to the various offsets and IDs(e.g., source location
- /// offests, declaration and type IDs) that are used in that module to
- /// refer to other modules.
- MODULE_OFFSET_MAP = 39,
+/// Source location and bit offset of a declaration.
+struct DeclOffset {
+ /// Raw source location.
+ SourceLocation::UIntTy Loc = 0;
+
+ /// Offset relative to the start of the DECLTYPES_BLOCK block. Keep
+ /// structure alignment 32-bit and avoid padding gap because undefined
+ /// value in the padding affects AST hash.
+ UnderalignedInt64 BitOffset;
+
+ DeclOffset() = default;
+ DeclOffset(SourceLocation Loc, uint64_t BitOffset,
+ uint64_t DeclTypesBlockStartOffset) {
+ setLocation(Loc);
+ setBitOffset(BitOffset, DeclTypesBlockStartOffset);
+ }
- /// Record code for the source manager line table information,
- /// which stores information about \#line directives.
- SOURCE_MANAGER_LINE_TABLE = 40,
+ void setLocation(SourceLocation L) { Loc = L.getRawEncoding(); }
- /// Record code for map of Objective-C class definition IDs to the
- /// ObjC categories in a module that are attached to that class.
- OBJC_CATEGORIES_MAP = 41,
+ SourceLocation getLocation() const {
+ return SourceLocation::getFromRawEncoding(Loc);
+ }
- /// Record code for a file sorted array of DeclIDs in a module.
- FILE_SORTED_DECLS = 42,
+ void setBitOffset(uint64_t Offset, const uint64_t DeclTypesBlockStartOffset) {
+ BitOffset.setBitOffset(Offset - DeclTypesBlockStartOffset);
+ }
- /// Record code for an array of all of the (sub)modules that were
- /// imported by the AST file.
- IMPORTED_MODULES = 43,
+ uint64_t getBitOffset(const uint64_t DeclTypesBlockStartOffset) const {
+ return BitOffset.getBitOffset() + DeclTypesBlockStartOffset;
+ }
+};
- // ID 44 used to be a table of merged canonical declarations.
- // ID 45 used to be a list of declaration IDs of local redeclarations.
+/// The number of predefined preprocessed entity IDs.
+const unsigned int NUM_PREDEF_PP_ENTITY_IDS = 1;
+
+/// Describes the various kinds of blocks that occur within
+/// an AST file.
+enum BlockIDs {
+ /// The AST block, which acts as a container around the
+ /// full AST block.
+ AST_BLOCK_ID = llvm::bitc::FIRST_APPLICATION_BLOCKID,
+
+ /// The block containing information about the source
+ /// manager.
+ SOURCE_MANAGER_BLOCK_ID,
+
+ /// The block containing information about the
+ /// preprocessor.
+ PREPROCESSOR_BLOCK_ID,
+
+ /// The block containing the definitions of all of the
+ /// types and decls used within the AST file.
+ DECLTYPES_BLOCK_ID,
+
+ /// The block containing the detailed preprocessing record.
+ PREPROCESSOR_DETAIL_BLOCK_ID,
+
+ /// The block containing the submodule structure.
+ SUBMODULE_BLOCK_ID,
+
+ /// The block containing comments.
+ COMMENTS_BLOCK_ID,
+
+ /// The control block, which contains all of the
+ /// information that needs to be validated prior to committing
+ /// to loading the AST file.
+ CONTROL_BLOCK_ID,
+
+ /// The block of input files, which were used as inputs
+ /// to create this AST file.
+ ///
+ /// This block is part of the control block.
+ INPUT_FILES_BLOCK_ID,
+
+ /// The block of configuration options, used to check that
+ /// a module is being used in a configuration compatible with the
+ /// configuration in which it was built.
+ ///
+ /// This block is part of the control block.
+ OPTIONS_BLOCK_ID,
+
+ /// A block containing a module file extension.
+ EXTENSION_BLOCK_ID,
+
+ /// A block with unhashed content.
+ ///
+ /// These records should not change the \a ASTFileSignature. See \a
+ /// UnhashedControlBlockRecordTypes for the list of records.
+ UNHASHED_CONTROL_BLOCK_ID,
+};
- /// Record code for the array of Objective-C categories (including
- /// extensions).
- ///
- /// This array can only be interpreted properly using the Objective-C
- /// categories map.
- OBJC_CATEGORIES = 46,
+/// Record types that occur within the control block.
+enum ControlRecordTypes {
+ /// AST file metadata, including the AST file version number
+ /// and information about the compiler used to build this AST file.
+ METADATA = 1,
- /// Record code for the table of offsets of each macro ID.
- ///
- /// The offset table contains offsets into the blob stored in
- /// the preprocessor block. Each offset points to the corresponding
- /// macro definition.
- MACRO_OFFSET = 47,
+ /// Record code for the list of other AST files imported by
+ /// this AST file.
+ IMPORTS,
- /// A list of "interesting" identifiers. Only used in C++ (where we
- /// don't normally do lookups into the serialized identifier table). These
- /// are eagerly deserialized.
- INTERESTING_IDENTIFIERS = 48,
+ /// Record code for the original file that was used to
+ /// generate the AST file, including both its file ID and its
+ /// name.
+ ORIGINAL_FILE,
- /// Record code for undefined but used functions and variables that
- /// need a definition in this TU.
- UNDEFINED_BUT_USED = 49,
+ /// The directory that the PCH was originally created in.
+ ORIGINAL_PCH_DIR,
- /// Record code for late parsed template functions.
- LATE_PARSED_TEMPLATE = 50,
+ /// Record code for file ID of the file or buffer that was used to
+ /// generate the AST file.
+ ORIGINAL_FILE_ID,
- /// Record code for \#pragma optimize options.
- OPTIMIZE_PRAGMA_OPTIONS = 51,
+ /// Offsets into the input-files block where input files
+ /// reside.
+ INPUT_FILE_OFFSETS,
- /// Record code for potentially unused local typedef names.
- UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES = 52,
+ /// Record code for the module name.
+ MODULE_NAME,
- // ID 53 used to be a table of constructor initializer records.
+ /// Record code for the module map file that was used to build this
+ /// AST file.
+ MODULE_MAP_FILE,
- /// Delete expressions that will be analyzed later.
- DELETE_EXPRS_TO_ANALYZE = 54,
+ /// Record code for the module build directory.
+ MODULE_DIRECTORY,
+};
- /// Record code for \#pragma ms_struct options.
- MSSTRUCT_PRAGMA_OPTIONS = 55,
+/// Record types that occur within the options block inside
+/// the control block.
+enum OptionsRecordTypes {
+ /// Record code for the language options table.
+ ///
+ /// The record with this code contains the contents of the
+ /// LangOptions structure. We serialize the entire contents of
+ /// the structure, and let the reader decide which options are
+ /// actually important to check.
+ LANGUAGE_OPTIONS = 1,
- /// Record code for \#pragma ms_struct options.
- POINTERS_TO_MEMBERS_PRAGMA_OPTIONS = 56,
+ /// Record code for the target options table.
+ TARGET_OPTIONS,
- /// Number of unmatched #pragma clang cuda_force_host_device begin
- /// directives we've seen.
- CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH = 57,
+ /// Record code for the filesystem options table.
+ FILE_SYSTEM_OPTIONS,
- /// Record code for types associated with OpenCL extensions.
- OPENCL_EXTENSION_TYPES = 58,
+ /// Record code for the headers search options table.
+ HEADER_SEARCH_OPTIONS,
- /// Record code for declarations associated with OpenCL extensions.
- OPENCL_EXTENSION_DECLS = 59,
+ /// Record code for the preprocessor options table.
+ PREPROCESSOR_OPTIONS,
+};
- MODULAR_CODEGEN_DECLS = 60,
+/// Record codes for the unhashed control block.
+enum UnhashedControlBlockRecordTypes {
+ /// Record code for the signature that identifiers this AST file.
+ SIGNATURE = 1,
- /// Record code for \#pragma align/pack options.
- ALIGN_PACK_PRAGMA_OPTIONS = 61,
+ /// Record code for the content hash of the AST block.
+ AST_BLOCK_HASH,
- /// The stack of open #ifs/#ifdefs recorded in a preamble.
- PP_CONDITIONAL_STACK = 62,
+ /// Record code for the diagnostic options table.
+ DIAGNOSTIC_OPTIONS,
- /// A table of skipped ranges within the preprocessing record.
- PPD_SKIPPED_RANGES = 63,
+ /// Record code for \#pragma diagnostic mappings.
+ DIAG_PRAGMA_MAPPINGS,
+};
- /// Record code for the Decls to be checked for deferred diags.
- DECLS_TO_CHECK_FOR_DEFERRED_DIAGS = 64,
+/// Record code for extension blocks.
+enum ExtensionBlockRecordTypes {
+ /// Metadata describing this particular extension.
+ EXTENSION_METADATA = 1,
- /// Record code for \#pragma float_control options.
- FLOAT_CONTROL_PRAGMA_OPTIONS = 65,
- };
+ /// The first record ID allocated to the extensions themselves.
+ FIRST_EXTENSION_RECORD_ID = 4
+};
- /// Record types used within a source manager block.
- enum SourceManagerRecordTypes {
- /// Describes a source location entry (SLocEntry) for a
- /// file.
- SM_SLOC_FILE_ENTRY = 1,
+/// Record types that occur within the input-files block
+/// inside the control block.
+enum InputFileRecordTypes {
+ /// An input file.
+ INPUT_FILE = 1,
- /// Describes a source location entry (SLocEntry) for a
- /// buffer.
- SM_SLOC_BUFFER_ENTRY = 2,
+ /// The input file content hash
+ INPUT_FILE_HASH
+};
- /// Describes a blob that contains the data for a buffer
- /// entry. This kind of record always directly follows a
- /// SM_SLOC_BUFFER_ENTRY record or a SM_SLOC_FILE_ENTRY with an
- /// overridden buffer.
- SM_SLOC_BUFFER_BLOB = 3,
+/// Record types that occur within the AST block itself.
+enum ASTRecordTypes {
+ /// Record code for the offsets of each type.
+ ///
+ /// The TYPE_OFFSET constant describes the record that occurs
+ /// within the AST block. The record itself is an array of offsets that
+ /// point into the declarations and types block (identified by
+ /// DECLTYPES_BLOCK_ID). The index into the array is based on the ID
+ /// of a type. For a given type ID @c T, the lower three bits of
+ /// @c T are its qualifiers (const, volatile, restrict), as in
+ /// the QualType class. The upper bits, after being shifted and
+ /// subtracting NUM_PREDEF_TYPE_IDS, are used to index into the
+ /// TYPE_OFFSET block to determine the offset of that type's
+ /// corresponding record within the DECLTYPES_BLOCK_ID block.
+ TYPE_OFFSET = 1,
+
+ /// Record code for the offsets of each decl.
+ ///
+ /// The DECL_OFFSET constant describes the record that occurs
+ /// within the block identified by DECL_OFFSETS_BLOCK_ID within
+ /// the AST block. The record itself is an array of offsets that
+ /// point into the declarations and types block (identified by
+ /// DECLTYPES_BLOCK_ID). The declaration ID is an index into this
+ /// record, after subtracting one to account for the use of
+ /// declaration ID 0 for a NULL declaration pointer. Index 0 is
+ /// reserved for the translation unit declaration.
+ DECL_OFFSET = 2,
+
+ /// Record code for the table of offsets of each
+ /// identifier ID.
+ ///
+ /// The offset table contains offsets into the blob stored in
+ /// the IDENTIFIER_TABLE record. Each offset points to the
+ /// NULL-terminated string that corresponds to that identifier.
+ IDENTIFIER_OFFSET = 3,
+
+ /// This is so that older clang versions, before the introduction
+ /// of the control block, can read and reject the newer PCH format.
+ /// *DON'T CHANGE THIS NUMBER*.
+ METADATA_OLD_FORMAT = 4,
+
+ /// Record code for the identifier table.
+ ///
+ /// The identifier table is a simple blob that contains
+ /// NULL-terminated strings for all of the identifiers
+ /// referenced by the AST file. The IDENTIFIER_OFFSET table
+ /// contains the mapping from identifier IDs to the characters
+ /// in this blob. Note that the starting offsets of all of the
+ /// identifiers are odd, so that, when the identifier offset
+ /// table is loaded in, we can use the low bit to distinguish
+ /// between offsets (for unresolved identifier IDs) and
+ /// IdentifierInfo pointers (for already-resolved identifier
+ /// IDs).
+ IDENTIFIER_TABLE = 5,
+
+ /// Record code for the array of eagerly deserialized decls.
+ ///
+ /// The AST file contains a list of all of the declarations that should be
+ /// eagerly deserialized present within the parsed headers, stored as an
+ /// array of declaration IDs. These declarations will be
+ /// reported to the AST consumer after the AST file has been
+ /// read, since their presence can affect the semantics of the
+ /// program (e.g., for code generation).
+ EAGERLY_DESERIALIZED_DECLS = 6,
+
+ /// Record code for the set of non-builtin, special
+ /// types.
+ ///
+ /// This record contains the type IDs for the various type nodes
+ /// that are constructed during semantic analysis (e.g.,
+ /// __builtin_va_list). The SPECIAL_TYPE_* constants provide
+ /// offsets into this record.
+ SPECIAL_TYPES = 7,
+
+ /// Record code for the extra statistics we gather while
+ /// generating an AST file.
+ STATISTICS = 8,
+
+ /// Record code for the array of tentative definitions.
+ TENTATIVE_DEFINITIONS = 9,
+
+ // ID 10 used to be for a list of extern "C" declarations.
+
+ /// Record code for the table of offsets into the
+ /// Objective-C method pool.
+ SELECTOR_OFFSETS = 11,
+
+ /// Record code for the Objective-C method pool.
+ METHOD_POOL = 12,
+
+ /// The value of the next __COUNTER__ to dispense.
+ /// [PP_COUNTER_VALUE, Val]
+ PP_COUNTER_VALUE = 13,
+
+ /// Record code for the table of offsets into the block
+ /// of source-location information.
+ SOURCE_LOCATION_OFFSETS = 14,
+
+ /// Record code for the set of source location entries
+ /// that need to be preloaded by the AST reader.
+ ///
+ /// This set contains the source location entry for the
+ /// predefines buffer and for any file entries that need to be
+ /// preloaded.
+ SOURCE_LOCATION_PRELOADS = 15,
+
+ /// Record code for the set of ext_vector type names.
+ EXT_VECTOR_DECLS = 16,
+
+ /// Record code for the array of unused file scoped decls.
+ UNUSED_FILESCOPED_DECLS = 17,
+
+ /// Record code for the table of offsets to entries in the
+ /// preprocessing record.
+ PPD_ENTITIES_OFFSETS = 18,
+
+ /// Record code for the array of VTable uses.
+ VTABLE_USES = 19,
+
+ // ID 20 used to be for a list of dynamic classes.
+
+ /// Record code for referenced selector pool.
+ REFERENCED_SELECTOR_POOL = 21,
+
+ /// Record code for an update to the TU's lexically contained
+ /// declarations.
+ TU_UPDATE_LEXICAL = 22,
+
+ // ID 23 used to be for a list of local redeclarations.
+
+ /// Record code for declarations that Sema keeps references of.
+ SEMA_DECL_REFS = 24,
+
+ /// Record code for weak undeclared identifiers.
+ WEAK_UNDECLARED_IDENTIFIERS = 25,
- /// Describes a zlib-compressed blob that contains the data for
- /// a buffer entry.
- SM_SLOC_BUFFER_BLOB_COMPRESSED = 4,
+ /// Record code for pending implicit instantiations.
+ PENDING_IMPLICIT_INSTANTIATIONS = 26,
+
+ // ID 27 used to be for a list of replacement decls.
+
+ /// Record code for an update to a decl context's lookup table.
+ ///
+ /// In practice, this should only be used for the TU and namespaces.
+ UPDATE_VISIBLE = 28,
+
+ /// Record for offsets of DECL_UPDATES records for declarations
+ /// that were modified after being deserialized and need updates.
+ DECL_UPDATE_OFFSETS = 29,
+
+ // ID 30 used to be a decl update record. These are now in the DECLTYPES
+ // block.
+
+ // ID 31 used to be a list of offsets to DECL_CXX_BASE_SPECIFIERS records.
+
+ // ID 32 used to be the code for \#pragma diagnostic mappings.
+
+ /// Record code for special CUDA declarations.
+ CUDA_SPECIAL_DECL_REFS = 33,
+
+ /// Record code for header search information.
+ HEADER_SEARCH_TABLE = 34,
+
+ /// Record code for floating point \#pragma options.
+ FP_PRAGMA_OPTIONS = 35,
+
+ /// Record code for enabled OpenCL extensions.
+ OPENCL_EXTENSIONS = 36,
+
+ /// The list of delegating constructor declarations.
+ DELEGATING_CTORS = 37,
+
+ /// Record code for the set of known namespaces, which are used
+ /// for typo correction.
+ KNOWN_NAMESPACES = 38,
+
+ /// Record code for the remapping information used to relate
+ /// loaded modules to the various offsets and IDs (e.g., source location
+ /// offsets, declaration and type IDs) that are used in that module to
+ /// refer to other modules.
+ MODULE_OFFSET_MAP = 39,
+
+ /// Record code for the source manager line table information,
+ /// which stores information about \#line directives.
+ SOURCE_MANAGER_LINE_TABLE = 40,
+
+ /// Record code for map of Objective-C class definition IDs to the
+ /// ObjC categories in a module that are attached to that class.
+ OBJC_CATEGORIES_MAP = 41,
+
+ /// Record code for a file sorted array of DeclIDs in a module.
+ FILE_SORTED_DECLS = 42,
+
+ /// Record code for an array of all of the (sub)modules that were
+ /// imported by the AST file.
+ IMPORTED_MODULES = 43,
+
+ // ID 44 used to be a table of merged canonical declarations.
+ // ID 45 used to be a list of declaration IDs of local redeclarations.
+
+ /// Record code for the array of Objective-C categories (including
+ /// extensions).
+ ///
+ /// This array can only be interpreted properly using the Objective-C
+ /// categories map.
+ OBJC_CATEGORIES = 46,
+
+ /// Record code for the table of offsets of each macro ID.
+ ///
+ /// The offset table contains offsets into the blob stored in
+ /// the preprocessor block. Each offset points to the corresponding
+ /// macro definition.
+ MACRO_OFFSET = 47,
+
+ /// A list of "interesting" identifiers. Only used in C++ (where we
+ /// don't normally do lookups into the serialized identifier table). These
+ /// are eagerly deserialized.
+ INTERESTING_IDENTIFIERS = 48,
+
+ /// Record code for undefined but used functions and variables that
+ /// need a definition in this TU.
+ UNDEFINED_BUT_USED = 49,
+
+ /// Record code for late parsed template functions.
+ LATE_PARSED_TEMPLATE = 50,
+
+ /// Record code for \#pragma optimize options.
+ OPTIMIZE_PRAGMA_OPTIONS = 51,
+
+ /// Record code for potentially unused local typedef names.
+ UNUSED_LOCAL_TYPEDEF_NAME_CANDIDATES = 52,
+
+ // ID 53 used to be a table of constructor initializer records.
+
+ /// Delete expressions that will be analyzed later.
+ DELETE_EXPRS_TO_ANALYZE = 54,
+
+ /// Record code for \#pragma ms_struct options.
+ MSSTRUCT_PRAGMA_OPTIONS = 55,
+
+ /// Record code for \#pragma pointers_to_members options.
+ POINTERS_TO_MEMBERS_PRAGMA_OPTIONS = 56,
+
+ /// Number of unmatched #pragma clang cuda_force_host_device begin
+ /// directives we've seen.
+ CUDA_PRAGMA_FORCE_HOST_DEVICE_DEPTH = 57,
+
+ /// Record code for types associated with OpenCL extensions.
+ OPENCL_EXTENSION_TYPES = 58,
+
+ /// Record code for declarations associated with OpenCL extensions.
+ OPENCL_EXTENSION_DECLS = 59,
+
+ MODULAR_CODEGEN_DECLS = 60,
+
+ /// Record code for \#pragma align/pack options.
+ ALIGN_PACK_PRAGMA_OPTIONS = 61,
+
+ /// The stack of open #ifs/#ifdefs recorded in a preamble.
+ PP_CONDITIONAL_STACK = 62,
+
+ /// A table of skipped ranges within the preprocessing record.
+ PPD_SKIPPED_RANGES = 63,
+
+ /// Record code for the Decls to be checked for deferred diags.
+ DECLS_TO_CHECK_FOR_DEFERRED_DIAGS = 64,
+
+ /// Record code for \#pragma float_control options.
+ FLOAT_CONTROL_PRAGMA_OPTIONS = 65,
+};
- /// Describes a source location entry (SLocEntry) for a
- /// macro expansion.
- SM_SLOC_EXPANSION_ENTRY = 5
- };
+/// Record types used within a source manager block.
+enum SourceManagerRecordTypes {
+ /// Describes a source location entry (SLocEntry) for a
+ /// file.
+ SM_SLOC_FILE_ENTRY = 1,
+
+ /// Describes a source location entry (SLocEntry) for a
+ /// buffer.
+ SM_SLOC_BUFFER_ENTRY = 2,
+
+ /// Describes a blob that contains the data for a buffer
+ /// entry. This kind of record always directly follows a
+ /// SM_SLOC_BUFFER_ENTRY record or a SM_SLOC_FILE_ENTRY with an
+ /// overridden buffer.
+ SM_SLOC_BUFFER_BLOB = 3,
+
+ /// Describes a zlib-compressed blob that contains the data for
+ /// a buffer entry.
+ SM_SLOC_BUFFER_BLOB_COMPRESSED = 4,
+
+ /// Describes a source location entry (SLocEntry) for a
+ /// macro expansion.
+ SM_SLOC_EXPANSION_ENTRY = 5
+};
- /// Record types used within a preprocessor block.
- enum PreprocessorRecordTypes {
- // The macros in the PP section are a PP_MACRO_* instance followed by a
- // list of PP_TOKEN instances for each token in the definition.
+/// Record types used within a preprocessor block.
+enum PreprocessorRecordTypes {
+ // The macros in the PP section are a PP_MACRO_* instance followed by a
+ // list of PP_TOKEN instances for each token in the definition.
- /// An object-like macro definition.
- /// [PP_MACRO_OBJECT_LIKE, IdentInfoID, SLoc, IsUsed]
- PP_MACRO_OBJECT_LIKE = 1,
+ /// An object-like macro definition.
+ /// [PP_MACRO_OBJECT_LIKE, IdentInfoID, SLoc, IsUsed]
+ PP_MACRO_OBJECT_LIKE = 1,
- /// A function-like macro definition.
- /// [PP_MACRO_FUNCTION_LIKE, \<ObjectLikeStuff>, IsC99Varargs,
- /// IsGNUVarars, NumArgs, ArgIdentInfoID* ]
- PP_MACRO_FUNCTION_LIKE = 2,
+ /// A function-like macro definition.
+ /// [PP_MACRO_FUNCTION_LIKE, \<ObjectLikeStuff>, IsC99Varargs,
+ /// IsGNUVarargs, NumArgs, ArgIdentInfoID* ]
+ PP_MACRO_FUNCTION_LIKE = 2,
- /// Describes one token.
- /// [PP_TOKEN, SLoc, Length, IdentInfoID, Kind, Flags]
- PP_TOKEN = 3,
+ /// Describes one token.
+ /// [PP_TOKEN, SLoc, Length, IdentInfoID, Kind, Flags]
+ PP_TOKEN = 3,
- /// The macro directives history for a particular identifier.
- PP_MACRO_DIRECTIVE_HISTORY = 4,
+ /// The macro directives history for a particular identifier.
+ PP_MACRO_DIRECTIVE_HISTORY = 4,
- /// A macro directive exported by a module.
- /// [PP_MODULE_MACRO, SubmoduleID, MacroID, (Overridden SubmoduleID)*]
- PP_MODULE_MACRO = 5,
- };
+ /// A macro directive exported by a module.
+ /// [PP_MODULE_MACRO, SubmoduleID, MacroID, (Overridden SubmoduleID)*]
+ PP_MODULE_MACRO = 5,
+};
- /// Record types used within a preprocessor detail block.
- enum PreprocessorDetailRecordTypes {
- /// Describes a macro expansion within the preprocessing record.
- PPD_MACRO_EXPANSION = 0,
+/// Record types used within a preprocessor detail block.
+enum PreprocessorDetailRecordTypes {
+ /// Describes a macro expansion within the preprocessing record.
+ PPD_MACRO_EXPANSION = 0,
- /// Describes a macro definition within the preprocessing record.
- PPD_MACRO_DEFINITION = 1,
+ /// Describes a macro definition within the preprocessing record.
+ PPD_MACRO_DEFINITION = 1,
- /// Describes an inclusion directive within the preprocessing
- /// record.
- PPD_INCLUSION_DIRECTIVE = 2
- };
+ /// Describes an inclusion directive within the preprocessing
+ /// record.
+ PPD_INCLUSION_DIRECTIVE = 2
+};
- /// Record types used within a submodule description block.
- enum SubmoduleRecordTypes {
- /// Metadata for submodules as a whole.
- SUBMODULE_METADATA = 0,
+/// Record types used within a submodule description block.
+enum SubmoduleRecordTypes {
+ /// Metadata for submodules as a whole.
+ SUBMODULE_METADATA = 0,
- /// Defines the major attributes of a submodule, including its
- /// name and parent.
- SUBMODULE_DEFINITION = 1,
+ /// Defines the major attributes of a submodule, including its
+ /// name and parent.
+ SUBMODULE_DEFINITION = 1,
- /// Specifies the umbrella header used to create this module,
- /// if any.
- SUBMODULE_UMBRELLA_HEADER = 2,
+ /// Specifies the umbrella header used to create this module,
+ /// if any.
+ SUBMODULE_UMBRELLA_HEADER = 2,
- /// Specifies a header that falls into this (sub)module.
- SUBMODULE_HEADER = 3,
+ /// Specifies a header that falls into this (sub)module.
+ SUBMODULE_HEADER = 3,
- /// Specifies a top-level header that falls into this (sub)module.
- SUBMODULE_TOPHEADER = 4,
+ /// Specifies a top-level header that falls into this (sub)module.
+ SUBMODULE_TOPHEADER = 4,
- /// Specifies an umbrella directory.
- SUBMODULE_UMBRELLA_DIR = 5,
+ /// Specifies an umbrella directory.
+ SUBMODULE_UMBRELLA_DIR = 5,
- /// Specifies the submodules that are imported by this
- /// submodule.
- SUBMODULE_IMPORTS = 6,
+ /// Specifies the submodules that are imported by this
+ /// submodule.
+ SUBMODULE_IMPORTS = 6,
- /// Specifies the submodules that are re-exported from this
- /// submodule.
- SUBMODULE_EXPORTS = 7,
+ /// Specifies the submodules that are re-exported from this
+ /// submodule.
+ SUBMODULE_EXPORTS = 7,
- /// Specifies a required feature.
- SUBMODULE_REQUIRES = 8,
+ /// Specifies a required feature.
+ SUBMODULE_REQUIRES = 8,
- /// Specifies a header that has been explicitly excluded
- /// from this submodule.
- SUBMODULE_EXCLUDED_HEADER = 9,
+ /// Specifies a header that has been explicitly excluded
+ /// from this submodule.
+ SUBMODULE_EXCLUDED_HEADER = 9,
- /// Specifies a library or framework to link against.
- SUBMODULE_LINK_LIBRARY = 10,
+ /// Specifies a library or framework to link against.
+ SUBMODULE_LINK_LIBRARY = 10,
- /// Specifies a configuration macro for this module.
- SUBMODULE_CONFIG_MACRO = 11,
+ /// Specifies a configuration macro for this module.
+ SUBMODULE_CONFIG_MACRO = 11,
- /// Specifies a conflict with another module.
- SUBMODULE_CONFLICT = 12,
+ /// Specifies a conflict with another module.
+ SUBMODULE_CONFLICT = 12,
- /// Specifies a header that is private to this submodule.
- SUBMODULE_PRIVATE_HEADER = 13,
+ /// Specifies a header that is private to this submodule.
+ SUBMODULE_PRIVATE_HEADER = 13,
- /// Specifies a header that is part of the module but must be
- /// textually included.
- SUBMODULE_TEXTUAL_HEADER = 14,
+ /// Specifies a header that is part of the module but must be
+ /// textually included.
+ SUBMODULE_TEXTUAL_HEADER = 14,
- /// Specifies a header that is private to this submodule but
- /// must be textually included.
- SUBMODULE_PRIVATE_TEXTUAL_HEADER = 15,
+ /// Specifies a header that is private to this submodule but
+ /// must be textually included.
+ SUBMODULE_PRIVATE_TEXTUAL_HEADER = 15,
- /// Specifies some declarations with initializers that must be
- /// emitted to initialize the module.
- SUBMODULE_INITIALIZERS = 16,
+ /// Specifies some declarations with initializers that must be
+ /// emitted to initialize the module.
+ SUBMODULE_INITIALIZERS = 16,
- /// Specifies the name of the module that will eventually
- /// re-export the entities in this module.
- SUBMODULE_EXPORT_AS = 17,
- };
+ /// Specifies the name of the module that will eventually
+ /// re-export the entities in this module.
+ SUBMODULE_EXPORT_AS = 17,
+};
- /// Record types used within a comments block.
- enum CommentRecordTypes {
- COMMENTS_RAW_COMMENT = 0
- };
+/// Record types used within a comments block.
+enum CommentRecordTypes { COMMENTS_RAW_COMMENT = 0 };
- /// \defgroup ASTAST AST file AST constants
- ///
- /// The constants in this group describe various components of the
- /// abstract syntax tree within an AST file.
- ///
- /// @{
+/// \defgroup ASTAST AST file AST constants
+///
+/// The constants in this group describe various components of the
+/// abstract syntax tree within an AST file.
+///
+/// @{
- /// Predefined type IDs.
- ///
- /// These type IDs correspond to predefined types in the AST
- /// context, such as built-in types (int) and special place-holder
- /// types (the \<overload> and \<dependent> type markers). Such
- /// types are never actually serialized, since they will be built
- /// by the AST context when it is created.
- enum PredefinedTypeIDs {
- /// The NULL type.
- PREDEF_TYPE_NULL_ID = 0,
+/// Predefined type IDs.
+///
+/// These type IDs correspond to predefined types in the AST
+/// context, such as built-in types (int) and special place-holder
+/// types (the \<overload> and \<dependent> type markers). Such
+/// types are never actually serialized, since they will be built
+/// by the AST context when it is created.
+enum PredefinedTypeIDs {
+ /// The NULL type.
+ PREDEF_TYPE_NULL_ID = 0,
- /// The void type.
- PREDEF_TYPE_VOID_ID = 1,
+ /// The void type.
+ PREDEF_TYPE_VOID_ID = 1,
- /// The 'bool' or '_Bool' type.
- PREDEF_TYPE_BOOL_ID = 2,
+ /// The 'bool' or '_Bool' type.
+ PREDEF_TYPE_BOOL_ID = 2,
- /// The 'char' type, when it is unsigned.
- PREDEF_TYPE_CHAR_U_ID = 3,
+ /// The 'char' type, when it is unsigned.
+ PREDEF_TYPE_CHAR_U_ID = 3,
- /// The 'unsigned char' type.
- PREDEF_TYPE_UCHAR_ID = 4,
+ /// The 'unsigned char' type.
+ PREDEF_TYPE_UCHAR_ID = 4,
- /// The 'unsigned short' type.
- PREDEF_TYPE_USHORT_ID = 5,
+ /// The 'unsigned short' type.
+ PREDEF_TYPE_USHORT_ID = 5,
- /// The 'unsigned int' type.
- PREDEF_TYPE_UINT_ID = 6,
+ /// The 'unsigned int' type.
+ PREDEF_TYPE_UINT_ID = 6,
- /// The 'unsigned long' type.
- PREDEF_TYPE_ULONG_ID = 7,
+ /// The 'unsigned long' type.
+ PREDEF_TYPE_ULONG_ID = 7,
- /// The 'unsigned long long' type.
- PREDEF_TYPE_ULONGLONG_ID = 8,
+ /// The 'unsigned long long' type.
+ PREDEF_TYPE_ULONGLONG_ID = 8,
- /// The 'char' type, when it is signed.
- PREDEF_TYPE_CHAR_S_ID = 9,
+ /// The 'char' type, when it is signed.
+ PREDEF_TYPE_CHAR_S_ID = 9,
- /// The 'signed char' type.
- PREDEF_TYPE_SCHAR_ID = 10,
+ /// The 'signed char' type.
+ PREDEF_TYPE_SCHAR_ID = 10,
- /// The C++ 'wchar_t' type.
- PREDEF_TYPE_WCHAR_ID = 11,
+ /// The C++ 'wchar_t' type.
+ PREDEF_TYPE_WCHAR_ID = 11,
- /// The (signed) 'short' type.
- PREDEF_TYPE_SHORT_ID = 12,
+ /// The (signed) 'short' type.
+ PREDEF_TYPE_SHORT_ID = 12,
- /// The (signed) 'int' type.
- PREDEF_TYPE_INT_ID = 13,
+ /// The (signed) 'int' type.
+ PREDEF_TYPE_INT_ID = 13,
- /// The (signed) 'long' type.
- PREDEF_TYPE_LONG_ID = 14,
+ /// The (signed) 'long' type.
+ PREDEF_TYPE_LONG_ID = 14,
- /// The (signed) 'long long' type.
- PREDEF_TYPE_LONGLONG_ID = 15,
+ /// The (signed) 'long long' type.
+ PREDEF_TYPE_LONGLONG_ID = 15,
- /// The 'float' type.
- PREDEF_TYPE_FLOAT_ID = 16,
+ /// The 'float' type.
+ PREDEF_TYPE_FLOAT_ID = 16,
- /// The 'double' type.
- PREDEF_TYPE_DOUBLE_ID = 17,
+ /// The 'double' type.
+ PREDEF_TYPE_DOUBLE_ID = 17,
- /// The 'long double' type.
- PREDEF_TYPE_LONGDOUBLE_ID = 18,
+ /// The 'long double' type.
+ PREDEF_TYPE_LONGDOUBLE_ID = 18,
- /// The placeholder type for overloaded function sets.
- PREDEF_TYPE_OVERLOAD_ID = 19,
+ /// The placeholder type for overloaded function sets.
+ PREDEF_TYPE_OVERLOAD_ID = 19,
- /// The placeholder type for dependent types.
- PREDEF_TYPE_DEPENDENT_ID = 20,
+ /// The placeholder type for dependent types.
+ PREDEF_TYPE_DEPENDENT_ID = 20,
- /// The '__uint128_t' type.
- PREDEF_TYPE_UINT128_ID = 21,
+ /// The '__uint128_t' type.
+ PREDEF_TYPE_UINT128_ID = 21,
- /// The '__int128_t' type.
- PREDEF_TYPE_INT128_ID = 22,
+ /// The '__int128_t' type.
+ PREDEF_TYPE_INT128_ID = 22,
- /// The type of 'nullptr'.
- PREDEF_TYPE_NULLPTR_ID = 23,
+ /// The type of 'nullptr'.
+ PREDEF_TYPE_NULLPTR_ID = 23,
- /// The C++ 'char16_t' type.
- PREDEF_TYPE_CHAR16_ID = 24,
+ /// The C++ 'char16_t' type.
+ PREDEF_TYPE_CHAR16_ID = 24,
- /// The C++ 'char32_t' type.
- PREDEF_TYPE_CHAR32_ID = 25,
+ /// The C++ 'char32_t' type.
+ PREDEF_TYPE_CHAR32_ID = 25,
- /// The ObjC 'id' type.
- PREDEF_TYPE_OBJC_ID = 26,
+ /// The ObjC 'id' type.
+ PREDEF_TYPE_OBJC_ID = 26,
- /// The ObjC 'Class' type.
- PREDEF_TYPE_OBJC_CLASS = 27,
+ /// The ObjC 'Class' type.
+ PREDEF_TYPE_OBJC_CLASS = 27,
- /// The ObjC 'SEL' type.
- PREDEF_TYPE_OBJC_SEL = 28,
+ /// The ObjC 'SEL' type.
+ PREDEF_TYPE_OBJC_SEL = 28,
- /// The 'unknown any' placeholder type.
- PREDEF_TYPE_UNKNOWN_ANY = 29,
+ /// The 'unknown any' placeholder type.
+ PREDEF_TYPE_UNKNOWN_ANY = 29,
- /// The placeholder type for bound member functions.
- PREDEF_TYPE_BOUND_MEMBER = 30,
+ /// The placeholder type for bound member functions.
+ PREDEF_TYPE_BOUND_MEMBER = 30,
- /// The "auto" deduction type.
- PREDEF_TYPE_AUTO_DEDUCT = 31,
+ /// The "auto" deduction type.
+ PREDEF_TYPE_AUTO_DEDUCT = 31,
- /// The "auto &&" deduction type.
- PREDEF_TYPE_AUTO_RREF_DEDUCT = 32,
+ /// The "auto &&" deduction type.
+ PREDEF_TYPE_AUTO_RREF_DEDUCT = 32,
- /// The OpenCL 'half' / ARM NEON __fp16 type.
- PREDEF_TYPE_HALF_ID = 33,
+ /// The OpenCL 'half' / ARM NEON __fp16 type.
+ PREDEF_TYPE_HALF_ID = 33,
- /// ARC's unbridged-cast placeholder type.
- PREDEF_TYPE_ARC_UNBRIDGED_CAST = 34,
+ /// ARC's unbridged-cast placeholder type.
+ PREDEF_TYPE_ARC_UNBRIDGED_CAST = 34,
- /// The pseudo-object placeholder type.
- PREDEF_TYPE_PSEUDO_OBJECT = 35,
+ /// The pseudo-object placeholder type.
+ PREDEF_TYPE_PSEUDO_OBJECT = 35,
- /// The placeholder type for builtin functions.
- PREDEF_TYPE_BUILTIN_FN = 36,
+ /// The placeholder type for builtin functions.
+ PREDEF_TYPE_BUILTIN_FN = 36,
- /// OpenCL event type.
- PREDEF_TYPE_EVENT_ID = 37,
+ /// OpenCL event type.
+ PREDEF_TYPE_EVENT_ID = 37,
- /// OpenCL clk event type.
- PREDEF_TYPE_CLK_EVENT_ID = 38,
+ /// OpenCL clk event type.
+ PREDEF_TYPE_CLK_EVENT_ID = 38,
- /// OpenCL sampler type.
- PREDEF_TYPE_SAMPLER_ID = 39,
+ /// OpenCL sampler type.
+ PREDEF_TYPE_SAMPLER_ID = 39,
- /// OpenCL queue type.
- PREDEF_TYPE_QUEUE_ID = 40,
+ /// OpenCL queue type.
+ PREDEF_TYPE_QUEUE_ID = 40,
- /// OpenCL reserve_id type.
- PREDEF_TYPE_RESERVE_ID_ID = 41,
+ /// OpenCL reserve_id type.
+ PREDEF_TYPE_RESERVE_ID_ID = 41,
- /// The placeholder type for OpenMP array section.
- PREDEF_TYPE_OMP_ARRAY_SECTION = 42,
+ /// The placeholder type for OpenMP array section.
+ PREDEF_TYPE_OMP_ARRAY_SECTION = 42,
- /// The '__float128' type
- PREDEF_TYPE_FLOAT128_ID = 43,
+ /// The '__float128' type
+ PREDEF_TYPE_FLOAT128_ID = 43,
- /// The '_Float16' type
- PREDEF_TYPE_FLOAT16_ID = 44,
+ /// The '_Float16' type
+ PREDEF_TYPE_FLOAT16_ID = 44,
- /// The C++ 'char8_t' type.
- PREDEF_TYPE_CHAR8_ID = 45,
+ /// The C++ 'char8_t' type.
+ PREDEF_TYPE_CHAR8_ID = 45,
- /// \brief The 'short _Accum' type
- PREDEF_TYPE_SHORT_ACCUM_ID = 46,
+ /// \brief The 'short _Accum' type
+ PREDEF_TYPE_SHORT_ACCUM_ID = 46,
- /// \brief The '_Accum' type
- PREDEF_TYPE_ACCUM_ID = 47,
+ /// \brief The '_Accum' type
+ PREDEF_TYPE_ACCUM_ID = 47,
- /// \brief The 'long _Accum' type
- PREDEF_TYPE_LONG_ACCUM_ID = 48,
+ /// \brief The 'long _Accum' type
+ PREDEF_TYPE_LONG_ACCUM_ID = 48,
- /// \brief The 'unsigned short _Accum' type
- PREDEF_TYPE_USHORT_ACCUM_ID = 49,
+ /// \brief The 'unsigned short _Accum' type
+ PREDEF_TYPE_USHORT_ACCUM_ID = 49,
- /// \brief The 'unsigned _Accum' type
- PREDEF_TYPE_UACCUM_ID = 50,
+ /// \brief The 'unsigned _Accum' type
+ PREDEF_TYPE_UACCUM_ID = 50,
- /// \brief The 'unsigned long _Accum' type
- PREDEF_TYPE_ULONG_ACCUM_ID = 51,
+ /// \brief The 'unsigned long _Accum' type
+ PREDEF_TYPE_ULONG_ACCUM_ID = 51,
- /// \brief The 'short _Fract' type
- PREDEF_TYPE_SHORT_FRACT_ID = 52,
+ /// \brief The 'short _Fract' type
+ PREDEF_TYPE_SHORT_FRACT_ID = 52,
- /// \brief The '_Fract' type
- PREDEF_TYPE_FRACT_ID = 53,
+ /// \brief The '_Fract' type
+ PREDEF_TYPE_FRACT_ID = 53,
- /// \brief The 'long _Fract' type
- PREDEF_TYPE_LONG_FRACT_ID = 54,
+ /// \brief The 'long _Fract' type
+ PREDEF_TYPE_LONG_FRACT_ID = 54,
- /// \brief The 'unsigned short _Fract' type
- PREDEF_TYPE_USHORT_FRACT_ID = 55,
+ /// \brief The 'unsigned short _Fract' type
+ PREDEF_TYPE_USHORT_FRACT_ID = 55,
- /// \brief The 'unsigned _Fract' type
- PREDEF_TYPE_UFRACT_ID = 56,
+ /// \brief The 'unsigned _Fract' type
+ PREDEF_TYPE_UFRACT_ID = 56,
- /// \brief The 'unsigned long _Fract' type
- PREDEF_TYPE_ULONG_FRACT_ID = 57,
+ /// \brief The 'unsigned long _Fract' type
+ PREDEF_TYPE_ULONG_FRACT_ID = 57,
- /// \brief The '_Sat short _Accum' type
- PREDEF_TYPE_SAT_SHORT_ACCUM_ID = 58,
+ /// \brief The '_Sat short _Accum' type
+ PREDEF_TYPE_SAT_SHORT_ACCUM_ID = 58,
- /// \brief The '_Sat _Accum' type
- PREDEF_TYPE_SAT_ACCUM_ID = 59,
+ /// \brief The '_Sat _Accum' type
+ PREDEF_TYPE_SAT_ACCUM_ID = 59,
- /// \brief The '_Sat long _Accum' type
- PREDEF_TYPE_SAT_LONG_ACCUM_ID = 60,
+ /// \brief The '_Sat long _Accum' type
+ PREDEF_TYPE_SAT_LONG_ACCUM_ID = 60,
- /// \brief The '_Sat unsigned short _Accum' type
- PREDEF_TYPE_SAT_USHORT_ACCUM_ID = 61,
+ /// \brief The '_Sat unsigned short _Accum' type
+ PREDEF_TYPE_SAT_USHORT_ACCUM_ID = 61,
- /// \brief The '_Sat unsigned _Accum' type
- PREDEF_TYPE_SAT_UACCUM_ID = 62,
+ /// \brief The '_Sat unsigned _Accum' type
+ PREDEF_TYPE_SAT_UACCUM_ID = 62,
- /// \brief The '_Sat unsigned long _Accum' type
- PREDEF_TYPE_SAT_ULONG_ACCUM_ID = 63,
+ /// \brief The '_Sat unsigned long _Accum' type
+ PREDEF_TYPE_SAT_ULONG_ACCUM_ID = 63,
- /// \brief The '_Sat short _Fract' type
- PREDEF_TYPE_SAT_SHORT_FRACT_ID = 64,
+ /// \brief The '_Sat short _Fract' type
+ PREDEF_TYPE_SAT_SHORT_FRACT_ID = 64,
- /// \brief The '_Sat _Fract' type
- PREDEF_TYPE_SAT_FRACT_ID = 65,
+ /// \brief The '_Sat _Fract' type
+ PREDEF_TYPE_SAT_FRACT_ID = 65,
- /// \brief The '_Sat long _Fract' type
- PREDEF_TYPE_SAT_LONG_FRACT_ID = 66,
+ /// \brief The '_Sat long _Fract' type
+ PREDEF_TYPE_SAT_LONG_FRACT_ID = 66,
- /// \brief The '_Sat unsigned short _Fract' type
- PREDEF_TYPE_SAT_USHORT_FRACT_ID = 67,
+ /// \brief The '_Sat unsigned short _Fract' type
+ PREDEF_TYPE_SAT_USHORT_FRACT_ID = 67,
- /// \brief The '_Sat unsigned _Fract' type
- PREDEF_TYPE_SAT_UFRACT_ID = 68,
+ /// \brief The '_Sat unsigned _Fract' type
+ PREDEF_TYPE_SAT_UFRACT_ID = 68,
- /// \brief The '_Sat unsigned long _Fract' type
- PREDEF_TYPE_SAT_ULONG_FRACT_ID = 69,
+ /// \brief The '_Sat unsigned long _Fract' type
+ PREDEF_TYPE_SAT_ULONG_FRACT_ID = 69,
- /// The placeholder type for OpenMP array shaping operation.
- PREDEF_TYPE_OMP_ARRAY_SHAPING = 70,
+ /// The placeholder type for OpenMP array shaping operation.
+ PREDEF_TYPE_OMP_ARRAY_SHAPING = 70,
- /// The placeholder type for OpenMP iterator expression.
- PREDEF_TYPE_OMP_ITERATOR = 71,
+ /// The placeholder type for OpenMP iterator expression.
+ PREDEF_TYPE_OMP_ITERATOR = 71,
- /// A placeholder type for incomplete matrix index operations.
- PREDEF_TYPE_INCOMPLETE_MATRIX_IDX = 72,
+ /// A placeholder type for incomplete matrix index operations.
+ PREDEF_TYPE_INCOMPLETE_MATRIX_IDX = 72,
- /// \brief The '__bf16' type
- PREDEF_TYPE_BFLOAT16_ID = 73,
+ /// \brief The '__bf16' type
+ PREDEF_TYPE_BFLOAT16_ID = 73,
- /// OpenCL image types with auto numeration
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- PREDEF_TYPE_##Id##_ID,
+/// OpenCL image types with auto numeration
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ PREDEF_TYPE_##Id##_ID,
#include "clang/Basic/OpenCLImageTypes.def"
- /// \brief OpenCL extension types with auto numeration
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- PREDEF_TYPE_##Id##_ID,
+/// \brief OpenCL extension types with auto numeration
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) PREDEF_TYPE_##Id##_ID,
#include "clang/Basic/OpenCLExtensionTypes.def"
- // \brief SVE types with auto numeration
+// \brief SVE types with auto numeration
#define SVE_TYPE(Name, Id, SingletonId) PREDEF_TYPE_##Id##_ID,
#include "clang/Basic/AArch64SVEACLETypes.def"
- // \brief PowerPC MMA types with auto numeration
+// \brief PowerPC MMA types with auto numeration
#define PPC_VECTOR_TYPE(Name, Id, Size) PREDEF_TYPE_##Id##_ID,
#include "clang/Basic/PPCTypes.def"
- };
-
- /// The number of predefined type IDs that are reserved for
- /// the PREDEF_TYPE_* constants.
- ///
- /// Type IDs for non-predefined types will start at
- /// NUM_PREDEF_TYPE_IDs.
- const unsigned NUM_PREDEF_TYPE_IDS = 200;
-
- /// Record codes for each kind of type.
- ///
- /// These constants describe the type records that can occur within a
- /// block identified by DECLTYPES_BLOCK_ID in the AST file. Each
- /// constant describes a record for a specific type class in the
- /// AST. Note that DeclCode values share this code space.
- enum TypeCode {
-#define TYPE_BIT_CODE(CLASS_ID, CODE_ID, CODE_VALUE) \
- TYPE_##CODE_ID = CODE_VALUE,
+// \brief RISC-V V types with auto numeration
+#define RVV_TYPE(Name, Id, SingletonId) PREDEF_TYPE_##Id##_ID,
+#include "clang/Basic/RISCVVTypes.def"
+};
+
+/// The number of predefined type IDs that are reserved for
+/// the PREDEF_TYPE_* constants.
+///
+/// Type IDs for non-predefined types will start at
+/// NUM_PREDEF_TYPE_IDs.
+const unsigned NUM_PREDEF_TYPE_IDS = 300;
+
+/// Record codes for each kind of type.
+///
+/// These constants describe the type records that can occur within a
+/// block identified by DECLTYPES_BLOCK_ID in the AST file. Each
+/// constant describes a record for a specific type class in the
+/// AST. Note that DeclCode values share this code space.
+enum TypeCode {
+#define TYPE_BIT_CODE(CLASS_ID, CODE_ID, CODE_VALUE) \
+ TYPE_##CODE_ID = CODE_VALUE,
#include "clang/Serialization/TypeBitCodes.def"
- /// An ExtQualType record.
- TYPE_EXT_QUAL = 1
- };
+ /// An ExtQualType record.
+ TYPE_EXT_QUAL = 1
+};
+
+/// The type IDs for special types constructed by semantic
+/// analysis.
+///
+/// The constants in this enumeration are indices into the
+/// SPECIAL_TYPES record.
+enum SpecialTypeIDs {
+ /// CFConstantString type
+ SPECIAL_TYPE_CF_CONSTANT_STRING = 0,
+
+ /// C FILE typedef type
+ SPECIAL_TYPE_FILE = 1,
+
+ /// C jmp_buf typedef type
+ SPECIAL_TYPE_JMP_BUF = 2,
+
+ /// C sigjmp_buf typedef type
+ SPECIAL_TYPE_SIGJMP_BUF = 3,
+
+ /// Objective-C "id" redefinition type
+ SPECIAL_TYPE_OBJC_ID_REDEFINITION = 4,
+
+ /// Objective-C "Class" redefinition type
+ SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 5,
- /// The type IDs for special types constructed by semantic
- /// analysis.
- ///
- /// The constants in this enumeration are indices into the
- /// SPECIAL_TYPES record.
- enum SpecialTypeIDs {
- /// CFConstantString type
- SPECIAL_TYPE_CF_CONSTANT_STRING = 0,
+ /// Objective-C "SEL" redefinition type
+ SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 6,
- /// C FILE typedef type
- SPECIAL_TYPE_FILE = 1,
+ /// C ucontext_t typedef type
+ SPECIAL_TYPE_UCONTEXT_T = 7
+};
+
+/// The number of special type IDs.
+const unsigned NumSpecialTypeIDs = 8;
+
+/// Predefined declaration IDs.
+///
+/// These declaration IDs correspond to predefined declarations in the AST
+/// context, such as the NULL declaration ID. Such declarations are never
+/// actually serialized, since they will be built by the AST context when
+/// it is created.
+enum PredefinedDeclIDs {
+ /// The NULL declaration.
+ PREDEF_DECL_NULL_ID = 0,
+
+ /// The translation unit.
+ PREDEF_DECL_TRANSLATION_UNIT_ID = 1,
- /// C jmp_buf typedef type
- SPECIAL_TYPE_JMP_BUF = 2,
+ /// The Objective-C 'id' type.
+ PREDEF_DECL_OBJC_ID_ID = 2,
- /// C sigjmp_buf typedef type
- SPECIAL_TYPE_SIGJMP_BUF = 3,
+ /// The Objective-C 'SEL' type.
+ PREDEF_DECL_OBJC_SEL_ID = 3,
- /// Objective-C "id" redefinition type
- SPECIAL_TYPE_OBJC_ID_REDEFINITION = 4,
+ /// The Objective-C 'Class' type.
+ PREDEF_DECL_OBJC_CLASS_ID = 4,
- /// Objective-C "Class" redefinition type
- SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 5,
+ /// The Objective-C 'Protocol' type.
+ PREDEF_DECL_OBJC_PROTOCOL_ID = 5,
- /// Objective-C "SEL" redefinition type
- SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 6,
+ /// The signed 128-bit integer type.
+ PREDEF_DECL_INT_128_ID = 6,
- /// C ucontext_t typedef type
- SPECIAL_TYPE_UCONTEXT_T = 7
- };
+ /// The unsigned 128-bit integer type.
+ PREDEF_DECL_UNSIGNED_INT_128_ID = 7,
- /// The number of special type IDs.
- const unsigned NumSpecialTypeIDs = 8;
+ /// The internal 'instancetype' typedef.
+ PREDEF_DECL_OBJC_INSTANCETYPE_ID = 8,
- /// Predefined declaration IDs.
- ///
- /// These declaration IDs correspond to predefined declarations in the AST
- /// context, such as the NULL declaration ID. Such declarations are never
- /// actually serialized, since they will be built by the AST context when
- /// it is created.
- enum PredefinedDeclIDs {
- /// The NULL declaration.
- PREDEF_DECL_NULL_ID = 0,
+ /// The internal '__builtin_va_list' typedef.
+ PREDEF_DECL_BUILTIN_VA_LIST_ID = 9,
- /// The translation unit.
- PREDEF_DECL_TRANSLATION_UNIT_ID = 1,
+ /// The internal '__va_list_tag' struct, if any.
+ PREDEF_DECL_VA_LIST_TAG = 10,
- /// The Objective-C 'id' type.
- PREDEF_DECL_OBJC_ID_ID = 2,
+ /// The internal '__builtin_ms_va_list' typedef.
+ PREDEF_DECL_BUILTIN_MS_VA_LIST_ID = 11,
- /// The Objective-C 'SEL' type.
- PREDEF_DECL_OBJC_SEL_ID = 3,
+ /// The predeclared '_GUID' struct.
+ PREDEF_DECL_BUILTIN_MS_GUID_ID = 12,
- /// The Objective-C 'Class' type.
- PREDEF_DECL_OBJC_CLASS_ID = 4,
+ /// The extern "C" context.
+ PREDEF_DECL_EXTERN_C_CONTEXT_ID = 13,
+
+ /// The internal '__make_integer_seq' template.
+ PREDEF_DECL_MAKE_INTEGER_SEQ_ID = 14,
+
+ /// The internal '__NSConstantString' typedef.
+ PREDEF_DECL_CF_CONSTANT_STRING_ID = 15,
+
+ /// The internal '__NSConstantString' tag type.
+ PREDEF_DECL_CF_CONSTANT_STRING_TAG_ID = 16,
+
+ /// The internal '__type_pack_element' template.
+ PREDEF_DECL_TYPE_PACK_ELEMENT_ID = 17,
+};
- /// The Objective-C 'Protocol' type.
- PREDEF_DECL_OBJC_PROTOCOL_ID = 5,
+/// The number of declaration IDs that are predefined.
+///
+/// For more information about predefined declarations, see the
+/// \c PredefinedDeclIDs type and the PREDEF_DECL_*_ID constants.
+const unsigned int NUM_PREDEF_DECL_IDS = 18;
- /// The signed 128-bit integer type.
- PREDEF_DECL_INT_128_ID = 6,
+/// Record of updates for a declaration that was modified after
+/// being deserialized. This can occur within DECLTYPES_BLOCK_ID.
+const unsigned int DECL_UPDATES = 49;
- /// The unsigned 128-bit integer type.
- PREDEF_DECL_UNSIGNED_INT_128_ID = 7,
+/// Record code for a list of local redeclarations of a declaration.
+/// This can occur within DECLTYPES_BLOCK_ID.
+const unsigned int LOCAL_REDECLARATIONS = 50;
- /// The internal 'instancetype' typedef.
- PREDEF_DECL_OBJC_INSTANCETYPE_ID = 8,
+/// Record codes for each kind of declaration.
+///
+/// These constants describe the declaration records that can occur within
+/// a declarations block (identified by DECLTYPES_BLOCK_ID). Each
+/// constant describes a record for a specific declaration class
+/// in the AST. Note that TypeCode values share this code space.
+enum DeclCode {
+ /// A TypedefDecl record.
+ DECL_TYPEDEF = 51,
+ /// A TypeAliasDecl record.
- /// The internal '__builtin_va_list' typedef.
- PREDEF_DECL_BUILTIN_VA_LIST_ID = 9,
+ DECL_TYPEALIAS,
- /// The internal '__va_list_tag' struct, if any.
- PREDEF_DECL_VA_LIST_TAG = 10,
+ /// An EnumDecl record.
+ DECL_ENUM,
- /// The internal '__builtin_ms_va_list' typedef.
- PREDEF_DECL_BUILTIN_MS_VA_LIST_ID = 11,
+ /// A RecordDecl record.
+ DECL_RECORD,
- /// The predeclared '_GUID' struct.
- PREDEF_DECL_BUILTIN_MS_GUID_ID = 12,
+ /// An EnumConstantDecl record.
+ DECL_ENUM_CONSTANT,
- /// The extern "C" context.
- PREDEF_DECL_EXTERN_C_CONTEXT_ID = 13,
+ /// A FunctionDecl record.
+ DECL_FUNCTION,
- /// The internal '__make_integer_seq' template.
- PREDEF_DECL_MAKE_INTEGER_SEQ_ID = 14,
+ /// An ObjCMethodDecl record.
+ DECL_OBJC_METHOD,
- /// The internal '__NSConstantString' typedef.
- PREDEF_DECL_CF_CONSTANT_STRING_ID = 15,
+ /// An ObjCInterfaceDecl record.
+ DECL_OBJC_INTERFACE,
- /// The internal '__NSConstantString' tag type.
- PREDEF_DECL_CF_CONSTANT_STRING_TAG_ID = 16,
+ /// An ObjCProtocolDecl record.
+ DECL_OBJC_PROTOCOL,
- /// The internal '__type_pack_element' template.
- PREDEF_DECL_TYPE_PACK_ELEMENT_ID = 17,
- };
+ /// An ObjCIvarDecl record.
+ DECL_OBJC_IVAR,
- /// The number of declaration IDs that are predefined.
- ///
- /// For more information about predefined declarations, see the
- /// \c PredefinedDeclIDs type and the PREDEF_DECL_*_ID constants.
- const unsigned int NUM_PREDEF_DECL_IDS = 18;
+ /// An ObjCAtDefsFieldDecl record.
+ DECL_OBJC_AT_DEFS_FIELD,
- /// Record of updates for a declaration that was modified after
- /// being deserialized. This can occur within DECLTYPES_BLOCK_ID.
- const unsigned int DECL_UPDATES = 49;
+ /// An ObjCCategoryDecl record.
+ DECL_OBJC_CATEGORY,
- /// Record code for a list of local redeclarations of a declaration.
- /// This can occur within DECLTYPES_BLOCK_ID.
- const unsigned int LOCAL_REDECLARATIONS = 50;
+ /// An ObjCCategoryImplDecl record.
+ DECL_OBJC_CATEGORY_IMPL,
- /// Record codes for each kind of declaration.
- ///
- /// These constants describe the declaration records that can occur within
- /// a declarations block (identified by DECLTYPES_BLOCK_ID). Each
- /// constant describes a record for a specific declaration class
- /// in the AST. Note that TypeCode values share this code space.
- enum DeclCode {
- /// A TypedefDecl record.
- DECL_TYPEDEF = 51,
- /// A TypeAliasDecl record.
+ /// An ObjCImplementationDecl record.
+ DECL_OBJC_IMPLEMENTATION,
- DECL_TYPEALIAS,
+ /// An ObjCCompatibleAliasDecl record.
+ DECL_OBJC_COMPATIBLE_ALIAS,
- /// An EnumDecl record.
- DECL_ENUM,
+ /// An ObjCPropertyDecl record.
+ DECL_OBJC_PROPERTY,
- /// A RecordDecl record.
- DECL_RECORD,
+ /// An ObjCPropertyImplDecl record.
+ DECL_OBJC_PROPERTY_IMPL,
- /// An EnumConstantDecl record.
- DECL_ENUM_CONSTANT,
+ /// A FieldDecl record.
+ DECL_FIELD,
- /// A FunctionDecl record.
- DECL_FUNCTION,
+ /// A MSPropertyDecl record.
+ DECL_MS_PROPERTY,
- /// A ObjCMethodDecl record.
- DECL_OBJC_METHOD,
+ /// A MSGuidDecl record.
+ DECL_MS_GUID,
- /// A ObjCInterfaceDecl record.
- DECL_OBJC_INTERFACE,
+ /// A TemplateParamObjectDecl record.
+ DECL_TEMPLATE_PARAM_OBJECT,
- /// A ObjCProtocolDecl record.
- DECL_OBJC_PROTOCOL,
+ /// A VarDecl record.
+ DECL_VAR,
- /// A ObjCIvarDecl record.
- DECL_OBJC_IVAR,
+ /// An ImplicitParamDecl record.
+ DECL_IMPLICIT_PARAM,
- /// A ObjCAtDefsFieldDecl record.
- DECL_OBJC_AT_DEFS_FIELD,
+ /// A ParmVarDecl record.
+ DECL_PARM_VAR,
- /// A ObjCCategoryDecl record.
- DECL_OBJC_CATEGORY,
+ /// A DecompositionDecl record.
+ DECL_DECOMPOSITION,
- /// A ObjCCategoryImplDecl record.
- DECL_OBJC_CATEGORY_IMPL,
+ /// A BindingDecl record.
+ DECL_BINDING,
- /// A ObjCImplementationDecl record.
- DECL_OBJC_IMPLEMENTATION,
+ /// A FileScopeAsmDecl record.
+ DECL_FILE_SCOPE_ASM,
- /// A ObjCCompatibleAliasDecl record.
- DECL_OBJC_COMPATIBLE_ALIAS,
+ /// A BlockDecl record.
+ DECL_BLOCK,
- /// A ObjCPropertyDecl record.
- DECL_OBJC_PROPERTY,
+ /// A CapturedDecl record.
+ DECL_CAPTURED,
- /// A ObjCPropertyImplDecl record.
- DECL_OBJC_PROPERTY_IMPL,
+ /// A record that stores the set of declarations that are
+ /// lexically stored within a given DeclContext.
+ ///
+ /// The record itself is a blob that is an array of declaration IDs,
+ /// in the order in which those declarations were added to the
+ /// declaration context. This data is used when iterating over
+ /// the contents of a DeclContext, e.g., via
+ /// DeclContext::decls_begin() and DeclContext::decls_end().
+ DECL_CONTEXT_LEXICAL,
- /// A FieldDecl record.
- DECL_FIELD,
+ /// A record that stores the set of declarations that are
+ /// visible from a given DeclContext.
+ ///
+ /// The record itself stores a set of mappings, each of which
+ /// associates a declaration name with one or more declaration
+ /// IDs. This data is used when performing qualified name lookup
+ /// into a DeclContext via DeclContext::lookup.
+ DECL_CONTEXT_VISIBLE,
- /// A MSPropertyDecl record.
- DECL_MS_PROPERTY,
+ /// A LabelDecl record.
+ DECL_LABEL,
- /// A MSGuidDecl record.
- DECL_MS_GUID,
+ /// A NamespaceDecl record.
+ DECL_NAMESPACE,
- /// A TemplateParamObjectDecl record.
- DECL_TEMPLATE_PARAM_OBJECT,
+ /// A NamespaceAliasDecl record.
+ DECL_NAMESPACE_ALIAS,
- /// A VarDecl record.
- DECL_VAR,
+ /// A UsingDecl record.
+ DECL_USING,
- /// An ImplicitParamDecl record.
- DECL_IMPLICIT_PARAM,
+ /// A UsingEnumDecl record.
+ DECL_USING_ENUM,
- /// A ParmVarDecl record.
- DECL_PARM_VAR,
+ /// A UsingPackDecl record.
+ DECL_USING_PACK,
- /// A DecompositionDecl record.
- DECL_DECOMPOSITION,
+ /// A UsingShadowDecl record.
+ DECL_USING_SHADOW,
- /// A BindingDecl record.
- DECL_BINDING,
+ /// A ConstructorUsingShadowDecl record.
+ DECL_CONSTRUCTOR_USING_SHADOW,
- /// A FileScopeAsmDecl record.
- DECL_FILE_SCOPE_ASM,
+ /// A UsingDirectiveDecl record.
+ DECL_USING_DIRECTIVE,
- /// A BlockDecl record.
- DECL_BLOCK,
+ /// An UnresolvedUsingValueDecl record.
+ DECL_UNRESOLVED_USING_VALUE,
- /// A CapturedDecl record.
- DECL_CAPTURED,
+ /// An UnresolvedUsingTypenameDecl record.
+ DECL_UNRESOLVED_USING_TYPENAME,
- /// A record that stores the set of declarations that are
- /// lexically stored within a given DeclContext.
- ///
- /// The record itself is a blob that is an array of declaration IDs,
- /// in the order in which those declarations were added to the
- /// declaration context. This data is used when iterating over
- /// the contents of a DeclContext, e.g., via
- /// DeclContext::decls_begin() and DeclContext::decls_end().
- DECL_CONTEXT_LEXICAL,
+ /// A LinkageSpecDecl record.
+ DECL_LINKAGE_SPEC,
- /// A record that stores the set of declarations that are
- /// visible from a given DeclContext.
- ///
- /// The record itself stores a set of mappings, each of which
- /// associates a declaration name with one or more declaration
- /// IDs. This data is used when performing qualified name lookup
- /// into a DeclContext via DeclContext::lookup.
- DECL_CONTEXT_VISIBLE,
+ /// An ExportDecl record.
+ DECL_EXPORT,
- /// A LabelDecl record.
- DECL_LABEL,
+ /// A CXXRecordDecl record.
+ DECL_CXX_RECORD,
- /// A NamespaceDecl record.
- DECL_NAMESPACE,
+ /// A CXXDeductionGuideDecl record.
+ DECL_CXX_DEDUCTION_GUIDE,
- /// A NamespaceAliasDecl record.
- DECL_NAMESPACE_ALIAS,
+ /// A CXXMethodDecl record.
+ DECL_CXX_METHOD,
- /// A UsingDecl record.
- DECL_USING,
+ /// A CXXConstructorDecl record.
+ DECL_CXX_CONSTRUCTOR,
- /// A UsingPackDecl record.
- DECL_USING_PACK,
+ /// A CXXDestructorDecl record.
+ DECL_CXX_DESTRUCTOR,
- /// A UsingShadowDecl record.
- DECL_USING_SHADOW,
+ /// A CXXConversionDecl record.
+ DECL_CXX_CONVERSION,
- /// A ConstructorUsingShadowDecl record.
- DECL_CONSTRUCTOR_USING_SHADOW,
+ /// An AccessSpecDecl record.
+ DECL_ACCESS_SPEC,
- /// A UsingDirecitveDecl record.
- DECL_USING_DIRECTIVE,
+ /// A FriendDecl record.
+ DECL_FRIEND,
- /// An UnresolvedUsingValueDecl record.
- DECL_UNRESOLVED_USING_VALUE,
+ /// A FriendTemplateDecl record.
+ DECL_FRIEND_TEMPLATE,
- /// An UnresolvedUsingTypenameDecl record.
- DECL_UNRESOLVED_USING_TYPENAME,
+ /// A ClassTemplateDecl record.
+ DECL_CLASS_TEMPLATE,
- /// A LinkageSpecDecl record.
- DECL_LINKAGE_SPEC,
+ /// A ClassTemplateSpecializationDecl record.
+ DECL_CLASS_TEMPLATE_SPECIALIZATION,
- /// An ExportDecl record.
- DECL_EXPORT,
+ /// A ClassTemplatePartialSpecializationDecl record.
+ DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION,
- /// A CXXRecordDecl record.
- DECL_CXX_RECORD,
+ /// A VarTemplateDecl record.
+ DECL_VAR_TEMPLATE,
- /// A CXXDeductionGuideDecl record.
- DECL_CXX_DEDUCTION_GUIDE,
+ /// A VarTemplateSpecializationDecl record.
+ DECL_VAR_TEMPLATE_SPECIALIZATION,
- /// A CXXMethodDecl record.
- DECL_CXX_METHOD,
+ /// A VarTemplatePartialSpecializationDecl record.
+ DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION,
- /// A CXXConstructorDecl record.
- DECL_CXX_CONSTRUCTOR,
+ /// A FunctionTemplateDecl record.
+ DECL_FUNCTION_TEMPLATE,
- /// A CXXDestructorDecl record.
- DECL_CXX_DESTRUCTOR,
+ /// A TemplateTypeParmDecl record.
+ DECL_TEMPLATE_TYPE_PARM,
- /// A CXXConversionDecl record.
- DECL_CXX_CONVERSION,
+ /// A NonTypeTemplateParmDecl record.
+ DECL_NON_TYPE_TEMPLATE_PARM,
- /// An AccessSpecDecl record.
- DECL_ACCESS_SPEC,
+ /// A TemplateTemplateParmDecl record.
+ DECL_TEMPLATE_TEMPLATE_PARM,
- /// A FriendDecl record.
- DECL_FRIEND,
+ /// A TypeAliasTemplateDecl record.
+ DECL_TYPE_ALIAS_TEMPLATE,
- /// A FriendTemplateDecl record.
- DECL_FRIEND_TEMPLATE,
+ /// \brief A ConceptDecl record.
+ DECL_CONCEPT,
- /// A ClassTemplateDecl record.
- DECL_CLASS_TEMPLATE,
+ /// An UnresolvedUsingIfExistsDecl record.
+ DECL_UNRESOLVED_USING_IF_EXISTS,
- /// A ClassTemplateSpecializationDecl record.
- DECL_CLASS_TEMPLATE_SPECIALIZATION,
+ /// \brief A StaticAssertDecl record.
+ DECL_STATIC_ASSERT,
- /// A ClassTemplatePartialSpecializationDecl record.
- DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION,
+ /// A record containing CXXBaseSpecifiers.
+ DECL_CXX_BASE_SPECIFIERS,
- /// A VarTemplateDecl record.
- DECL_VAR_TEMPLATE,
+ /// A record containing CXXCtorInitializers.
+ DECL_CXX_CTOR_INITIALIZERS,
- /// A VarTemplateSpecializationDecl record.
- DECL_VAR_TEMPLATE_SPECIALIZATION,
+ /// An IndirectFieldDecl record.
+ DECL_INDIRECTFIELD,
- /// A VarTemplatePartialSpecializationDecl record.
- DECL_VAR_TEMPLATE_PARTIAL_SPECIALIZATION,
+ /// A NonTypeTemplateParmDecl record that stores an expanded
+ /// non-type template parameter pack.
+ DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK,
- /// A FunctionTemplateDecl record.
- DECL_FUNCTION_TEMPLATE,
+ /// A TemplateTemplateParmDecl record that stores an expanded
+ /// template template parameter pack.
+ DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK,
- /// A TemplateTypeParmDecl record.
- DECL_TEMPLATE_TYPE_PARM,
+ /// A ClassScopeFunctionSpecializationDecl record recording a class
+ /// scope function specialization. (Microsoft extension).
+ DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION,
- /// A NonTypeTemplateParmDecl record.
- DECL_NON_TYPE_TEMPLATE_PARM,
+ /// An ImportDecl recording a module import.
+ DECL_IMPORT,
- /// A TemplateTemplateParmDecl record.
- DECL_TEMPLATE_TEMPLATE_PARM,
+ /// An OMPThreadPrivateDecl record.
+ DECL_OMP_THREADPRIVATE,
- /// A TypeAliasTemplateDecl record.
- DECL_TYPE_ALIAS_TEMPLATE,
+ /// An OMPRequiresDecl record.
+ DECL_OMP_REQUIRES,
- /// \brief A ConceptDecl record.
- DECL_CONCEPT,
+ /// An OMPAllocateDecl record.
+ DECL_OMP_ALLOCATE,
- /// \brief A StaticAssertDecl record.
- DECL_STATIC_ASSERT,
+ /// An EmptyDecl record.
+ DECL_EMPTY,
- /// A record containing CXXBaseSpecifiers.
- DECL_CXX_BASE_SPECIFIERS,
+ /// A LifetimeExtendedTemporaryDecl record.
+ DECL_LIFETIME_EXTENDED_TEMPORARY,
- /// A record containing CXXCtorInitializers.
- DECL_CXX_CTOR_INITIALIZERS,
+ /// A RequiresExprBodyDecl record.
+ DECL_REQUIRES_EXPR_BODY,
- /// A IndirectFieldDecl record.
- DECL_INDIRECTFIELD,
+ /// An ObjCTypeParamDecl record.
+ DECL_OBJC_TYPE_PARAM,
- /// A NonTypeTemplateParmDecl record that stores an expanded
- /// non-type template parameter pack.
- DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK,
+ /// An OMPCapturedExprDecl record.
+ DECL_OMP_CAPTUREDEXPR,
- /// A TemplateTemplateParmDecl record that stores an expanded
- /// template template parameter pack.
- DECL_EXPANDED_TEMPLATE_TEMPLATE_PARM_PACK,
+ /// A PragmaCommentDecl record.
+ DECL_PRAGMA_COMMENT,
- /// A ClassScopeFunctionSpecializationDecl record a class scope
- /// function specialization. (Microsoft extension).
- DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION,
+ /// A PragmaDetectMismatchDecl record.
+ DECL_PRAGMA_DETECT_MISMATCH,
- /// An ImportDecl recording a module import.
- DECL_IMPORT,
+ /// An OMPDeclareMapperDecl record.
+ DECL_OMP_DECLARE_MAPPER,
- /// An OMPThreadPrivateDecl record.
- DECL_OMP_THREADPRIVATE,
+ /// An OMPDeclareReductionDecl record.
+ DECL_OMP_DECLARE_REDUCTION,
- /// An OMPRequiresDecl record.
- DECL_OMP_REQUIRES,
+ DECL_LAST = DECL_OMP_DECLARE_REDUCTION
+};
- /// An OMPAllocateDcl record.
- DECL_OMP_ALLOCATE,
+/// Record codes for each kind of statement or expression.
+///
+/// These constants describe the records that describe statements
+/// or expressions. These records occur within type and declarations
+/// block, so they begin with record values of 128. Each constant
+/// describes a record for a specific statement or expression class in the
+/// AST.
+enum StmtCode {
+ /// A marker record that indicates that we are at the end
+ /// of an expression.
+ STMT_STOP = DECL_LAST + 1,
- /// An EmptyDecl record.
- DECL_EMPTY,
+ /// A NULL expression.
+ STMT_NULL_PTR,
- /// An LifetimeExtendedTemporaryDecl record.
- DECL_LIFETIME_EXTENDED_TEMPORARY,
+ /// A reference to a previously [de]serialized Stmt record.
+ STMT_REF_PTR,
- /// A RequiresExprBodyDecl record.
- DECL_REQUIRES_EXPR_BODY,
+ /// A NullStmt record.
+ STMT_NULL,
- /// An ObjCTypeParamDecl record.
- DECL_OBJC_TYPE_PARAM,
+ /// A CompoundStmt record.
+ STMT_COMPOUND,
- /// An OMPCapturedExprDecl record.
- DECL_OMP_CAPTUREDEXPR,
+ /// A CaseStmt record.
+ STMT_CASE,
- /// A PragmaCommentDecl record.
- DECL_PRAGMA_COMMENT,
+ /// A DefaultStmt record.
+ STMT_DEFAULT,
- /// A PragmaDetectMismatchDecl record.
- DECL_PRAGMA_DETECT_MISMATCH,
+ /// A LabelStmt record.
+ STMT_LABEL,
- /// An OMPDeclareMapperDecl record.
- DECL_OMP_DECLARE_MAPPER,
+ /// An AttributedStmt record.
+ STMT_ATTRIBUTED,
- /// An OMPDeclareReductionDecl record.
- DECL_OMP_DECLARE_REDUCTION,
+ /// An IfStmt record.
+ STMT_IF,
- DECL_LAST = DECL_OMP_DECLARE_REDUCTION
- };
+ /// A SwitchStmt record.
+ STMT_SWITCH,
- /// Record codes for each kind of statement or expression.
- ///
- /// These constants describe the records that describe statements
- /// or expressions. These records occur within type and declarations
- /// block, so they begin with record values of 128. Each constant
- /// describes a record for a specific statement or expression class in the
- /// AST.
- enum StmtCode {
- /// A marker record that indicates that we are at the end
- /// of an expression.
- STMT_STOP = DECL_LAST + 1,
+ /// A WhileStmt record.
+ STMT_WHILE,
- /// A NULL expression.
- STMT_NULL_PTR,
+ /// A DoStmt record.
+ STMT_DO,
- /// A reference to a previously [de]serialized Stmt record.
- STMT_REF_PTR,
+ /// A ForStmt record.
+ STMT_FOR,
- /// A NullStmt record.
- STMT_NULL,
+ /// A GotoStmt record.
+ STMT_GOTO,
- /// A CompoundStmt record.
- STMT_COMPOUND,
+ /// An IndirectGotoStmt record.
+ STMT_INDIRECT_GOTO,
- /// A CaseStmt record.
- STMT_CASE,
+ /// A ContinueStmt record.
+ STMT_CONTINUE,
- /// A DefaultStmt record.
- STMT_DEFAULT,
+ /// A BreakStmt record.
+ STMT_BREAK,
- /// A LabelStmt record.
- STMT_LABEL,
+ /// A ReturnStmt record.
+ STMT_RETURN,
- /// An AttributedStmt record.
- STMT_ATTRIBUTED,
+ /// A DeclStmt record.
+ STMT_DECL,
- /// An IfStmt record.
- STMT_IF,
+ /// A CapturedStmt record.
+ STMT_CAPTURED,
- /// A SwitchStmt record.
- STMT_SWITCH,
+ /// A GCC-style AsmStmt record.
+ STMT_GCCASM,
- /// A WhileStmt record.
- STMT_WHILE,
+ /// A MS-style AsmStmt record.
+ STMT_MSASM,
- /// A DoStmt record.
- STMT_DO,
+ /// A constant expression context.
+ EXPR_CONSTANT,
- /// A ForStmt record.
- STMT_FOR,
+ /// A PredefinedExpr record.
+ EXPR_PREDEFINED,
- /// A GotoStmt record.
- STMT_GOTO,
+ /// A DeclRefExpr record.
+ EXPR_DECL_REF,
- /// An IndirectGotoStmt record.
- STMT_INDIRECT_GOTO,
+ /// An IntegerLiteral record.
+ EXPR_INTEGER_LITERAL,
- /// A ContinueStmt record.
- STMT_CONTINUE,
+ /// A FloatingLiteral record.
+ EXPR_FLOATING_LITERAL,
- /// A BreakStmt record.
- STMT_BREAK,
+ /// An ImaginaryLiteral record.
+ EXPR_IMAGINARY_LITERAL,
- /// A ReturnStmt record.
- STMT_RETURN,
+ /// A StringLiteral record.
+ EXPR_STRING_LITERAL,
- /// A DeclStmt record.
- STMT_DECL,
+ /// A CharacterLiteral record.
+ EXPR_CHARACTER_LITERAL,
- /// A CapturedStmt record.
- STMT_CAPTURED,
+ /// A ParenExpr record.
+ EXPR_PAREN,
- /// A GCC-style AsmStmt record.
- STMT_GCCASM,
+ /// A ParenListExpr record.
+ EXPR_PAREN_LIST,
- /// A MS-style AsmStmt record.
- STMT_MSASM,
+ /// A UnaryOperator record.
+ EXPR_UNARY_OPERATOR,
- /// A constant expression context.
- EXPR_CONSTANT,
+ /// An OffsetOfExpr record.
+ EXPR_OFFSETOF,
- /// A PredefinedExpr record.
- EXPR_PREDEFINED,
+ /// A UnaryExprOrTypeTraitExpr (sizeof/alignof) record.
+ EXPR_SIZEOF_ALIGN_OF,
- /// A DeclRefExpr record.
- EXPR_DECL_REF,
+ /// An ArraySubscriptExpr record.
+ EXPR_ARRAY_SUBSCRIPT,
- /// An IntegerLiteral record.
- EXPR_INTEGER_LITERAL,
+ /// A MatrixSubscriptExpr record.
+ EXPR_MATRIX_SUBSCRIPT,
- /// A FloatingLiteral record.
- EXPR_FLOATING_LITERAL,
+ /// A CallExpr record.
+ EXPR_CALL,
- /// An ImaginaryLiteral record.
- EXPR_IMAGINARY_LITERAL,
+ /// A MemberExpr record.
+ EXPR_MEMBER,
- /// A StringLiteral record.
- EXPR_STRING_LITERAL,
+ /// A BinaryOperator record.
+ EXPR_BINARY_OPERATOR,
- /// A CharacterLiteral record.
- EXPR_CHARACTER_LITERAL,
+ /// A CompoundAssignOperator record.
+ EXPR_COMPOUND_ASSIGN_OPERATOR,
- /// A ParenExpr record.
- EXPR_PAREN,
+ /// A ConditionalOperator record.
+ EXPR_CONDITIONAL_OPERATOR,
- /// A ParenListExpr record.
- EXPR_PAREN_LIST,
+ /// An ImplicitCastExpr record.
+ EXPR_IMPLICIT_CAST,
- /// A UnaryOperator record.
- EXPR_UNARY_OPERATOR,
+ /// A CStyleCastExpr record.
+ EXPR_CSTYLE_CAST,
- /// An OffsetOfExpr record.
- EXPR_OFFSETOF,
+ /// A CompoundLiteralExpr record.
+ EXPR_COMPOUND_LITERAL,
- /// A SizefAlignOfExpr record.
- EXPR_SIZEOF_ALIGN_OF,
+ /// An ExtVectorElementExpr record.
+ EXPR_EXT_VECTOR_ELEMENT,
- /// An ArraySubscriptExpr record.
- EXPR_ARRAY_SUBSCRIPT,
+ /// An InitListExpr record.
+ EXPR_INIT_LIST,
- /// An MatrixSubscriptExpr record.
- EXPR_MATRIX_SUBSCRIPT,
+ /// A DesignatedInitExpr record.
+ EXPR_DESIGNATED_INIT,
- /// A CallExpr record.
- EXPR_CALL,
+ /// A DesignatedInitUpdateExpr record.
+ EXPR_DESIGNATED_INIT_UPDATE,
- /// A MemberExpr record.
- EXPR_MEMBER,
+ /// A NoInitExpr record.
+ EXPR_NO_INIT,
- /// A BinaryOperator record.
- EXPR_BINARY_OPERATOR,
+ /// An ArrayInitLoopExpr record.
+ EXPR_ARRAY_INIT_LOOP,
- /// A CompoundAssignOperator record.
- EXPR_COMPOUND_ASSIGN_OPERATOR,
+ /// An ArrayInitIndexExpr record.
+ EXPR_ARRAY_INIT_INDEX,
- /// A ConditionOperator record.
- EXPR_CONDITIONAL_OPERATOR,
+ /// An ImplicitValueInitExpr record.
+ EXPR_IMPLICIT_VALUE_INIT,
- /// An ImplicitCastExpr record.
- EXPR_IMPLICIT_CAST,
+ /// A VAArgExpr record.
+ EXPR_VA_ARG,
- /// A CStyleCastExpr record.
- EXPR_CSTYLE_CAST,
+ /// An AddrLabelExpr record.
+ EXPR_ADDR_LABEL,
- /// A CompoundLiteralExpr record.
- EXPR_COMPOUND_LITERAL,
+ /// A StmtExpr record.
+ EXPR_STMT,
- /// An ExtVectorElementExpr record.
- EXPR_EXT_VECTOR_ELEMENT,
+ /// A ChooseExpr record.
+ EXPR_CHOOSE,
- /// An InitListExpr record.
- EXPR_INIT_LIST,
+ /// A GNUNullExpr record.
+ EXPR_GNU_NULL,
- /// A DesignatedInitExpr record.
- EXPR_DESIGNATED_INIT,
+ /// A SourceLocExpr record.
+ EXPR_SOURCE_LOC,
- /// A DesignatedInitUpdateExpr record.
- EXPR_DESIGNATED_INIT_UPDATE,
+ /// A ShuffleVectorExpr record.
+ EXPR_SHUFFLE_VECTOR,
- /// An NoInitExpr record.
- EXPR_NO_INIT,
+ /// A ConvertVectorExpr record.
+ EXPR_CONVERT_VECTOR,
- /// An ArrayInitLoopExpr record.
- EXPR_ARRAY_INIT_LOOP,
+ /// BlockExpr
+ EXPR_BLOCK,
- /// An ArrayInitIndexExpr record.
- EXPR_ARRAY_INIT_INDEX,
+ /// A GenericSelectionExpr record.
+ EXPR_GENERIC_SELECTION,
- /// An ImplicitValueInitExpr record.
- EXPR_IMPLICIT_VALUE_INIT,
+ /// A PseudoObjectExpr record.
+ EXPR_PSEUDO_OBJECT,
- /// A VAArgExpr record.
- EXPR_VA_ARG,
+ /// An AtomicExpr record.
+ EXPR_ATOMIC,
- /// An AddrLabelExpr record.
- EXPR_ADDR_LABEL,
+ /// A RecoveryExpr record.
+ EXPR_RECOVERY,
- /// A StmtExpr record.
- EXPR_STMT,
+ // Objective-C
- /// A ChooseExpr record.
- EXPR_CHOOSE,
+ /// An ObjCStringLiteral record.
+ EXPR_OBJC_STRING_LITERAL,
- /// A GNUNullExpr record.
- EXPR_GNU_NULL,
+ EXPR_OBJC_BOXED_EXPRESSION,
+ EXPR_OBJC_ARRAY_LITERAL,
+ EXPR_OBJC_DICTIONARY_LITERAL,
- /// A SourceLocExpr record.
- EXPR_SOURCE_LOC,
+ /// An ObjCEncodeExpr record.
+ EXPR_OBJC_ENCODE,
- /// A ShuffleVectorExpr record.
- EXPR_SHUFFLE_VECTOR,
+ /// An ObjCSelectorExpr record.
+ EXPR_OBJC_SELECTOR_EXPR,
- /// A ConvertVectorExpr record.
- EXPR_CONVERT_VECTOR,
+ /// An ObjCProtocolExpr record.
+ EXPR_OBJC_PROTOCOL_EXPR,
- /// BlockExpr
- EXPR_BLOCK,
+ /// An ObjCIvarRefExpr record.
+ EXPR_OBJC_IVAR_REF_EXPR,
- /// A GenericSelectionExpr record.
- EXPR_GENERIC_SELECTION,
+ /// An ObjCPropertyRefExpr record.
+ EXPR_OBJC_PROPERTY_REF_EXPR,
- /// A PseudoObjectExpr record.
- EXPR_PSEUDO_OBJECT,
+ /// An ObjCSubscriptRefExpr record.
+ EXPR_OBJC_SUBSCRIPT_REF_EXPR,
- /// An AtomicExpr record.
- EXPR_ATOMIC,
+ /// UNUSED
+ EXPR_OBJC_KVC_REF_EXPR,
- /// A RecoveryExpr record.
- EXPR_RECOVERY,
+ /// An ObjCMessageExpr record.
+ EXPR_OBJC_MESSAGE_EXPR,
- // Objective-C
+ /// An ObjCIsaExpr record.
+ EXPR_OBJC_ISA,
- /// An ObjCStringLiteral record.
- EXPR_OBJC_STRING_LITERAL,
+ /// An ObjCIndirectCopyRestoreExpr record.
+ EXPR_OBJC_INDIRECT_COPY_RESTORE,
- EXPR_OBJC_BOXED_EXPRESSION,
- EXPR_OBJC_ARRAY_LITERAL,
- EXPR_OBJC_DICTIONARY_LITERAL,
+ /// An ObjCForCollectionStmt record.
+ STMT_OBJC_FOR_COLLECTION,
- /// An ObjCEncodeExpr record.
- EXPR_OBJC_ENCODE,
+ /// An ObjCAtCatchStmt record.
+ STMT_OBJC_CATCH,
- /// An ObjCSelectorExpr record.
- EXPR_OBJC_SELECTOR_EXPR,
+ /// An ObjCAtFinallyStmt record.
+ STMT_OBJC_FINALLY,
- /// An ObjCProtocolExpr record.
- EXPR_OBJC_PROTOCOL_EXPR,
+ /// An ObjCAtTryStmt record.
+ STMT_OBJC_AT_TRY,
- /// An ObjCIvarRefExpr record.
- EXPR_OBJC_IVAR_REF_EXPR,
+ /// An ObjCAtSynchronizedStmt record.
+ STMT_OBJC_AT_SYNCHRONIZED,
- /// An ObjCPropertyRefExpr record.
- EXPR_OBJC_PROPERTY_REF_EXPR,
+ /// An ObjCAtThrowStmt record.
+ STMT_OBJC_AT_THROW,
- /// An ObjCSubscriptRefExpr record.
- EXPR_OBJC_SUBSCRIPT_REF_EXPR,
+ /// An ObjCAutoreleasePoolStmt record.
+ STMT_OBJC_AUTORELEASE_POOL,
- /// UNUSED
- EXPR_OBJC_KVC_REF_EXPR,
+ /// An ObjCBoolLiteralExpr record.
+ EXPR_OBJC_BOOL_LITERAL,
- /// An ObjCMessageExpr record.
- EXPR_OBJC_MESSAGE_EXPR,
+ /// An ObjCAvailabilityCheckExpr record.
+ EXPR_OBJC_AVAILABILITY_CHECK,
- /// An ObjCIsa Expr record.
- EXPR_OBJC_ISA,
+ // C++
- /// An ObjCIndirectCopyRestoreExpr record.
- EXPR_OBJC_INDIRECT_COPY_RESTORE,
+ /// A CXXCatchStmt record.
+ STMT_CXX_CATCH,
- /// An ObjCForCollectionStmt record.
- STMT_OBJC_FOR_COLLECTION,
+ /// A CXXTryStmt record.
+ STMT_CXX_TRY,
+ /// A CXXForRangeStmt record.
- /// An ObjCAtCatchStmt record.
- STMT_OBJC_CATCH,
+ STMT_CXX_FOR_RANGE,
- /// An ObjCAtFinallyStmt record.
- STMT_OBJC_FINALLY,
+ /// A CXXOperatorCallExpr record.
+ EXPR_CXX_OPERATOR_CALL,
- /// An ObjCAtTryStmt record.
- STMT_OBJC_AT_TRY,
+ /// A CXXMemberCallExpr record.
+ EXPR_CXX_MEMBER_CALL,
- /// An ObjCAtSynchronizedStmt record.
- STMT_OBJC_AT_SYNCHRONIZED,
+ /// A CXXRewrittenBinaryOperator record.
+ EXPR_CXX_REWRITTEN_BINARY_OPERATOR,
- /// An ObjCAtThrowStmt record.
- STMT_OBJC_AT_THROW,
+ /// A CXXConstructExpr record.
+ EXPR_CXX_CONSTRUCT,
- /// An ObjCAutoreleasePoolStmt record.
- STMT_OBJC_AUTORELEASE_POOL,
+ /// A CXXInheritedCtorInitExpr record.
+ EXPR_CXX_INHERITED_CTOR_INIT,
- /// An ObjCBoolLiteralExpr record.
- EXPR_OBJC_BOOL_LITERAL,
+ /// A CXXTemporaryObjectExpr record.
+ EXPR_CXX_TEMPORARY_OBJECT,
- /// An ObjCAvailabilityCheckExpr record.
- EXPR_OBJC_AVAILABILITY_CHECK,
+ /// A CXXStaticCastExpr record.
+ EXPR_CXX_STATIC_CAST,
- // C++
+ /// A CXXDynamicCastExpr record.
+ EXPR_CXX_DYNAMIC_CAST,
- /// A CXXCatchStmt record.
- STMT_CXX_CATCH,
+ /// A CXXReinterpretCastExpr record.
+ EXPR_CXX_REINTERPRET_CAST,
- /// A CXXTryStmt record.
- STMT_CXX_TRY,
- /// A CXXForRangeStmt record.
+ /// A CXXConstCastExpr record.
+ EXPR_CXX_CONST_CAST,
- STMT_CXX_FOR_RANGE,
+ /// A CXXAddrspaceCastExpr record.
+ EXPR_CXX_ADDRSPACE_CAST,
+
+ /// A CXXFunctionalCastExpr record.
+ EXPR_CXX_FUNCTIONAL_CAST,
+
+ /// A BuiltinBitCastExpr record.
+ EXPR_BUILTIN_BIT_CAST,
+
+ /// A UserDefinedLiteral record.
+ EXPR_USER_DEFINED_LITERAL,
+
+ /// A CXXStdInitializerListExpr record.
+ EXPR_CXX_STD_INITIALIZER_LIST,
+
+ /// A CXXBoolLiteralExpr record.
+ EXPR_CXX_BOOL_LITERAL,
+
+ EXPR_CXX_NULL_PTR_LITERAL, // CXXNullPtrLiteralExpr
+ EXPR_CXX_TYPEID_EXPR, // CXXTypeidExpr (of expr).
+ EXPR_CXX_TYPEID_TYPE, // CXXTypeidExpr (of type).
+ EXPR_CXX_THIS, // CXXThisExpr
+ EXPR_CXX_THROW, // CXXThrowExpr
+ EXPR_CXX_DEFAULT_ARG, // CXXDefaultArgExpr
+ EXPR_CXX_DEFAULT_INIT, // CXXDefaultInitExpr
+ EXPR_CXX_BIND_TEMPORARY, // CXXBindTemporaryExpr
+
+ EXPR_CXX_SCALAR_VALUE_INIT, // CXXScalarValueInitExpr
+ EXPR_CXX_NEW, // CXXNewExpr
+ EXPR_CXX_DELETE, // CXXDeleteExpr
+ EXPR_CXX_PSEUDO_DESTRUCTOR, // CXXPseudoDestructorExpr
+
+ EXPR_EXPR_WITH_CLEANUPS, // ExprWithCleanups
+
+ EXPR_CXX_DEPENDENT_SCOPE_MEMBER, // CXXDependentScopeMemberExpr
+ EXPR_CXX_DEPENDENT_SCOPE_DECL_REF, // DependentScopeDeclRefExpr
+ EXPR_CXX_UNRESOLVED_CONSTRUCT, // CXXUnresolvedConstructExpr
+ EXPR_CXX_UNRESOLVED_MEMBER, // UnresolvedMemberExpr
+ EXPR_CXX_UNRESOLVED_LOOKUP, // UnresolvedLookupExpr
+
+ EXPR_CXX_EXPRESSION_TRAIT, // ExpressionTraitExpr
+ EXPR_CXX_NOEXCEPT, // CXXNoexceptExpr
+
+ EXPR_OPAQUE_VALUE, // OpaqueValueExpr
+ EXPR_BINARY_CONDITIONAL_OPERATOR, // BinaryConditionalOperator
+ EXPR_TYPE_TRAIT, // TypeTraitExpr
+ EXPR_ARRAY_TYPE_TRAIT, // ArrayTypeTraitIntExpr
+
+ EXPR_PACK_EXPANSION, // PackExpansionExpr
+ EXPR_SIZEOF_PACK, // SizeOfPackExpr
+ EXPR_SUBST_NON_TYPE_TEMPLATE_PARM, // SubstNonTypeTemplateParmExpr
+ EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK, // SubstNonTypeTemplateParmPackExpr
+ EXPR_FUNCTION_PARM_PACK, // FunctionParmPackExpr
+ EXPR_MATERIALIZE_TEMPORARY, // MaterializeTemporaryExpr
+ EXPR_CXX_FOLD, // CXXFoldExpr
+ EXPR_CONCEPT_SPECIALIZATION, // ConceptSpecializationExpr
+ EXPR_REQUIRES, // RequiresExpr
+
+ // CUDA
+ EXPR_CUDA_KERNEL_CALL, // CUDAKernelCallExpr
+
+ // OpenCL
+ EXPR_ASTYPE, // AsTypeExpr
+
+ // Microsoft
+ EXPR_CXX_PROPERTY_REF_EXPR, // MSPropertyRefExpr
+ EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR, // MSPropertySubscriptExpr
+ EXPR_CXX_UUIDOF_EXPR, // CXXUuidofExpr (of expr).
+ EXPR_CXX_UUIDOF_TYPE, // CXXUuidofExpr (of type).
+ STMT_SEH_LEAVE, // SEHLeaveStmt
+ STMT_SEH_EXCEPT, // SEHExceptStmt
+ STMT_SEH_FINALLY, // SEHFinallyStmt
+ STMT_SEH_TRY, // SEHTryStmt
+
+ // OpenMP directives
+ STMT_OMP_CANONICAL_LOOP,
+ STMT_OMP_PARALLEL_DIRECTIVE,
+ STMT_OMP_SIMD_DIRECTIVE,
+ STMT_OMP_TILE_DIRECTIVE,
+ STMT_OMP_UNROLL_DIRECTIVE,
+ STMT_OMP_FOR_DIRECTIVE,
+ STMT_OMP_FOR_SIMD_DIRECTIVE,
+ STMT_OMP_SECTIONS_DIRECTIVE,
+ STMT_OMP_SECTION_DIRECTIVE,
+ STMT_OMP_SINGLE_DIRECTIVE,
+ STMT_OMP_MASTER_DIRECTIVE,
+ STMT_OMP_CRITICAL_DIRECTIVE,
+ STMT_OMP_PARALLEL_FOR_DIRECTIVE,
+ STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE,
+ STMT_OMP_PARALLEL_MASTER_DIRECTIVE,
+ STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE,
+ STMT_OMP_TASK_DIRECTIVE,
+ STMT_OMP_TASKYIELD_DIRECTIVE,
+ STMT_OMP_BARRIER_DIRECTIVE,
+ STMT_OMP_TASKWAIT_DIRECTIVE,
+ STMT_OMP_FLUSH_DIRECTIVE,
+ STMT_OMP_DEPOBJ_DIRECTIVE,
+ STMT_OMP_SCAN_DIRECTIVE,
+ STMT_OMP_ORDERED_DIRECTIVE,
+ STMT_OMP_ATOMIC_DIRECTIVE,
+ STMT_OMP_TARGET_DIRECTIVE,
+ STMT_OMP_TARGET_DATA_DIRECTIVE,
+ STMT_OMP_TARGET_ENTER_DATA_DIRECTIVE,
+ STMT_OMP_TARGET_EXIT_DATA_DIRECTIVE,
+ STMT_OMP_TARGET_PARALLEL_DIRECTIVE,
+ STMT_OMP_TARGET_PARALLEL_FOR_DIRECTIVE,
+ STMT_OMP_TEAMS_DIRECTIVE,
+ STMT_OMP_TASKGROUP_DIRECTIVE,
+ STMT_OMP_CANCELLATION_POINT_DIRECTIVE,
+ STMT_OMP_CANCEL_DIRECTIVE,
+ STMT_OMP_TASKLOOP_DIRECTIVE,
+ STMT_OMP_TASKLOOP_SIMD_DIRECTIVE,
+ STMT_OMP_MASTER_TASKLOOP_DIRECTIVE,
+ STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE,
+ STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE,
+ STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE,
+ STMT_OMP_DISTRIBUTE_DIRECTIVE,
+ STMT_OMP_TARGET_UPDATE_DIRECTIVE,
+ STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
+ STMT_OMP_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
+ STMT_OMP_DISTRIBUTE_SIMD_DIRECTIVE,
+ STMT_OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE,
+ STMT_OMP_TARGET_SIMD_DIRECTIVE,
+ STMT_OMP_TEAMS_DISTRIBUTE_DIRECTIVE,
+ STMT_OMP_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE,
+ STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
+ STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
+ STMT_OMP_TARGET_TEAMS_DIRECTIVE,
+ STMT_OMP_TARGET_TEAMS_DISTRIBUTE_DIRECTIVE,
+ STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
+ STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
+ STMT_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE,
+ STMT_OMP_INTEROP_DIRECTIVE,
+ STMT_OMP_DISPATCH_DIRECTIVE,
+ STMT_OMP_MASKED_DIRECTIVE,
+ EXPR_OMP_ARRAY_SECTION,
+ EXPR_OMP_ARRAY_SHAPING,
+ EXPR_OMP_ITERATOR,
+
+ // ARC
+ EXPR_OBJC_BRIDGED_CAST, // ObjCBridgedCastExpr
+
+ STMT_MS_DEPENDENT_EXISTS, // MSDependentExistsStmt
+ EXPR_LAMBDA, // LambdaExpr
+ STMT_COROUTINE_BODY,
+ STMT_CORETURN,
+ EXPR_COAWAIT,
+ EXPR_COYIELD,
+ EXPR_DEPENDENT_COAWAIT,
+
+ // FixedPointLiteral
+ EXPR_FIXEDPOINT_LITERAL,
+
+ // SYCLUniqueStableNameExpr
+ EXPR_SYCL_UNIQUE_STABLE_NAME,
+};
- /// A CXXOperatorCallExpr record.
- EXPR_CXX_OPERATOR_CALL,
+/// The kinds of designators that can occur in a
+/// DesignatedInitExpr.
+enum DesignatorTypes {
+ /// Field designator where only the field name is known.
+ DESIG_FIELD_NAME = 0,
- /// A CXXMemberCallExpr record.
- EXPR_CXX_MEMBER_CALL,
+ /// Field designator where the field has been resolved to
+ /// a declaration.
+ DESIG_FIELD_DECL = 1,
- /// A CXXRewrittenBinaryOperator record.
- EXPR_CXX_REWRITTEN_BINARY_OPERATOR,
+ /// Array designator.
+ DESIG_ARRAY = 2,
- /// A CXXConstructExpr record.
- EXPR_CXX_CONSTRUCT,
+ /// GNU array range designator.
+ DESIG_ARRAY_RANGE = 3
+};
- /// A CXXInheritedCtorInitExpr record.
- EXPR_CXX_INHERITED_CTOR_INIT,
+/// The different kinds of data that can occur in a
+/// CtorInitializer.
+enum CtorInitializerType {
+ CTOR_INITIALIZER_BASE,
+ CTOR_INITIALIZER_DELEGATING,
+ CTOR_INITIALIZER_MEMBER,
+ CTOR_INITIALIZER_INDIRECT_MEMBER
+};
- /// A CXXTemporaryObjectExpr record.
- EXPR_CXX_TEMPORARY_OBJECT,
+/// Kinds of cleanup objects owned by ExprWithCleanups.
+enum CleanupObjectKind { COK_Block, COK_CompoundLiteral };
- /// A CXXStaticCastExpr record.
- EXPR_CXX_STATIC_CAST,
+/// Describes the redeclarations of a declaration.
+struct LocalRedeclarationsInfo {
+ // The ID of the first declaration
+ DeclID FirstID;
- /// A CXXDynamicCastExpr record.
- EXPR_CXX_DYNAMIC_CAST,
+ // Offset into the array of redeclaration chains.
+ unsigned Offset;
- /// A CXXReinterpretCastExpr record.
- EXPR_CXX_REINTERPRET_CAST,
+ friend bool operator<(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID < Y.FirstID;
+ }
- /// A CXXConstCastExpr record.
- EXPR_CXX_CONST_CAST,
+ friend bool operator>(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID > Y.FirstID;
+ }
- /// A CXXAddrspaceCastExpr record.
- EXPR_CXX_ADDRSPACE_CAST,
-
- /// A CXXFunctionalCastExpr record.
- EXPR_CXX_FUNCTIONAL_CAST,
-
- /// A BuiltinBitCastExpr record.
- EXPR_BUILTIN_BIT_CAST,
-
- /// A UserDefinedLiteral record.
- EXPR_USER_DEFINED_LITERAL,
-
- /// A CXXStdInitializerListExpr record.
- EXPR_CXX_STD_INITIALIZER_LIST,
-
- /// A CXXBoolLiteralExpr record.
- EXPR_CXX_BOOL_LITERAL,
-
- EXPR_CXX_NULL_PTR_LITERAL, // CXXNullPtrLiteralExpr
- EXPR_CXX_TYPEID_EXPR, // CXXTypeidExpr (of expr).
- EXPR_CXX_TYPEID_TYPE, // CXXTypeidExpr (of type).
- EXPR_CXX_THIS, // CXXThisExpr
- EXPR_CXX_THROW, // CXXThrowExpr
- EXPR_CXX_DEFAULT_ARG, // CXXDefaultArgExpr
- EXPR_CXX_DEFAULT_INIT, // CXXDefaultInitExpr
- EXPR_CXX_BIND_TEMPORARY, // CXXBindTemporaryExpr
-
- EXPR_CXX_SCALAR_VALUE_INIT, // CXXScalarValueInitExpr
- EXPR_CXX_NEW, // CXXNewExpr
- EXPR_CXX_DELETE, // CXXDeleteExpr
- EXPR_CXX_PSEUDO_DESTRUCTOR, // CXXPseudoDestructorExpr
-
- EXPR_EXPR_WITH_CLEANUPS, // ExprWithCleanups
-
- EXPR_CXX_DEPENDENT_SCOPE_MEMBER, // CXXDependentScopeMemberExpr
- EXPR_CXX_DEPENDENT_SCOPE_DECL_REF, // DependentScopeDeclRefExpr
- EXPR_CXX_UNRESOLVED_CONSTRUCT, // CXXUnresolvedConstructExpr
- EXPR_CXX_UNRESOLVED_MEMBER, // UnresolvedMemberExpr
- EXPR_CXX_UNRESOLVED_LOOKUP, // UnresolvedLookupExpr
-
- EXPR_CXX_EXPRESSION_TRAIT, // ExpressionTraitExpr
- EXPR_CXX_NOEXCEPT, // CXXNoexceptExpr
-
- EXPR_OPAQUE_VALUE, // OpaqueValueExpr
- EXPR_BINARY_CONDITIONAL_OPERATOR, // BinaryConditionalOperator
- EXPR_TYPE_TRAIT, // TypeTraitExpr
- EXPR_ARRAY_TYPE_TRAIT, // ArrayTypeTraitIntExpr
-
- EXPR_PACK_EXPANSION, // PackExpansionExpr
- EXPR_SIZEOF_PACK, // SizeOfPackExpr
- EXPR_SUBST_NON_TYPE_TEMPLATE_PARM, // SubstNonTypeTemplateParmExpr
- EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK,// SubstNonTypeTemplateParmPackExpr
- EXPR_FUNCTION_PARM_PACK, // FunctionParmPackExpr
- EXPR_MATERIALIZE_TEMPORARY, // MaterializeTemporaryExpr
- EXPR_CXX_FOLD, // CXXFoldExpr
- EXPR_CONCEPT_SPECIALIZATION,// ConceptSpecializationExpr
- EXPR_REQUIRES, // RequiresExpr
-
- // CUDA
- EXPR_CUDA_KERNEL_CALL, // CUDAKernelCallExpr
-
- // OpenCL
- EXPR_ASTYPE, // AsTypeExpr
-
- // Microsoft
- EXPR_CXX_PROPERTY_REF_EXPR, // MSPropertyRefExpr
- EXPR_CXX_PROPERTY_SUBSCRIPT_EXPR, // MSPropertySubscriptExpr
- EXPR_CXX_UUIDOF_EXPR, // CXXUuidofExpr (of expr).
- EXPR_CXX_UUIDOF_TYPE, // CXXUuidofExpr (of type).
- STMT_SEH_LEAVE, // SEHLeaveStmt
- STMT_SEH_EXCEPT, // SEHExceptStmt
- STMT_SEH_FINALLY, // SEHFinallyStmt
- STMT_SEH_TRY, // SEHTryStmt
-
- // OpenMP directives
- STMT_OMP_PARALLEL_DIRECTIVE,
- STMT_OMP_SIMD_DIRECTIVE,
- STMT_OMP_FOR_DIRECTIVE,
- STMT_OMP_FOR_SIMD_DIRECTIVE,
- STMT_OMP_SECTIONS_DIRECTIVE,
- STMT_OMP_SECTION_DIRECTIVE,
- STMT_OMP_SINGLE_DIRECTIVE,
- STMT_OMP_MASTER_DIRECTIVE,
- STMT_OMP_CRITICAL_DIRECTIVE,
- STMT_OMP_PARALLEL_FOR_DIRECTIVE,
- STMT_OMP_PARALLEL_FOR_SIMD_DIRECTIVE,
- STMT_OMP_PARALLEL_MASTER_DIRECTIVE,
- STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE,
- STMT_OMP_TASK_DIRECTIVE,
- STMT_OMP_TASKYIELD_DIRECTIVE,
- STMT_OMP_BARRIER_DIRECTIVE,
- STMT_OMP_TASKWAIT_DIRECTIVE,
- STMT_OMP_FLUSH_DIRECTIVE,
- STMT_OMP_DEPOBJ_DIRECTIVE,
- STMT_OMP_SCAN_DIRECTIVE,
- STMT_OMP_ORDERED_DIRECTIVE,
- STMT_OMP_ATOMIC_DIRECTIVE,
- STMT_OMP_TARGET_DIRECTIVE,
- STMT_OMP_TARGET_DATA_DIRECTIVE,
- STMT_OMP_TARGET_ENTER_DATA_DIRECTIVE,
- STMT_OMP_TARGET_EXIT_DATA_DIRECTIVE,
- STMT_OMP_TARGET_PARALLEL_DIRECTIVE,
- STMT_OMP_TARGET_PARALLEL_FOR_DIRECTIVE,
- STMT_OMP_TEAMS_DIRECTIVE,
- STMT_OMP_TASKGROUP_DIRECTIVE,
- STMT_OMP_CANCELLATION_POINT_DIRECTIVE,
- STMT_OMP_CANCEL_DIRECTIVE,
- STMT_OMP_TASKLOOP_DIRECTIVE,
- STMT_OMP_TASKLOOP_SIMD_DIRECTIVE,
- STMT_OMP_MASTER_TASKLOOP_DIRECTIVE,
- STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE,
- STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE,
- STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE,
- STMT_OMP_DISTRIBUTE_DIRECTIVE,
- STMT_OMP_TARGET_UPDATE_DIRECTIVE,
- STMT_OMP_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
- STMT_OMP_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
- STMT_OMP_DISTRIBUTE_SIMD_DIRECTIVE,
- STMT_OMP_TARGET_PARALLEL_FOR_SIMD_DIRECTIVE,
- STMT_OMP_TARGET_SIMD_DIRECTIVE,
- STMT_OMP_TEAMS_DISTRIBUTE_DIRECTIVE,
- STMT_OMP_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE,
- STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
- STMT_OMP_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
- STMT_OMP_TARGET_TEAMS_DIRECTIVE,
- STMT_OMP_TARGET_TEAMS_DISTRIBUTE_DIRECTIVE,
- STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_DIRECTIVE,
- STMT_OMP_TARGET_TEAMS_DISTRIBUTE_PARALLEL_FOR_SIMD_DIRECTIVE,
- STMT_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE,
- EXPR_OMP_ARRAY_SECTION,
- EXPR_OMP_ARRAY_SHAPING,
- EXPR_OMP_ITERATOR,
-
- // ARC
- EXPR_OBJC_BRIDGED_CAST, // ObjCBridgedCastExpr
-
- STMT_MS_DEPENDENT_EXISTS, // MSDependentExistsStmt
- EXPR_LAMBDA, // LambdaExpr
- STMT_COROUTINE_BODY,
- STMT_CORETURN,
- EXPR_COAWAIT,
- EXPR_COYIELD,
- EXPR_DEPENDENT_COAWAIT,
-
- // FixedPointLiteral
- EXPR_FIXEDPOINT_LITERAL,
- };
-
- /// The kinds of designators that can occur in a
- /// DesignatedInitExpr.
- enum DesignatorTypes {
- /// Field designator where only the field name is known.
- DESIG_FIELD_NAME = 0,
-
- /// Field designator where the field has been resolved to
- /// a declaration.
- DESIG_FIELD_DECL = 1,
-
- /// Array designator.
- DESIG_ARRAY = 2,
-
- /// GNU array range designator.
- DESIG_ARRAY_RANGE = 3
- };
-
- /// The different kinds of data that can occur in a
- /// CtorInitializer.
- enum CtorInitializerType {
- CTOR_INITIALIZER_BASE,
- CTOR_INITIALIZER_DELEGATING,
- CTOR_INITIALIZER_MEMBER,
- CTOR_INITIALIZER_INDIRECT_MEMBER
- };
-
- /// Kinds of cleanup objects owned by ExprWithCleanups.
- enum CleanupObjectKind { COK_Block, COK_CompoundLiteral };
-
- /// Describes the redeclarations of a declaration.
- struct LocalRedeclarationsInfo {
- // The ID of the first declaration
- DeclID FirstID;
-
- // Offset into the array of redeclaration chains.
- unsigned Offset;
-
- friend bool operator<(const LocalRedeclarationsInfo &X,
- const LocalRedeclarationsInfo &Y) {
- return X.FirstID < Y.FirstID;
- }
-
- friend bool operator>(const LocalRedeclarationsInfo &X,
- const LocalRedeclarationsInfo &Y) {
- return X.FirstID > Y.FirstID;
- }
-
- friend bool operator<=(const LocalRedeclarationsInfo &X,
- const LocalRedeclarationsInfo &Y) {
- return X.FirstID <= Y.FirstID;
- }
-
- friend bool operator>=(const LocalRedeclarationsInfo &X,
- const LocalRedeclarationsInfo &Y) {
- return X.FirstID >= Y.FirstID;
- }
- };
-
- /// Describes the categories of an Objective-C class.
- struct ObjCCategoriesInfo {
- // The ID of the definition
- DeclID DefinitionID;
-
- // Offset into the array of category lists.
- unsigned Offset;
-
- friend bool operator<(const ObjCCategoriesInfo &X,
- const ObjCCategoriesInfo &Y) {
- return X.DefinitionID < Y.DefinitionID;
- }
-
- friend bool operator>(const ObjCCategoriesInfo &X,
- const ObjCCategoriesInfo &Y) {
- return X.DefinitionID > Y.DefinitionID;
- }
-
- friend bool operator<=(const ObjCCategoriesInfo &X,
- const ObjCCategoriesInfo &Y) {
- return X.DefinitionID <= Y.DefinitionID;
- }
-
- friend bool operator>=(const ObjCCategoriesInfo &X,
- const ObjCCategoriesInfo &Y) {
- return X.DefinitionID >= Y.DefinitionID;
- }
- };
-
- /// A key used when looking up entities by \ref DeclarationName.
- ///
- /// Different \ref DeclarationNames are mapped to different keys, but the
- /// same key can occasionally represent multiple names (for names that
- /// contain types, in particular).
- class DeclarationNameKey {
- using NameKind = unsigned;
-
- NameKind Kind = 0;
- uint64_t Data = 0;
-
- public:
- DeclarationNameKey() = default;
- DeclarationNameKey(DeclarationName Name);
- DeclarationNameKey(NameKind Kind, uint64_t Data)
- : Kind(Kind), Data(Data) {}
-
- NameKind getKind() const { return Kind; }
-
- IdentifierInfo *getIdentifier() const {
- assert(Kind == DeclarationName::Identifier ||
- Kind == DeclarationName::CXXLiteralOperatorName ||
- Kind == DeclarationName::CXXDeductionGuideName);
- return (IdentifierInfo *)Data;
- }
-
- Selector getSelector() const {
- assert(Kind == DeclarationName::ObjCZeroArgSelector ||
- Kind == DeclarationName::ObjCOneArgSelector ||
- Kind == DeclarationName::ObjCMultiArgSelector);
- return Selector(Data);
- }
-
- OverloadedOperatorKind getOperatorKind() const {
- assert(Kind == DeclarationName::CXXOperatorName);
- return (OverloadedOperatorKind)Data;
- }
-
- /// Compute a fingerprint of this key for use in on-disk hash table.
- unsigned getHash() const;
-
- friend bool operator==(const DeclarationNameKey &A,
- const DeclarationNameKey &B) {
- return A.Kind == B.Kind && A.Data == B.Data;
- }
- };
-
- /// @}
+ friend bool operator<=(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID <= Y.FirstID;
+ }
+
+ friend bool operator>=(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID >= Y.FirstID;
+ }
+};
+
+/// Describes the categories of an Objective-C class.
+struct ObjCCategoriesInfo {
+ // The ID of the definition
+ DeclID DefinitionID;
+
+ // Offset into the array of category lists.
+ unsigned Offset;
+
+ friend bool operator<(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID < Y.DefinitionID;
+ }
+
+ friend bool operator>(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID > Y.DefinitionID;
+ }
+
+ friend bool operator<=(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID <= Y.DefinitionID;
+ }
+
+ friend bool operator>=(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID >= Y.DefinitionID;
+ }
+};
+
+/// A key used when looking up entities by \ref DeclarationName.
+///
+/// Different \ref DeclarationNames are mapped to different keys, but the
+/// same key can occasionally represent multiple names (for names that
+/// contain types, in particular).
+class DeclarationNameKey {
+ using NameKind = unsigned;
+
+ NameKind Kind = 0;
+ uint64_t Data = 0;
+
+public:
+ DeclarationNameKey() = default;
+ DeclarationNameKey(DeclarationName Name);
+ DeclarationNameKey(NameKind Kind, uint64_t Data) : Kind(Kind), Data(Data) {}
+
+ NameKind getKind() const { return Kind; }
+
+ IdentifierInfo *getIdentifier() const {
+ assert(Kind == DeclarationName::Identifier ||
+ Kind == DeclarationName::CXXLiteralOperatorName ||
+ Kind == DeclarationName::CXXDeductionGuideName);
+ return (IdentifierInfo *)Data;
+ }
+
+ Selector getSelector() const {
+ assert(Kind == DeclarationName::ObjCZeroArgSelector ||
+ Kind == DeclarationName::ObjCOneArgSelector ||
+ Kind == DeclarationName::ObjCMultiArgSelector);
+ return Selector(Data);
+ }
+
+ OverloadedOperatorKind getOperatorKind() const {
+ assert(Kind == DeclarationName::CXXOperatorName);
+ return (OverloadedOperatorKind)Data;
+ }
+
+ /// Compute a fingerprint of this key for use in on-disk hash table.
+ unsigned getHash() const;
+
+ friend bool operator==(const DeclarationNameKey &A,
+ const DeclarationNameKey &B) {
+ return A.Kind == B.Kind && A.Data == B.Data;
+ }
+};
+
+/// @}
} // namespace serialization
} // namespace clang
namespace llvm {
- template <> struct DenseMapInfo<clang::serialization::DeclarationNameKey> {
- static clang::serialization::DeclarationNameKey getEmptyKey() {
- return clang::serialization::DeclarationNameKey(-1, 1);
- }
-
- static clang::serialization::DeclarationNameKey getTombstoneKey() {
- return clang::serialization::DeclarationNameKey(-1, 2);
- }
-
- static unsigned
- getHashValue(const clang::serialization::DeclarationNameKey &Key) {
- return Key.getHash();
- }
-
- static bool isEqual(const clang::serialization::DeclarationNameKey &L,
- const clang::serialization::DeclarationNameKey &R) {
- return L == R;
- }
- };
+template <> struct DenseMapInfo<clang::serialization::DeclarationNameKey> {
+ static clang::serialization::DeclarationNameKey getEmptyKey() {
+ return clang::serialization::DeclarationNameKey(-1, 1);
+ }
+
+ static clang::serialization::DeclarationNameKey getTombstoneKey() {
+ return clang::serialization::DeclarationNameKey(-1, 2);
+ }
+
+ static unsigned
+ getHashValue(const clang::serialization::DeclarationNameKey &Key) {
+ return Key.getHash();
+ }
+
+ static bool isEqual(const clang::serialization::DeclarationNameKey &L,
+ const clang::serialization::DeclarationNameKey &R) {
+ return L == R;
+ }
+};
} // namespace llvm
diff --git a/clang/include/clang/Serialization/ASTReader.h b/clang/include/clang/Serialization/ASTReader.h
index d0d2a68114c7..242b75baca6c 100644
--- a/clang/include/clang/Serialization/ASTReader.h
+++ b/clang/include/clang/Serialization/ASTReader.h
@@ -767,21 +767,21 @@ private:
/// This contains the data loaded from all EAGERLY_DESERIALIZED_DECLS blocks
/// in the chain. The referenced declarations are deserialized and passed to
/// the consumer eagerly.
- SmallVector<uint64_t, 16> EagerlyDeserializedDecls;
+ SmallVector<serialization::DeclID, 16> EagerlyDeserializedDecls;
/// The IDs of all tentative definitions stored in the chain.
///
/// Sema keeps track of all tentative definitions in a TU because it has to
/// complete them and pass them on to CodeGen. Thus, tentative definitions in
/// the PCH chain must be eagerly deserialized.
- SmallVector<uint64_t, 16> TentativeDefinitions;
+ SmallVector<serialization::DeclID, 16> TentativeDefinitions;
/// The IDs of all CXXRecordDecls stored in the chain whose VTables are
/// used.
///
/// CodeGen has to emit VTables for these records, so they have to be eagerly
/// deserialized.
- SmallVector<uint64_t, 64> VTableUses;
+ SmallVector<serialization::DeclID, 64> VTableUses;
/// A snapshot of the pending instantiations in the chain.
///
@@ -789,7 +789,7 @@ private:
/// end of the TU. It consists of a pair of values for every pending
/// instantiation where the first value is the ID of the decl and the second
/// is the instantiation location.
- SmallVector<uint64_t, 64> PendingInstantiations;
+ SmallVector<serialization::DeclID, 64> PendingInstantiations;
//@}
@@ -799,24 +799,24 @@ private:
/// A snapshot of Sema's unused file-scoped variable tracking, for
/// generating warnings.
- SmallVector<uint64_t, 16> UnusedFileScopedDecls;
+ SmallVector<serialization::DeclID, 16> UnusedFileScopedDecls;
/// A list of all the delegating constructors we've seen, to diagnose
/// cycles.
- SmallVector<uint64_t, 4> DelegatingCtorDecls;
+ SmallVector<serialization::DeclID, 4> DelegatingCtorDecls;
/// Method selectors used in a @selector expression. Used for
/// implementation of -Wselector.
- SmallVector<uint64_t, 64> ReferencedSelectorsData;
+ SmallVector<serialization::SelectorID, 64> ReferencedSelectorsData;
/// A snapshot of Sema's weak undeclared identifier tracking, for
/// generating warnings.
- SmallVector<uint64_t, 64> WeakUndeclaredIdentifiers;
+ SmallVector<serialization::IdentifierID, 64> WeakUndeclaredIdentifiers;
/// The IDs of type aliases for ext_vectors that exist in the chain.
///
/// Used by Sema for finding sugared names for ext_vectors in diagnostics.
- SmallVector<uint64_t, 4> ExtVectorDecls;
+ SmallVector<serialization::DeclID, 4> ExtVectorDecls;
//@}
@@ -827,7 +827,7 @@ private:
/// The IDs of all potentially unused typedef names in the chain.
///
/// Sema tracks these to emit warnings.
- SmallVector<uint64_t, 16> UnusedLocalTypedefNameCandidates;
+ SmallVector<serialization::DeclID, 16> UnusedLocalTypedefNameCandidates;
/// Our current depth in #pragma cuda force_host_device begin/end
/// macros.
@@ -836,18 +836,18 @@ private:
/// The IDs of the declarations Sema stores directly.
///
/// Sema tracks a few important decls, such as namespace std, directly.
- SmallVector<uint64_t, 4> SemaDeclRefs;
+ SmallVector<serialization::DeclID, 4> SemaDeclRefs;
/// The IDs of the types ASTContext stores directly.
///
/// The AST context tracks a few important types, such as va_list, directly.
- SmallVector<uint64_t, 16> SpecialTypes;
+ SmallVector<serialization::TypeID, 16> SpecialTypes;
/// The IDs of CUDA-specific declarations ASTContext stores directly.
///
/// The AST context tracks a few important decls, currently cudaConfigureCall,
/// directly.
- SmallVector<uint64_t, 2> CUDASpecialDeclRefs;
+ SmallVector<serialization::DeclID, 2> CUDASpecialDeclRefs;
/// The floating point pragma option settings.
SmallVector<uint64_t, 1> FPPragmaOptions;
@@ -896,11 +896,11 @@ private:
llvm::DenseMap<const Decl *, std::set<std::string>> OpenCLDeclExtMap;
/// A list of the namespaces we've seen.
- SmallVector<uint64_t, 4> KnownNamespaces;
+ SmallVector<serialization::DeclID, 4> KnownNamespaces;
/// A list of undefined decls with internal linkage followed by the
/// SourceLocation of a matching ODR-use.
- SmallVector<uint64_t, 8> UndefinedButUsed;
+ SmallVector<serialization::DeclID, 8> UndefinedButUsed;
/// Delete expressions to analyze at the end of translation unit.
SmallVector<uint64_t, 8> DelayedDeleteExprs;
@@ -912,8 +912,7 @@ private:
/// The IDs of all decls to be checked for deferred diags.
///
/// Sema tracks these to emit deferred diags.
- SmallVector<uint64_t, 4> DeclsToCheckForDeferredDiags;
-
+ llvm::SmallSetVector<serialization::DeclID, 4> DeclsToCheckForDeferredDiags;
public:
struct ImportedSubmodule {
@@ -1402,6 +1401,9 @@ private:
llvm::iterator_range<PreprocessingRecord::iterator>
getModulePreprocessedEntities(ModuleFile &Mod) const;
+ bool canRecoverFromOutOfDate(StringRef ModuleFileName,
+ unsigned ClientLoadCapabilities);
+
public:
class ModuleDeclIterator
: public llvm::iterator_adaptor_base<
@@ -1543,7 +1545,11 @@ public:
/// The client can handle an AST file that cannot load because it's
/// compiled configuration doesn't match that of the context it was
/// loaded into.
- ARR_ConfigurationMismatch = 0x8
+ ARR_ConfigurationMismatch = 0x8,
+
+ /// If a module file is marked with errors treat it as out-of-date so the
+ /// caller can rebuild it.
+ ARR_TreatModuleWithErrorsAsOutOfDate = 0x10
};
/// Load the AST file designated by the given file name.
@@ -2012,7 +2018,7 @@ public:
llvm::SmallSetVector<const TypedefNameDecl *, 4> &Decls) override;
void ReadDeclsToCheckForDeferredDiags(
- llvm::SmallVector<Decl *, 4> &Decls) override;
+ llvm::SmallSetVector<Decl *, 4> &Decls) override;
void ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation>> &Sels) override;
@@ -2133,12 +2139,15 @@ public:
/// Read a source location from raw form and return it in its
/// originating module file's source location space.
- SourceLocation ReadUntranslatedSourceLocation(uint32_t Raw) const {
- return SourceLocation::getFromRawEncoding((Raw >> 1) | (Raw << 31));
+ SourceLocation
+ ReadUntranslatedSourceLocation(SourceLocation::UIntTy Raw) const {
+ return SourceLocation::getFromRawEncoding((Raw >> 1) |
+ (Raw << (8 * sizeof(Raw) - 1)));
}
/// Read a source location from raw form.
- SourceLocation ReadSourceLocation(ModuleFile &ModuleFile, uint32_t Raw) const {
+ SourceLocation ReadSourceLocation(ModuleFile &ModuleFile,
+ SourceLocation::UIntTy Raw) const {
SourceLocation Loc = ReadUntranslatedSourceLocation(Raw);
return TranslateSourceLocation(ModuleFile, Loc);
}
@@ -2152,7 +2161,8 @@ public:
assert(ModuleFile.SLocRemap.find(Loc.getOffset()) !=
ModuleFile.SLocRemap.end() &&
"Cannot find offset to remap.");
- int Remap = ModuleFile.SLocRemap.find(Loc.getOffset())->second;
+ SourceLocation::IntTy Remap =
+ ModuleFile.SLocRemap.find(Loc.getOffset())->second;
return Loc.getLocWithOffset(Remap);
}
diff --git a/clang/include/clang/Serialization/ASTWriter.h b/clang/include/clang/Serialization/ASTWriter.h
index 12073a38a77a..ac88cb0a3177 100644
--- a/clang/include/clang/Serialization/ASTWriter.h
+++ b/clang/include/clang/Serialization/ASTWriter.h
@@ -84,7 +84,7 @@ class RecordDecl;
class Sema;
class SourceManager;
class Stmt;
-struct StoredDeclsList;
+class StoredDeclsList;
class SwitchCase;
class TemplateParameterList;
class Token;
@@ -347,7 +347,7 @@ private:
union {
const Decl *Dcl;
void *Type;
- unsigned Loc;
+ SourceLocation::UIntTy Loc;
unsigned Val;
Module *Mod;
const Attr *Attribute;
@@ -402,8 +402,8 @@ private:
/// headers. The declarations themselves are stored as declaration
/// IDs, since they will be written out to an EAGERLY_DESERIALIZED_DECLS
/// record.
- SmallVector<uint64_t, 16> EagerlyDeserializedDecls;
- SmallVector<uint64_t, 16> ModularCodegenDecls;
+ SmallVector<serialization::DeclID, 16> EagerlyDeserializedDecls;
+ SmallVector<serialization::DeclID, 16> ModularCodegenDecls;
/// DeclContexts that have received extensions since their serialized
/// form.
@@ -450,7 +450,7 @@ private:
/// A mapping from each known submodule to its ID number, which will
/// be a positive integer.
- llvm::DenseMap<Module *, unsigned> SubmoduleIDs;
+ llvm::DenseMap<const Module *, unsigned> SubmoduleIDs;
/// A list of the module file extension writers.
std::vector<std::unique_ptr<ModuleFileExtensionWriter>>
@@ -510,8 +510,6 @@ private:
void WriteDeclContextVisibleUpdate(const DeclContext *DC);
void WriteFPPragmaOptions(const FPOptionsOverride &Opts);
void WriteOpenCLExtensions(Sema &SemaRef);
- void WriteOpenCLExtensionTypes(Sema &SemaRef);
- void WriteOpenCLExtensionDecls(Sema &SemaRef);
void WriteCUDAPragmas(Sema &SemaRef);
void WriteObjCCategories();
void WriteLateParsedTemplates(Sema &SemaRef);
@@ -673,7 +671,7 @@ public:
/// Retrieve or create a submodule ID for this module, or return 0 if
/// the submodule is neither local (a submodle of the currently-written module)
/// nor from an imported module.
- unsigned getLocalOrImportedSubmoduleID(Module *Mod);
+ unsigned getLocalOrImportedSubmoduleID(const Module *Mod);
/// Note that the identifier II occurs at the given offset
/// within the identifier table.
diff --git a/clang/include/clang/Serialization/ModuleFile.h b/clang/include/clang/Serialization/ModuleFile.h
index a641a26661ae..b1c8a8c8e72b 100644
--- a/clang/include/clang/Serialization/ModuleFile.h
+++ b/clang/include/clang/Serialization/ModuleFile.h
@@ -260,7 +260,7 @@ public:
int SLocEntryBaseID = 0;
/// The base offset in the source manager's view of this module.
- unsigned SLocEntryBaseOffset = 0;
+ SourceLocation::UIntTy SLocEntryBaseOffset = 0;
/// Base file offset for the offsets in SLocEntryOffsets. Real file offset
/// for the entry is SLocEntryOffsetsBase + SLocEntryOffsets[i].
@@ -274,7 +274,8 @@ public:
SmallVector<uint64_t, 4> PreloadSLocEntries;
/// Remapping table for source locations in this module.
- ContinuousRangeMap<uint32_t, int, 2> SLocRemap;
+ ContinuousRangeMap<SourceLocation::UIntTy, SourceLocation::IntTy, 2>
+ SLocRemap;
// === Identifiers ===
@@ -298,7 +299,7 @@ public:
///
/// This pointer points into a memory buffer, where the on-disk hash
/// table for identifiers actually lives.
- const char *IdentifierTableData = nullptr;
+ const unsigned char *IdentifierTableData = nullptr;
/// A pointer to an on-disk hash table of opaque type
/// IdentifierHashTable.
diff --git a/clang/include/clang/Serialization/ModuleFileExtension.h b/clang/include/clang/Serialization/ModuleFileExtension.h
index 63562c0d6bd2..34ea870724a4 100644
--- a/clang/include/clang/Serialization/ModuleFileExtension.h
+++ b/clang/include/clang/Serialization/ModuleFileExtension.h
@@ -10,6 +10,7 @@
#define LLVM_CLANG_SERIALIZATION_MODULEFILEEXTENSION_H
#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/Support/ExtensibleRTTI.h"
#include <memory>
#include <string>
@@ -59,8 +60,14 @@ class ModuleFileExtensionWriter;
/// compiled module files (.pcm) and precompiled headers (.pch) via a
/// custom writer that can then be accessed via a custom reader when
/// the module file or precompiled header is loaded.
-class ModuleFileExtension {
+///
+/// Subclasses must use LLVM RTTI for open class hierarchies.
+class ModuleFileExtension
+ : public llvm::RTTIExtends<ModuleFileExtension, llvm::RTTIRoot> {
public:
+ /// Discriminator for LLVM RTTI.
+ static char ID;
+
virtual ~ModuleFileExtension();
/// Retrieves the metadata for this module file extension.
diff --git a/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h b/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
index 0f33909daec0..31a4ed50a723 100644
--- a/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
+++ b/clang/include/clang/StaticAnalyzer/Checkers/SValExplainer.h
@@ -65,7 +65,7 @@ public:
}
std::string VisitLocConcreteInt(loc::ConcreteInt V) {
- llvm::APSInt I = V.getValue();
+ const llvm::APSInt &I = V.getValue();
std::string Str;
llvm::raw_string_ostream OS(Str);
OS << "concrete memory address '" << I << "'";
@@ -77,7 +77,7 @@ public:
}
std::string VisitNonLocConcreteInt(nonloc::ConcreteInt V) {
- llvm::APSInt I = V.getValue();
+ const llvm::APSInt &I = V.getValue();
std::string Str;
llvm::raw_string_ostream OS(Str);
OS << (I.isSigned() ? "signed " : "unsigned ") << I.getBitWidth()
diff --git a/clang/include/clang/StaticAnalyzer/Core/Analyses.def b/clang/include/clang/StaticAnalyzer/Core/Analyses.def
index c4e5f5be6fd7..88c375ce0925 100644
--- a/clang/include/clang/StaticAnalyzer/Core/Analyses.def
+++ b/clang/include/clang/StaticAnalyzer/Core/Analyses.def
@@ -52,9 +52,14 @@ ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html",
"Output analysis results using HTML wrapped with Plists",
createPlistHTMLDiagnosticConsumer)
-ANALYSIS_DIAGNOSTICS(SARIF, "sarif", "Output analysis results in a SARIF file",
+ANALYSIS_DIAGNOSTICS(SARIF, "sarif", "Output analysis results using SARIF",
createSarifDiagnosticConsumer)
+ANALYSIS_DIAGNOSTICS(SARIF_HTML, "sarif-html",
+ "Output analysis results using both SARIF and HTML "
+ "output files",
+ createSarifHTMLDiagnosticConsumer)
+
ANALYSIS_DIAGNOSTICS(TEXT, "text", "Text output of analysis results to stderr",
createTextPathDiagnosticConsumer)
diff --git a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
index 27bc0dda1f1c..3c93ebeccde8 100644
--- a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
+++ b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -432,6 +432,8 @@ public:
void markInteresting(SymbolRef sym, bugreporter::TrackingKind TKind =
bugreporter::TrackingKind::Thorough);
+ void markNotInteresting(SymbolRef sym);
+
/// Marks a region as interesting. Different kinds of interestingness will
/// be processed differently by visitors (e.g. if the tracking kind is
/// condition, will append "will be used as a condition" to the message).
@@ -439,6 +441,8 @@ public:
const MemRegion *R,
bugreporter::TrackingKind TKind = bugreporter::TrackingKind::Thorough);
+ void markNotInteresting(const MemRegion *R);
+
/// Marks a symbolic value as interesting. Different kinds of interestingness
/// will be processed differently by visitors (e.g. if the tracking kind is
/// condition, will append "will be used as a condition" to the message).
@@ -489,11 +493,16 @@ public:
///
/// The visitors should be used when the default trace is not sufficient.
/// For example, they allow constructing a more elaborate trace.
- /// \sa registerConditionVisitor(), registerTrackNullOrUndefValue(),
- /// registerFindLastStore(), registerNilReceiverVisitor(), and
- /// registerVarDeclsLastStore().
+ /// @{
void addVisitor(std::unique_ptr<BugReporterVisitor> visitor);
+ template <class VisitorType, class... Args>
+ void addVisitor(Args &&... ConstructorArgs) {
+ addVisitor(
+ std::make_unique<VisitorType>(std::forward<Args>(ConstructorArgs)...));
+ }
+ /// @}
+
/// Remove all visitors attached to this bug report.
void clearVisitors();
@@ -720,14 +729,43 @@ public:
}
};
+/// The tag that carries some information with it.
+///
+/// It can be valuable to produce tags with some bits of information and later
+/// reuse them for a better diagnostic.
+///
+/// Please make sure that derived class' constructor is private and that the user
+/// can only create objects using DataTag::Factory. This also means that
+/// DataTag::Factory should be friend for every derived class.
+class DataTag : public ProgramPointTag {
+public:
+ StringRef getTagDescription() const override { return "Data Tag"; }
+
+ // Manage memory for DataTag objects.
+ class Factory {
+ std::vector<std::unique_ptr<DataTag>> Tags;
+
+ public:
+ template <class DataTagType, class... Args>
+ const DataTagType *make(Args &&... ConstructorArgs) {
+ // We cannot use std::make_unique because we cannot access the private
+ // constructor from inside it.
+ Tags.emplace_back(
+ new DataTagType(std::forward<Args>(ConstructorArgs)...));
+ return static_cast<DataTagType *>(Tags.back().get());
+ }
+ };
+
+protected:
+ DataTag(void *TagKind) : ProgramPointTag(TagKind) {}
+};
/// The tag upon which the TagVisitor reacts. Add these in order to display
/// additional PathDiagnosticEventPieces along the path.
-class NoteTag : public ProgramPointTag {
+class NoteTag : public DataTag {
public:
- using Callback =
- std::function<std::string(BugReporterContext &,
- PathSensitiveBugReport &)>;
+ using Callback = std::function<std::string(BugReporterContext &,
+ PathSensitiveBugReport &)>;
private:
static int Kind;
@@ -736,7 +774,7 @@ private:
const bool IsPrunable;
NoteTag(Callback &&Cb, bool IsPrunable)
- : ProgramPointTag(&Kind), Cb(std::move(Cb)), IsPrunable(IsPrunable) {}
+ : DataTag(&Kind), Cb(std::move(Cb)), IsPrunable(IsPrunable) {}
public:
static bool classof(const ProgramPointTag *T) {
@@ -761,20 +799,7 @@ public:
bool isPrunable() const { return IsPrunable; }
- // Manage memory for NoteTag objects.
- class Factory {
- std::vector<std::unique_ptr<NoteTag>> Tags;
-
- public:
- const NoteTag *makeNoteTag(Callback &&Cb, bool IsPrunable = false) {
- // We cannot use std::make_unique because we cannot access the private
- // constructor from inside it.
- std::unique_ptr<NoteTag> T(new NoteTag(std::move(Cb), IsPrunable));
- Tags.push_back(std::move(T));
- return Tags.back().get();
- }
- };
-
+ friend class Factory;
friend class TagVisitor;
};
diff --git a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
index 58a88f452ed9..24cae12af24a 100644
--- a/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
+++ b/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h
@@ -19,9 +19,12 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
+#include <list>
#include <memory>
+#include <utility>
namespace clang {
@@ -93,74 +96,293 @@ enum class TrackingKind {
/// gathered about the tracked expression value as possible.
Thorough,
/// Specifies that a more moderate tracking should be used for the expression
- /// value. This will essentially make sure that functions relevant to the it
+ /// value. This will essentially make sure that functions relevant to it
/// aren't pruned, but otherwise relies on the user reading the code or
/// following the arrows.
Condition
};
-/// Attempts to add visitors to track expression value back to its point of
-/// origin.
-///
-/// \param N A node "downstream" from the evaluation of the statement.
-/// \param E The expression value which we are tracking
-/// \param R The bug report to which visitors should be attached.
-/// \param EnableNullFPSuppression Whether we should employ false positive
-/// suppression (inlined defensive checks, returned null).
+/// Defines a set of options altering tracking behavior.
+struct TrackingOptions {
+ /// Specifies the kind of tracking.
+ TrackingKind Kind = TrackingKind::Thorough;
+ /// Specifies whether we should employ false positive suppression
+ /// (inlined defensive checks, returned null).
+ bool EnableNullFPSuppression = true;
+};
+
+/// Describes an event when the value got stored into a memory region.
///
-/// \return Whether or not the function was able to add visitors for this
-/// statement. Note that returning \c true does not actually imply
-/// that any visitors were added.
-bool trackExpressionValue(const ExplodedNode *N, const Expr *E,
- PathSensitiveBugReport &R,
- TrackingKind TKind = TrackingKind::Thorough,
- bool EnableNullFPSuppression = true);
+/// As opposed to checker checkBind API, it reacts also to binds
+/// generated by the checker as well. It can be useful when the binding
+/// happened as a result of evalCall, for example.
+struct StoreInfo {
+ enum Kind {
+ /// The value got stored into the region during initialization:
+ /// int x = 42;
+ Initialization,
+ /// The value got stored into the region during assignment:
+ /// int x;
+ /// x = 42;
+ Assignment,
+ /// The value got stored into the parameter region as the result
+ /// of a call.
+ CallArgument,
+ /// The value got stored into the region as block capture.
+ /// Block data is modeled as a separate region, thus whenever
+ /// the analyzer sees a captured variable, its value is copied
+ /// into a special block region.
+ BlockCapture
+ };
+
+ /// The type of store operation.
+ Kind StoreKind;
+ /// The node where the store happened.
+ const ExplodedNode *StoreSite;
+ /// The expression where the value comes from.
+ /// NOTE: might be null.
+ const Expr *SourceOfTheValue;
+ /// Symbolic value that is being stored.
+ SVal Value;
+ /// Memory regions involved in the store operation.
+ /// Dest <- Origin
+ /// NOTE: Origin might be null, when the stored value doesn't come
+ /// from another region.
+ const MemRegion *Dest, *Origin;
+};
-const Expr *getDerefExpr(const Stmt *S);
+class Tracker;
+using TrackerRef = llvm::IntrusiveRefCntPtr<Tracker>;
-} // namespace bugreporter
+class ExpressionHandler;
+class StoreHandler;
-/// Finds last store into the given region,
-/// which is different from a given symbolic value.
-class FindLastStoreBRVisitor final : public BugReporterVisitor {
- const MemRegion *R;
- SVal V;
- bool Satisfied = false;
+/// A generalized component for tracking expressions, values, and stores.
+///
+/// Tracker aims at providing a sensible set of default behaviors that can be
+/// used by any checker, while providing mechanisms to hook into any part of the
+/// tracking process and insert checker-specific logic.
+class Tracker : public llvm::RefCountedBase<Tracker> {
+private:
+ using ExpressionHandlerPtr = std::unique_ptr<ExpressionHandler>;
+ using StoreHandlerPtr = std::unique_ptr<StoreHandler>;
- /// If the visitor is tracking the value directly responsible for the
- /// bug, we are going to employ false positive suppression.
- bool EnableNullFPSuppression;
+ PathSensitiveBugReport &Report;
+ std::list<ExpressionHandlerPtr> ExpressionHandlers;
+ std::list<StoreHandlerPtr> StoreHandlers;
- using TrackingKind = bugreporter::TrackingKind;
- TrackingKind TKind;
- const StackFrameContext *OriginSFC;
+protected:
+ /// \param Report The bug report to which visitors should be attached.
+ Tracker(PathSensitiveBugReport &Report);
public:
+ virtual ~Tracker() = default;
+
+ static TrackerRef create(PathSensitiveBugReport &Report) {
+ return new Tracker(Report);
+ }
+
+ PathSensitiveBugReport &getReport() { return Report; }
+
+ /// Describes a tracking result with the most basic information of what was
+ /// actually done (or not done).
+ struct Result {
+ /// Usually it means that the tracker added visitors.
+ bool FoundSomethingToTrack = false;
+ /// Signifies that the tracking was interrupted at some point.
+ /// Usually this information is important only for sub-trackers.
+ bool WasInterrupted = false;
+
+ /// Combines the current result with the given result.
+ void combineWith(const Result &Other) {
+ // If we found something in one of the cases, we can
+ // say we found something overall.
+ FoundSomethingToTrack |= Other.FoundSomethingToTrack;
+ // The same goes to the interruption.
+ WasInterrupted |= Other.WasInterrupted;
+ }
+ };
+
+ /// Track expression value back to its point of origin.
+ ///
+ /// \param E The expression value which we are tracking
+ /// \param N A node "downstream" from the evaluation of the statement.
+ /// \param Opts Tracking options specifying how we want to track the value.
+ virtual Result track(const Expr *E, const ExplodedNode *N,
+ TrackingOptions Opts = {});
+
+ /// Track how the value got stored into the given region and where it came
+ /// from.
+ ///
/// \param V We're searching for the store where \c R received this value.
/// \param R The region we're tracking.
- /// \param TKind May limit the amount of notes added to the bug report.
- /// \param OriginSFC Only adds notes when the last store happened in a
+ /// \param Opts Tracking options specifying how we want to track the value.
+ /// \param Origin Only adds notes when the last store happened in a
/// different stackframe to this one. Disregarded if the tracking kind
/// is thorough.
/// This is useful, because for non-tracked regions, notes about
/// changes to its value in a nested stackframe could be pruned, and
/// this visitor can prevent that without polluting the bugpath too
/// much.
- FindLastStoreBRVisitor(KnownSVal V, const MemRegion *R,
- bool InEnableNullFPSuppression, TrackingKind TKind,
- const StackFrameContext *OriginSFC = nullptr)
- : R(R), V(V), EnableNullFPSuppression(InEnableNullFPSuppression),
- TKind(TKind), OriginSFC(OriginSFC) {
- assert(R);
+ virtual Result track(SVal V, const MemRegion *R, TrackingOptions Opts = {},
+ const StackFrameContext *Origin = nullptr);
+
+ /// Handle the store operation and produce the note.
+ ///
+ /// \param SI The information fully describing the store.
+ /// \param Opts Tracking options specifying how we got to it.
+ ///
+ /// NOTE: this method is designed for sub-trackers and visitors.
+ virtual PathDiagnosticPieceRef handle(StoreInfo SI, BugReporterContext &BRC,
+ TrackingOptions Opts);
+
+ /// Add custom expression handler with the highest priority.
+ ///
+ /// It means that it will be asked for handling first, and can prevent
+ /// other handlers from running if it decides to interrupt.
+ void addHighPriorityHandler(ExpressionHandlerPtr SH) {
+ ExpressionHandlers.push_front(std::move(SH));
}
- void Profile(llvm::FoldingSetNodeID &ID) const override;
+ /// Add custom expression handler with the lowest priority.
+ ///
+ /// It means that it will be asked for handling last, and other handlers can
+ /// prevent it from running if any of them decides to interrupt.
+ void addLowPriorityHandler(ExpressionHandlerPtr SH) {
+ ExpressionHandlers.push_back(std::move(SH));
+ }
- PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
+ /// Add custom store handler with the highest priority.
+ ///
+ /// It means that it will be asked for handling first, and will prevent
+ /// other handlers from running if it produces non-null note.
+ void addHighPriorityHandler(StoreHandlerPtr SH) {
+ StoreHandlers.push_front(std::move(SH));
+ }
+
+ /// Add custom store handler with the lowest priority.
+ ///
+ /// It means that it will be asked for handling last, only
+ /// if all other handlers failed to produce the note.
+ void addLowPriorityHandler(StoreHandlerPtr SH) {
+ StoreHandlers.push_back(std::move(SH));
+ }
+
+ /// Add custom expression/store handler with the highest priority
+ ///
+ /// See other overloads for explanation.
+ template <class HandlerType, class... Args>
+ void addHighPriorityHandler(Args &&... ConstructorArgs) {
+ addHighPriorityHandler(std::make_unique<HandlerType>(
+ *this, std::forward<Args>(ConstructorArgs)...));
+ }
+
+ /// Add custom expression/store handler with the lowest priority
+ ///
+ /// See other overloads for explanation.
+ template <class HandlerType, class... Args>
+ void addLowPriorityHandler(Args &&... ConstructorArgs) {
+ addLowPriorityHandler(std::make_unique<HandlerType>(
+ *this, std::forward<Args>(ConstructorArgs)...));
+ }
+};
+
+/// Handles expressions during the tracking.
+class ExpressionHandler {
+private:
+ Tracker &ParentTracker;
+
+public:
+ ExpressionHandler(Tracker &ParentTracker) : ParentTracker(ParentTracker) {}
+ virtual ~ExpressionHandler() {}
+
+ /// Handle the given expression from the given node.
+ ///
+ /// \param E The expression value which we are tracking
+ /// \param Original A node "downstream" where the tracking started.
+ /// \param ExprNode A node where the evaluation of \c E actually happens.
+ /// \param Opts Tracking options specifying how we are tracking the value.
+ virtual Tracker::Result handle(const Expr *E, const ExplodedNode *Original,
+ const ExplodedNode *ExprNode,
+ TrackingOptions Opts) = 0;
+
+ /// \return the tracker that initiated the process.
+ Tracker &getParentTracker() { return ParentTracker; }
+};
+
+/// Handles stores during the tracking.
+class StoreHandler {
+private:
+ Tracker &ParentTracker;
+
+public:
+ StoreHandler(Tracker &ParentTracker) : ParentTracker(ParentTracker) {}
+ virtual ~StoreHandler() {}
+
+ /// Handle the given store and produce the note.
+ ///
+ /// \param SI The information fully describing the store.
+ /// \param Opts Tracking options specifying how we are tracking the value.
+ ///
+ /// \return the produced note, null if the handler doesn't support this kind
+ /// of stores.
+ virtual PathDiagnosticPieceRef handle(StoreInfo SI, BugReporterContext &BRC,
+ TrackingOptions Opts) = 0;
+
+ Tracker &getParentTracker() { return ParentTracker; }
+
+protected:
+ PathDiagnosticPieceRef constructNote(StoreInfo SI, BugReporterContext &BRC,
+ StringRef NodeText);
};
+/// Visitor that tracks expressions and values.
+class TrackingBugReporterVisitor : public BugReporterVisitor {
+private:
+ TrackerRef ParentTracker;
+
+public:
+ TrackingBugReporterVisitor(TrackerRef ParentTracker)
+ : ParentTracker(ParentTracker) {}
+
+ Tracker &getParentTracker() { return *ParentTracker; }
+};
+
+/// Attempts to add visitors to track expression value back to its point of
+/// origin.
+///
+/// \param N A node "downstream" from the evaluation of the statement.
+/// \param E The expression value which we are tracking
+/// \param R The bug report to which visitors should be attached.
+/// \param Opts Tracking options specifying how we are tracking the value.
+///
+/// \return Whether or not the function was able to add visitors for this
+/// statement. Note that returning \c true does not actually imply
+/// that any visitors were added.
+bool trackExpressionValue(const ExplodedNode *N, const Expr *E,
+ PathSensitiveBugReport &R, TrackingOptions Opts = {});
+
+/// Track how the value got stored into the given region and where it came
+/// from.
+///
+/// \param V We're searching for the store where \c R received this value.
+/// \param R The region we're tracking.
+/// \param Opts Tracking options specifying how we want to track the value.
+/// \param Origin Only adds notes when the last store happened in a
+/// different stackframe to this one. Disregarded if the tracking kind
+/// is thorough.
+/// This is useful, because for non-tracked regions, notes about
+/// changes to its value in a nested stackframe could be pruned, and
+/// this visitor can prevent that without polluting the bugpath too
+/// much.
+void trackStoredValue(KnownSVal V, const MemRegion *R,
+ PathSensitiveBugReport &Report, TrackingOptions Opts = {},
+ const StackFrameContext *Origin = nullptr);
+
+const Expr *getDerefExpr(const Stmt *S);
+
+} // namespace bugreporter
+
class TrackConstraintBRVisitor final : public BugReporterVisitor {
DefinedSVal Constraint;
bool Assumption;
diff --git a/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h b/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
index 637b89fd9036..392bc484bf62 100644
--- a/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
+++ b/clang/include/clang/StaticAnalyzer/Core/BugReporter/CommonBugCategories.h
@@ -19,7 +19,9 @@ extern const char *const MemoryRefCount;
extern const char *const MemoryError;
extern const char *const UnixAPI;
extern const char *const CXXObjectLifecycle;
+extern const char *const CXXMoveSemantics;
extern const char *const SecurityError;
+extern const char *const UnusedCode;
} // namespace categories
} // namespace ento
} // namespace clang
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
index f40f88eb32ff..71a590d9e9a2 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
@@ -21,7 +21,9 @@
namespace clang {
class AnalyzerOptions;
+class MacroExpansionContext;
class Preprocessor;
+
namespace cross_tu {
class CrossTranslationUnitContext;
}
@@ -35,7 +37,8 @@ typedef std::vector<PathDiagnosticConsumer*> PathDiagnosticConsumers;
void CREATEFN(PathDiagnosticConsumerOptions Diagopts, \
PathDiagnosticConsumers &C, const std::string &Prefix, \
const Preprocessor &PP, \
- const cross_tu::CrossTranslationUnitContext &CTU);
+ const cross_tu::CrossTranslationUnitContext &CTU, \
+ const MacroExpansionContext &MacroExpansions);
#include "clang/StaticAnalyzer/Core/Analyses.def"
} // end 'ento' namespace
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
index 142b1ab11750..bb598af68166 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -52,6 +52,8 @@ public:
iterator begin() const { return L.begin(); }
iterator end() const { return L.end(); }
+ QualType getType() const { return T; }
+
static void Profile(llvm::FoldingSetNodeID& ID, QualType T,
llvm::ImmutableList<SVal> L);
@@ -139,6 +141,12 @@ public:
/// Returns the type of the APSInt used to store values of the given QualType.
APSIntType getAPSIntType(QualType T) const {
+ // For the purposes of the analysis and constraints, we treat atomics
+ // as their underlying types.
+ if (const AtomicType *AT = T->getAs<AtomicType>()) {
+ T = AT->getValueType();
+ }
+
assert(T->isIntegralOrEnumerationType() || Loc::isLocType(T));
return APSIntType(Ctx.getIntWidth(T),
!T->isSignedIntegerOrEnumerationType());
@@ -258,9 +266,9 @@ public:
return CXXBaseListFactory.add(CBS, L);
}
- const PointerToMemberData *accumCXXBase(
- llvm::iterator_range<CastExpr::path_const_iterator> PathRange,
- const nonloc::PointerToMember &PTM);
+ const PointerToMemberData *
+ accumCXXBase(llvm::iterator_range<CastExpr::path_const_iterator> PathRange,
+ const nonloc::PointerToMember &PTM, const clang::CastKind &kind);
const llvm::APSInt* evalAPSInt(BinaryOperator::Opcode Op,
const llvm::APSInt& V1,
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
index 54572bd81f20..a383012dc351 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -255,7 +255,7 @@ public:
/// to omit the note from the report if it would make the displayed
/// bug path significantly shorter.
const NoteTag *getNoteTag(NoteTag::Callback &&Cb, bool IsPrunable = false) {
- return Eng.getNoteTags().makeNoteTag(std::move(Cb), IsPrunable);
+ return Eng.getDataTags().make<NoteTag>(std::move(Cb), IsPrunable);
}
/// A shorthand version of getNoteTag that doesn't require you to accept
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
index f253c14cc487..a81d67ab3063 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -13,7 +13,9 @@
#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_CHECKERHELPERS_H
#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_CHECKERHELPERS_H
+#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
+#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/Optional.h"
#include <tuple>
@@ -69,6 +71,45 @@ Nullability getNullabilityAnnotation(QualType Type);
/// token for an integer. If we cannot parse the value then None is returned.
llvm::Optional<int> tryExpandAsInteger(StringRef Macro, const Preprocessor &PP);
+class OperatorKind {
+ union {
+ BinaryOperatorKind Bin;
+ UnaryOperatorKind Un;
+ } Op;
+ bool IsBinary;
+
+public:
+ explicit OperatorKind(BinaryOperatorKind Bin) : Op{Bin}, IsBinary{true} {}
+ explicit OperatorKind(UnaryOperatorKind Un) : IsBinary{false} { Op.Un = Un; }
+ bool IsBinaryOp() const { return IsBinary; }
+
+ BinaryOperatorKind GetBinaryOpUnsafe() const {
+ assert(IsBinary && "cannot get binary operator - we have a unary operator");
+ return Op.Bin;
+ }
+
+ Optional<BinaryOperatorKind> GetBinaryOp() const {
+ if (IsBinary)
+ return Op.Bin;
+ return {};
+ }
+
+ UnaryOperatorKind GetUnaryOpUnsafe() const {
+ assert(!IsBinary &&
+ "cannot get unary operator - we have a binary operator");
+ return Op.Un;
+ }
+
+ Optional<UnaryOperatorKind> GetUnaryOp() const {
+ if (!IsBinary)
+ return Op.Un;
+ return {};
+ }
+};
+
+OperatorKind operationKindFromOverloadedOperator(OverloadedOperatorKind OOK,
+ bool IsBinary);
+
} // namespace ento
} // namespace clang
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
index 2aca2c99ef4f..9898b9b42f4b 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -96,9 +96,10 @@ private:
/// (This data is owned by AnalysisConsumer.)
FunctionSummariesTy *FunctionSummaries;
- /// Add path note tags along the path when we see that something interesting
- /// is happening. This field is the allocator for such tags.
- NoteTag::Factory NoteTags;
+ /// Add path tags with some useful data along the path when we see that
+ /// something interesting is happening. This field is the allocator for such
+ /// tags.
+ DataTag::Factory DataTags;
void generateNode(const ProgramPoint &Loc,
ProgramStateRef State,
@@ -200,7 +201,7 @@ public:
/// Enqueue a single node created as a result of statement processing.
void enqueueStmtNode(ExplodedNode *N, const CFGBlock *Block, unsigned Idx);
- NoteTag::Factory &getNoteTags() { return NoteTags; }
+ DataTag::Factory &getDataTags() { return DataTags; }
};
// TODO: Turn into a class.
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h
new file mode 100644
index 000000000000..cfd7aa9664b6
--- /dev/null
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h
@@ -0,0 +1,59 @@
+//===- DynamicExtent.h - Dynamic extent related APIs ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs that track and query dynamic extent information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICEXTENT_H
+#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICEXTENT_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+
+namespace clang {
+namespace ento {
+
+/// \returns The stored dynamic extent for the region \p MR.
+DefinedOrUnknownSVal getDynamicExtent(ProgramStateRef State,
+ const MemRegion *MR, SValBuilder &SVB);
+
+/// \returns The element extent of the type \p Ty.
+DefinedOrUnknownSVal getElementExtent(QualType Ty, SValBuilder &SVB);
+
+/// \returns The stored element count of the region \p MR.
+DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
+ const MemRegion *MR,
+ SValBuilder &SVB, QualType Ty);
+
+/// Set the dynamic extent \p Extent of the region \p MR.
+ProgramStateRef setDynamicExtent(ProgramStateRef State, const MemRegion *MR,
+ DefinedOrUnknownSVal Extent, SValBuilder &SVB);
+
+/// Get the dynamic extent for a symbolic value that represents a buffer. If
+/// there is an offsetting to the underlying buffer we consider that too.
+/// Returns with an SVal that represents the extent, this is Unknown if the
+/// engine cannot deduce the extent.
+/// E.g.
+/// char buf[3];
+/// (buf); // extent is 3
+/// (buf + 1); // extent is 2
+/// (buf + 3); // extent is 0
+/// (buf + 4); // extent is -1
+///
+/// char *bufptr;
+/// (bufptr) // extent is unknown
+SVal getDynamicExtentWithOffset(ProgramStateRef State, SVal BufV);
+
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICEXTENT_H
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h
deleted file mode 100644
index 398f9b6ac33a..000000000000
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h
+++ /dev/null
@@ -1,53 +0,0 @@
-//===- DynamicSize.h - Dynamic size related APIs ----------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines APIs that track and query dynamic size information.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICSIZE_H
-#define LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICSIZE_H
-
-#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
-
-namespace clang {
-namespace ento {
-
-/// Get the stored dynamic size for the region \p MR.
-DefinedOrUnknownSVal getDynamicSize(ProgramStateRef State, const MemRegion *MR,
- SValBuilder &SVB);
-
-/// Get the stored element count of the region \p MR.
-DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
- const MemRegion *MR,
- SValBuilder &SVB,
- QualType ElementTy);
-
-/// Get the dynamic size for a symbolic value that represents a buffer. If
-/// there is an offsetting to the underlying buffer we consider that too.
-/// Returns with an SVal that represents the size, this is Unknown if the
-/// engine cannot deduce the size.
-/// E.g.
-/// char buf[3];
-/// (buf); // size is 3
-/// (buf + 1); // size is 2
-/// (buf + 3); // size is 0
-/// (buf + 4); // size is -1
-///
-/// char *bufptr;
-/// (bufptr) // size is unknown
-SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV);
-
-} // namespace ento
-} // namespace clang
-
-#endif // LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_DYNAMICSIZE_H
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
index 2679339537e8..ffe1fe846be1 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/DynamicType.h
@@ -24,7 +24,6 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
-#include <utility>
namespace clang {
namespace ento {
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
index 582a56cbee1e..cef7dda172f3 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -428,8 +428,7 @@ public:
SymbolManager &getSymbolManager() { return SymMgr; }
MemRegionManager &getRegionManager() { return MRMgr; }
- NoteTag::Factory &getNoteTags() { return Engine.getNoteTags(); }
-
+ DataTag::Factory &getDataTags() { return Engine.getDataTags(); }
// Functions for external checking of whether we have unfinished work
bool wasBlocksExhausted() const { return Engine.wasBlocksExhausted(); }
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
index d25d26435454..53b221cb53c9 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopUnrolling.h
@@ -5,7 +5,7 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-///
+/// \file
/// This header contains the declarations of functions which are used to decide
/// which loops should be completely unrolled and mark their corresponding
/// CFGBlocks. It is done by tracking a stack of loops in the ProgramState. This
@@ -18,7 +18,6 @@
/// has to be initialized by a literal in the corresponding initStmt.
/// - Does not contain goto, switch and returnStmt.
///
-///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_STATICANALYZER_CORE_PATHSENSITIVE_LOOPUNROLLING_H
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h
index 7484a51b1eda..e75228f92a8e 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/LoopWidening.h
@@ -5,7 +5,7 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-///
+/// \file
/// This header contains the declarations of functions which are used to widen
/// loops which do not otherwise exit. The widening is done by invalidating
/// anything which might be modified by the body of the loop.
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
index bc5d5f57cd68..c67df1e51b4f 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/RangedConstraintManager.h
@@ -16,6 +16,8 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SimpleConstraintManager.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/Support/Allocator.h"
namespace clang {
@@ -24,21 +26,19 @@ namespace ento {
/// A Range represents the closed range [from, to]. The caller must
/// guarantee that from <= to. Note that Range is immutable, so as not
/// to subvert RangeSet's immutability.
-class Range : public std::pair<const llvm::APSInt *, const llvm::APSInt *> {
+class Range {
public:
- Range(const llvm::APSInt &from, const llvm::APSInt &to)
- : std::pair<const llvm::APSInt *, const llvm::APSInt *>(&from, &to) {
- assert(from <= to);
+ Range(const llvm::APSInt &From, const llvm::APSInt &To) : Impl(&From, &To) {
+ assert(From <= To);
}
- Range(const llvm::APSInt &point)
- : std::pair<const llvm::APSInt *, const llvm::APSInt *>(&point, &point) {}
+ Range(const llvm::APSInt &Point) : Range(Point, Point) {}
- bool Includes(const llvm::APSInt &v) const {
- return *first <= v && v <= *second;
+ bool Includes(const llvm::APSInt &Point) const {
+ return From() <= Point && Point <= To();
}
- const llvm::APSInt &From() const { return *first; }
- const llvm::APSInt &To() const { return *second; }
+ const llvm::APSInt &From() const { return *Impl.first; }
+ const llvm::APSInt &To() const { return *Impl.second; }
const llvm::APSInt *getConcreteValue() const {
return &From() == &To() ? &From() : nullptr;
}
@@ -47,93 +47,267 @@ public:
ID.AddPointer(&From());
ID.AddPointer(&To());
}
-};
+ void dump(raw_ostream &OS) const;
-class RangeTrait : public llvm::ImutContainerInfo<Range> {
-public:
- // When comparing if one Range is less than another, we should compare
- // the actual APSInt values instead of their pointers. This keeps the order
- // consistent (instead of comparing by pointer values) and can potentially
- // be used to speed up some of the operations in RangeSet.
- static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
- return *lhs.first < *rhs.first ||
- (!(*rhs.first < *lhs.first) && *lhs.second < *rhs.second);
- }
+ // In order to keep non-overlapping ranges sorted, we can compare only From
+ // points.
+ bool operator<(const Range &RHS) const { return From() < RHS.From(); }
+
+ bool operator==(const Range &RHS) const { return Impl == RHS.Impl; }
+ bool operator!=(const Range &RHS) const { return !operator==(RHS); }
+
+private:
+ std::pair<const llvm::APSInt *, const llvm::APSInt *> Impl;
};
-/// RangeSet contains a set of ranges. If the set is empty, then
-/// there the value of a symbol is overly constrained and there are no
-/// possible values for that symbol.
+/// @class RangeSet is a persistent set of non-overlapping ranges.
+///
+/// New RangeSet objects can be ONLY produced by RangeSet::Factory object, which
+/// also supports the most common operations performed on range sets.
+///
+/// Empty set corresponds to an overly constrained symbol meaning that there
+/// are no possible values for that symbol.
class RangeSet {
- typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
- PrimRangeSet ranges; // no need to make const, since it is an
- // ImmutableSet - this allows default operator=
- // to work.
public:
- typedef PrimRangeSet::Factory Factory;
- typedef PrimRangeSet::iterator iterator;
-
- RangeSet(PrimRangeSet RS) : ranges(RS) {}
-
- /// Create a new set with all ranges of this set and RS.
- /// Possible intersections are not checked here.
- RangeSet addRange(Factory &F, const RangeSet &RS) {
- PrimRangeSet Ranges(RS.ranges);
- for (const auto &range : ranges)
- Ranges = F.add(Ranges, range);
- return RangeSet(Ranges);
- }
-
- iterator begin() const { return ranges.begin(); }
- iterator end() const { return ranges.end(); }
+ class Factory;
- bool isEmpty() const { return ranges.isEmpty(); }
+private:
+ // We use llvm::SmallVector as the underlying container for the following
+ // reasons:
+ //
+ // * Range sets are usually very simple, 1 or 2 ranges.
+ // That's why llvm::ImmutableSet is not perfect.
+ //
+ // * Ranges in sets are NOT overlapping, so it is natural to keep them
+ // sorted for efficient operations and queries. For this reason,
+ // llvm::SmallSet doesn't fit the requirements, it is not sorted when it
+ // is a vector.
+ //
+  //  * Range set operations are usually a bit harder than add/remove a range.
+ // Complex operations might do many of those for just one range set.
+ // Formerly it used to be llvm::ImmutableSet, which is inefficient for our
+ // purposes as we want to make these operations BOTH immutable AND
+ // efficient.
+ //
+ // * Iteration over ranges is widespread and a more cache-friendly
+ // structure is preferred.
+ using ImplType = llvm::SmallVector<Range, 4>;
+
+ struct ContainerType : public ImplType, public llvm::FoldingSetNode {
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ for (const Range &It : *this) {
+ It.Profile(ID);
+ }
+ }
+ };
+ // This is a non-owning pointer to an actual container.
+ // The memory is fully managed by the factory and is alive as long as the
+ // factory itself is alive.
+ // It is a pointer as opposed to a reference, so we can easily reassign
+ // RangeSet objects.
+ using UnderlyingType = const ContainerType *;
+ UnderlyingType Impl;
- /// Construct a new RangeSet representing '{ [from, to] }'.
- RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
- : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+public:
+ using const_iterator = ImplType::const_iterator;
+
+ const_iterator begin() const { return Impl->begin(); }
+ const_iterator end() const { return Impl->end(); }
+ size_t size() const { return Impl->size(); }
+
+ bool isEmpty() const { return Impl->empty(); }
+
+ class Factory {
+ public:
+ Factory(BasicValueFactory &BV) : ValueFactory(BV) {}
+
+ /// Create a new set with all ranges from both LHS and RHS.
+ /// Possible intersections are not checked here.
+ ///
+ /// Complexity: O(N + M)
+ /// where N = size(LHS), M = size(RHS)
+ RangeSet add(RangeSet LHS, RangeSet RHS);
+ /// Create a new set with all ranges from the original set plus the new one.
+ /// Possible intersections are not checked here.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(Original)
+ RangeSet add(RangeSet Original, Range Element);
+ /// Create a new set with all ranges from the original set plus the point.
+ /// Possible intersections are not checked here.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(Original)
+ RangeSet add(RangeSet Original, const llvm::APSInt &Point);
+
+ RangeSet getEmptySet() { return &EmptySet; }
+
+ /// Create a new set with just one range.
+ /// @{
+ RangeSet getRangeSet(Range Origin);
+ RangeSet getRangeSet(const llvm::APSInt &From, const llvm::APSInt &To) {
+ return getRangeSet(Range(From, To));
+ }
+ RangeSet getRangeSet(const llvm::APSInt &Origin) {
+ return getRangeSet(Origin, Origin);
+ }
+ /// @}
+
+ /// Intersect the given range sets.
+ ///
+ /// Complexity: O(N + M)
+ /// where N = size(LHS), M = size(RHS)
+ RangeSet intersect(RangeSet LHS, RangeSet RHS);
+ /// Intersect the given set with the closed range [Lower, Upper].
+ ///
+ /// Unlike the Range type, this range uses modular arithmetic, corresponding
+ /// to the common treatment of C integer overflow. Thus, if the Lower bound
+ /// is greater than the Upper bound, the range is taken to wrap around. This
+ /// is equivalent to taking the intersection with the two ranges [Min,
+ /// Upper] and [Lower, Max], or, alternatively, /removing/ all integers
+ /// between Upper and Lower.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(What)
+ RangeSet intersect(RangeSet What, llvm::APSInt Lower, llvm::APSInt Upper);
+ /// Intersect the given range with the given point.
+ ///
+ /// The result can be either an empty set or a set containing the given
+ /// point depending on whether the point is in the range set.
+ ///
+ /// Complexity: O(logN)
+ /// where N = size(What)
+ RangeSet intersect(RangeSet What, llvm::APSInt Point);
+
+ /// Delete the given point from the range set.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(From)
+ RangeSet deletePoint(RangeSet From, const llvm::APSInt &Point);
+ /// Negate the given range set.
+ ///
+ /// Turn all [A, B] ranges to [-B, -A], when "-" is a C-like unary minus
+ /// operation under the values of the type.
+ ///
+ /// We also handle MIN because applying unary minus to MIN does not change
+ /// it.
+ /// Example 1:
+ /// char x = -128; // -128 is a MIN value in a range of 'char'
+ /// char y = -x; // y: -128
+ ///
+ /// Example 2:
+ /// unsigned char x = 0; // 0 is a MIN value in a range of 'unsigned char'
+ /// unsigned char y = -x; // y: 0
+ ///
+    /// And it makes us separate the range
+ /// like [MIN, N] to [MIN, MIN] U [-N, MAX].
+ /// For instance, whole range is {-128..127} and subrange is [-128,-126],
+ /// thus [-128,-127,-126,...] negates to [-128,...,126,127].
+ ///
+ /// Negate restores disrupted ranges on bounds,
+ /// e.g. [MIN, B] => [MIN, MIN] U [-B, MAX] => [MIN, B].
+ ///
+ /// Negate is a self-inverse function, i.e. negate(negate(R)) == R.
+ ///
+ /// Complexity: O(N)
+ /// where N = size(What)
+ RangeSet negate(RangeSet What);
+
+ /// Return associated value factory.
+ BasicValueFactory &getValueFactory() const { return ValueFactory; }
+
+ private:
+ /// Return a persistent version of the given container.
+ RangeSet makePersistent(ContainerType &&From);
+ /// Construct a new persistent version of the given container.
+ ContainerType *construct(ContainerType &&From);
+
+ RangeSet intersect(const ContainerType &LHS, const ContainerType &RHS);
+
+ // Many operations include producing new APSInt values and that's why
+ // we need this factory.
+ BasicValueFactory &ValueFactory;
+ // Allocator for all the created containers.
+ // Containers might own their own memory and that's why it is specific
+ // for the type, so it calls container destructors upon deletion.
+ llvm::SpecificBumpPtrAllocator<ContainerType> Arena;
+ // Usually we deal with the same ranges and range sets over and over.
+ // Here we track all created containers and try not to repeat ourselves.
+ llvm::FoldingSet<ContainerType> Cache;
+ static ContainerType EmptySet;
+ };
+
+ RangeSet(const RangeSet &) = default;
+ RangeSet &operator=(const RangeSet &) = default;
+ RangeSet(RangeSet &&) = default;
+ RangeSet &operator=(RangeSet &&) = default;
+ ~RangeSet() = default;
+
+ /// Construct a new RangeSet representing '{ [From, To] }'.
+ RangeSet(Factory &F, const llvm::APSInt &From, const llvm::APSInt &To)
+ : RangeSet(F.getRangeSet(From, To)) {}
/// Construct a new RangeSet representing the given point as a range.
- RangeSet(Factory &F, const llvm::APSInt &point) : RangeSet(F, point, point) {}
+ RangeSet(Factory &F, const llvm::APSInt &Point)
+ : RangeSet(F.getRangeSet(Point)) {}
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const RangeSet &RS) {
+ ID.AddPointer(RS.Impl);
+ }
/// Profile - Generates a hash profile of this RangeSet for use
/// by FoldingSet.
- void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+ void Profile(llvm::FoldingSetNodeID &ID) const { Profile(ID, *this); }
- /// getConcreteValue - If a symbol is contrained to equal a specific integer
+ /// getConcreteValue - If a symbol is constrained to equal a specific integer
/// constant then this method returns that value. Otherwise, it returns
/// NULL.
const llvm::APSInt *getConcreteValue() const {
- return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : nullptr;
+ return Impl->size() == 1 ? begin()->getConcreteValue() : nullptr;
}
- /// Get a minimal value covered by the ranges in the set
+ /// Get the minimal value covered by the ranges in the set.
+ ///
+ /// Complexity: O(1)
const llvm::APSInt &getMinValue() const;
- /// Get a maximal value covered by the ranges in the set
+ /// Get the maximal value covered by the ranges in the set.
+ ///
+ /// Complexity: O(1)
const llvm::APSInt &getMaxValue() const;
-private:
- void IntersectInRange(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower, const llvm::APSInt &Upper,
- PrimRangeSet &newRanges, PrimRangeSet::iterator &i,
- PrimRangeSet::iterator &e) const;
+ /// Test whether the given point is contained by any of the ranges.
+ ///
+ /// Complexity: O(logN)
+ /// where N = size(this)
+ bool contains(llvm::APSInt Point) const { return containsImpl(Point); }
+
+ void dump(raw_ostream &OS) const;
+
+ bool operator==(const RangeSet &Other) const { return *Impl == *Other.Impl; }
+ bool operator!=(const RangeSet &Other) const { return !(*this == Other); }
+private:
+ /* implicit */ RangeSet(ContainerType *RawContainer) : Impl(RawContainer) {}
+ /* implicit */ RangeSet(UnderlyingType Ptr) : Impl(Ptr) {}
+
+ /// Pin given points to the type represented by the current range set.
+ ///
+  /// This makes the given points in-out parameters.
+ /// In order to maintain consistent types across all of the ranges in the set
+ /// and to keep all the operations to compare ONLY points of the same type, we
+ /// need to pin every point before any operation.
+ ///
+  /// @returns true if the given points can be converted to the target type
+ /// without changing the values (i.e. trivially) and false otherwise.
+ /// @{
bool pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const;
+ bool pin(llvm::APSInt &Point) const;
+ /// @}
-public:
- RangeSet Intersect(BasicValueFactory &BV, Factory &F, llvm::APSInt Lower,
- llvm::APSInt Upper) const;
- RangeSet Intersect(BasicValueFactory &BV, Factory &F,
- const RangeSet &Other) const;
- RangeSet Negate(BasicValueFactory &BV, Factory &F) const;
- RangeSet Delete(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Point) const;
-
- void print(raw_ostream &os) const;
-
- bool operator==(const RangeSet &other) const {
- return ranges == other.ranges;
- }
+ // This version of this function modifies its arguments (pins it).
+ bool containsImpl(llvm::APSInt &Point) const;
+
+ friend class Factory;
};
using ConstraintMap = llvm::ImmutableMap<SymbolRef, RangeSet>;
@@ -213,6 +387,11 @@ private:
static void computeAdjustment(SymbolRef &Sym, llvm::APSInt &Adjustment);
};
+/// Try to simplify a given symbolic expression's associated value based on the
+/// constraints in State. This is needed because the Environment bindings are
+/// not getting updated when a new constraint is added to the State.
+SymbolRef simplify(ProgramStateRef State, SymbolRef Sym);
+
} // namespace ento
} // namespace clang
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
index 07fc73a670f3..e4878d4e0156 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SMTConstraintManager.h
@@ -146,7 +146,7 @@ public:
Solver->addConstraint(NotExp);
Optional<bool> isNotSat = Solver->check();
- if (!isSat.hasValue() || isNotSat.getValue())
+ if (!isNotSat.hasValue() || isNotSat.getValue())
return nullptr;
// This is the only solution, store it
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
index 4ea85f9730bb..87a49cf4ffe9 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -72,13 +72,27 @@ protected:
/// The width of the scalar type used for array indices.
const unsigned ArrayIndexWidth;
- virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy) = 0;
- virtual SVal evalCastFromLoc(Loc val, QualType castTy) = 0;
-
-public:
- // FIXME: Make these protected again once RegionStoreManager correctly
- // handles loads from different bound value types.
- virtual SVal dispatchCast(SVal val, QualType castTy) = 0;
+ SVal evalCastKind(UndefinedVal V, QualType CastTy, QualType OriginalTy);
+ SVal evalCastKind(UnknownVal V, QualType CastTy, QualType OriginalTy);
+ SVal evalCastKind(Loc V, QualType CastTy, QualType OriginalTy);
+ SVal evalCastKind(NonLoc V, QualType CastTy, QualType OriginalTy);
+ SVal evalCastSubKind(loc::ConcreteInt V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(loc::GotoLabel V, QualType CastTy, QualType OriginalTy);
+ SVal evalCastSubKind(loc::MemRegionVal V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(nonloc::CompoundVal V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(nonloc::ConcreteInt V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(nonloc::LazyCompoundVal V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(nonloc::LocAsInteger V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(nonloc::SymbolVal V, QualType CastTy,
+ QualType OriginalTy);
+ SVal evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
+ QualType OriginalTy);
public:
SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
@@ -102,7 +116,7 @@ public:
Ty2->isIntegralOrEnumerationType()));
}
- SVal evalCast(SVal val, QualType castTy, QualType originalType);
+ SVal evalCast(SVal V, QualType CastTy, QualType OriginalTy);
// Handles casts of type CK_IntegralCast.
SVal evalIntegralCast(ProgramStateRef state, SVal val, QualType castTy,
@@ -224,6 +238,14 @@ public:
const LocationContext *LCtx,
unsigned Count);
+  /// Conjure a symbol representing a heap-allocated memory region.
+ ///
+ /// Note, now, the expression *doesn't* need to represent a location.
+  /// But the type needs to!
+ DefinedOrUnknownSVal getConjuredHeapSymbolVal(const Expr *E,
+ const LocationContext *LCtx,
+ QualType type, unsigned Count);
+
DefinedOrUnknownSVal getDerivedRegionValueSymbolVal(
SymbolRef parentSymbol, const TypedValueRegion *region);
@@ -366,6 +388,10 @@ public:
return loc::ConcreteInt(BasicVals.getValue(integer));
}
+  /// Return a MemRegionVal on a successful cast, otherwise return None.
+ Optional<loc::MemRegionVal> getCastedMemRegionVal(const MemRegion *region,
+ QualType type);
+
/// Make an SVal that represents the given symbol. This follows the convention
/// of representing Loc-type symbols (symbolic pointers and references)
/// as Loc values wrapping the symbol rather than as plain symbol values.
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
index bb295ab591d4..6199c8d8d179 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -201,6 +201,19 @@ public:
SymExpr::symbol_iterator symbol_end() const {
return SymExpr::symbol_end();
}
+
+ /// Try to get a reasonable type for the given value.
+ ///
+ /// \returns The best approximation of the value type or Null.
+ /// In theory, all symbolic values should be typed, but this function
+ /// is still a WIP and might have a few blind spots.
+ ///
+ /// \note This function should not be used when the user has access to the
+ /// bound expression AST node as well, since AST always has exact types.
+ ///
+ /// \note Loc values are interpreted as pointer rvalues for the purposes of
+ /// this method.
+ QualType getType(const ASTContext &) const;
};
inline raw_ostream &operator<<(raw_ostream &os, clang::ento::SVal V) {
@@ -511,10 +524,11 @@ private:
/// This value is qualified as NonLoc because neither loading nor storing
/// operations are applied to it. Instead, the analyzer uses the L-value coming
/// from pointer-to-member applied to an object.
-/// This SVal is represented by a DeclaratorDecl which can be a member function
-/// pointer or a member data pointer and a list of CXXBaseSpecifiers. This list
-/// is required to accumulate the pointer-to-member cast history to figure out
-/// the correct subobject field.
+/// This SVal is represented by a NamedDecl which can be a member function
+/// pointer or a member data pointer and an optional list of CXXBaseSpecifiers.
+/// This list is required to accumulate the pointer-to-member cast history to
+/// figure out the correct subobject field. In particular, implicit casts grow
+/// this list and explicit casts like static_cast shrink this list.
class PointerToMember : public NonLoc {
friend class ento::SValBuilder;
diff --git a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
index c3b590e4784e..d2461705d128 100644
--- a/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
+++ b/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -181,7 +181,8 @@ public:
/// castRegion - Used by ExprEngine::VisitCast to handle casts from
/// a MemRegion* to a specific location type. 'R' is the region being
/// casted and 'CastToTy' the result type of the cast.
- const MemRegion *castRegion(const MemRegion *region, QualType CastToTy);
+ Optional<const MemRegion *> castRegion(const MemRegion *region,
+ QualType CastToTy);
virtual StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
SymbolReaper &SymReaper) = 0;
@@ -280,12 +281,6 @@ protected:
QualType pointeeTy,
uint64_t index = 0);
- /// CastRetrievedVal - Used by subclasses of StoreManager to implement
- /// implicit casts that arise from loads from regions that are reinterpreted
- /// as another region.
- SVal CastRetrievedVal(SVal val, const TypedValueRegion *region,
- QualType castTy);
-
private:
SVal getLValueFieldOrIvar(const Decl *decl, SVal base);
};
diff --git a/clang/include/clang/Tooling/ArgumentsAdjusters.h b/clang/include/clang/Tooling/ArgumentsAdjusters.h
index c48a8725aae9..bf0886034324 100644
--- a/clang/include/clang/Tooling/ArgumentsAdjusters.h
+++ b/clang/include/clang/Tooling/ArgumentsAdjusters.h
@@ -43,10 +43,6 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster();
/// arguments.
ArgumentsAdjuster getClangStripOutputAdjuster();
-/// Gets an argument adjuster which removes command line arguments related to
-/// diagnostic serialization.
-ArgumentsAdjuster getClangStripSerializeDiagnosticAdjuster();
-
/// Gets an argument adjuster which removes dependency-file
/// related command line arguments.
ArgumentsAdjuster getClangStripDependencyFileAdjuster();
diff --git a/clang/include/clang/Tooling/CommonOptionsParser.h b/clang/include/clang/Tooling/CommonOptionsParser.h
index a5bfeeeaf77f..0f072c2886ab 100644
--- a/clang/include/clang/Tooling/CommonOptionsParser.h
+++ b/clang/include/clang/Tooling/CommonOptionsParser.h
@@ -63,21 +63,8 @@ namespace tooling {
/// }
/// \endcode
class CommonOptionsParser {
-public:
- /// Parses command-line, initializes a compilation database.
- ///
- /// This constructor can change argc and argv contents, e.g. consume
- /// command-line options used for creating FixedCompilationDatabase.
- ///
- /// All options not belonging to \p Category become hidden.
- ///
- /// This constructor exits program in case of error.
- CommonOptionsParser(int &argc, const char **argv,
- llvm::cl::OptionCategory &Category,
- const char *Overview = nullptr)
- : CommonOptionsParser(argc, argv, Category, llvm::cl::OneOrMore,
- Overview) {}
+protected:
/// Parses command-line, initializes a compilation database.
///
/// This constructor can change argc and argv contents, e.g. consume
@@ -86,16 +73,17 @@ public:
/// All options not belonging to \p Category become hidden.
///
/// It also allows calls to set the required number of positional parameters.
- CommonOptionsParser(int &argc, const char **argv,
- llvm::cl::OptionCategory &Category,
- llvm::cl::NumOccurrencesFlag OccurrencesFlag,
- const char *Overview = nullptr);
+ CommonOptionsParser(
+ int &argc, const char **argv, llvm::cl::OptionCategory &Category,
+ llvm::cl::NumOccurrencesFlag OccurrencesFlag = llvm::cl::OneOrMore,
+ const char *Overview = nullptr);
+public:
/// A factory method that is similar to the above constructor, except
/// this returns an error instead exiting the program on error.
static llvm::Expected<CommonOptionsParser>
create(int &argc, const char **argv, llvm::cl::OptionCategory &Category,
- llvm::cl::NumOccurrencesFlag OccurrencesFlag,
+ llvm::cl::NumOccurrencesFlag OccurrencesFlag = llvm::cl::OneOrMore,
const char *Overview = nullptr);
/// Returns a reference to the loaded compilations database.
diff --git a/clang/include/clang/Tooling/CompilationDatabase.h b/clang/include/clang/Tooling/CompilationDatabase.h
index 44af236347b3..90af15536961 100644
--- a/clang/include/clang/Tooling/CompilationDatabase.h
+++ b/clang/include/clang/Tooling/CompilationDatabase.h
@@ -213,6 +213,12 @@ private:
std::vector<CompileCommand> CompileCommands;
};
+/// Transforms a compile command so that it applies the same configuration to
+/// a different file. Most args are left intact, but tweaks may be needed
+/// to certain flags (-x, -std etc).
+tooling::CompileCommand transferCompileCommand(tooling::CompileCommand,
+ StringRef Filename);
+
/// Returns a wrapped CompilationDatabase that defers to the provided one,
/// but getCompileCommands() will infer commands for unknown files.
/// The return value of getAllFiles() or getAllCompileCommands() is unchanged.
diff --git a/clang/include/clang/Tooling/Core/Diagnostic.h b/clang/include/clang/Tooling/Core/Diagnostic.h
index 123874f9ccf7..4553380bcf00 100644
--- a/clang/include/clang/Tooling/Core/Diagnostic.h
+++ b/clang/include/clang/Tooling/Core/Diagnostic.h
@@ -26,6 +26,17 @@
namespace clang {
namespace tooling {
+/// Represents a range within a specific source file.
+struct FileByteRange {
+ FileByteRange() = default;
+
+ FileByteRange(const SourceManager &Sources, CharSourceRange Range);
+
+ std::string FilePath;
+ unsigned FileOffset;
+ unsigned Length;
+};
+
/// Represents the diagnostic message with the error message associated
/// and the information on the location of the problem.
struct DiagnosticMessage {
@@ -39,29 +50,24 @@ struct DiagnosticMessage {
///
DiagnosticMessage(llvm::StringRef Message, const SourceManager &Sources,
SourceLocation Loc);
+
std::string Message;
std::string FilePath;
unsigned FileOffset;
/// Fixes for this diagnostic, grouped by file path.
llvm::StringMap<Replacements> Fix;
-};
-
-/// Represents a range within a specific source file.
-struct FileByteRange {
- FileByteRange() = default;
- FileByteRange(const SourceManager &Sources, CharSourceRange Range);
-
- std::string FilePath;
- unsigned FileOffset;
- unsigned Length;
+ /// Extra source ranges associated with the note, in addition to the location
+ /// of the Message itself.
+ llvm::SmallVector<FileByteRange, 1> Ranges;
};
/// Represents the diagnostic with the level of severity and possible
/// fixes to be applied.
struct Diagnostic {
enum Level {
+ Remark = DiagnosticsEngine::Remark,
Warning = DiagnosticsEngine::Warning,
Error = DiagnosticsEngine::Error
};
@@ -73,8 +79,7 @@ struct Diagnostic {
Diagnostic(llvm::StringRef DiagnosticName, const DiagnosticMessage &Message,
const SmallVector<DiagnosticMessage, 1> &Notes, Level DiagLevel,
- llvm::StringRef BuildDirectory,
- const SmallVector<FileByteRange, 1> &Ranges);
+ llvm::StringRef BuildDirectory);
/// Name identifying the Diagnostic.
std::string DiagnosticName;
@@ -96,10 +101,6 @@ struct Diagnostic {
///
/// Note: it is empty in unittest.
std::string BuildDirectory;
-
- /// Extra source ranges associated with the diagnostic (in addition to the
- /// location of the Message above).
- SmallVector<FileByteRange, 1> Ranges;
};
/// Collection of Diagnostics generated from a single translation unit.
diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
index 7d0881343478..c52da3305f7c 100644
--- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
+++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h
@@ -65,7 +65,7 @@ public:
return MaybeStat.getError();
assert(!MaybeStat->isDirectory() && "not a file");
assert(isValid() && "not initialized");
- return StringRef(Contents);
+ return Contents.str();
}
/// \returns The error or the status of the entry.
@@ -103,7 +103,8 @@ private:
};
/// This class is a shared cache, that caches the 'stat' and 'open' calls to the
-/// underlying real file system.
+/// underlying real file system. It distinguishes between minimized and original
+/// files.
///
/// It is sharded based on the hash of the key to reduce the lock contention for
/// the worker threads.
@@ -114,21 +115,62 @@ public:
CachedFileSystemEntry Value;
};
- DependencyScanningFilesystemSharedCache();
-
/// Returns a cache entry for the corresponding key.
///
/// A new cache entry is created if the key is not in the cache. This is a
/// thread safe call.
- SharedFileSystemEntry &get(StringRef Key);
+ SharedFileSystemEntry &get(StringRef Key, bool Minimized);
private:
- struct CacheShard {
- std::mutex CacheLock;
- llvm::StringMap<SharedFileSystemEntry, llvm::BumpPtrAllocator> Cache;
+ class SingleCache {
+ public:
+ SingleCache();
+
+ SharedFileSystemEntry &get(StringRef Key);
+
+ private:
+ struct CacheShard {
+ std::mutex CacheLock;
+ llvm::StringMap<SharedFileSystemEntry, llvm::BumpPtrAllocator> Cache;
+ };
+ std::unique_ptr<CacheShard[]> CacheShards;
+ unsigned NumShards;
};
- std::unique_ptr<CacheShard[]> CacheShards;
- unsigned NumShards;
+
+ SingleCache CacheMinimized;
+ SingleCache CacheOriginal;
+};
+
+/// This class is a local cache, that caches the 'stat' and 'open' calls to the
+/// underlying real file system. It distinguishes between minimized and original
+/// files.
+class DependencyScanningFilesystemLocalCache {
+private:
+ using SingleCache =
+ llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator>;
+
+ SingleCache CacheMinimized;
+ SingleCache CacheOriginal;
+
+ SingleCache &selectCache(bool Minimized) {
+ return Minimized ? CacheMinimized : CacheOriginal;
+ }
+
+public:
+ void setCachedEntry(StringRef Filename, bool Minimized,
+ const CachedFileSystemEntry *Entry) {
+ SingleCache &Cache = selectCache(Minimized);
+ bool IsInserted = Cache.try_emplace(Filename, Entry).second;
+ (void)IsInserted;
+ assert(IsInserted && "local cache is updated more than once");
+ }
+
+ const CachedFileSystemEntry *getCachedEntry(StringRef Filename,
+ bool Minimized) {
+ SingleCache &Cache = selectCache(Minimized);
+ auto It = Cache.find(Filename);
+ return It == Cache.end() ? nullptr : It->getValue();
+ }
};
/// A virtual file system optimized for the dependency discovery.
@@ -153,32 +195,26 @@ public:
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
openFileForRead(const Twine &Path) override;
- /// The set of files that should not be minimized.
- llvm::StringSet<> IgnoredFiles;
+ void clearIgnoredFiles() { IgnoredFiles.clear(); }
+ void ignoreFile(StringRef Filename);
private:
- void setCachedEntry(StringRef Filename, const CachedFileSystemEntry *Entry) {
- bool IsInserted = Cache.try_emplace(Filename, Entry).second;
- (void)IsInserted;
- assert(IsInserted && "local cache is updated more than once");
- }
-
- const CachedFileSystemEntry *getCachedEntry(StringRef Filename) {
- auto It = Cache.find(Filename);
- return It == Cache.end() ? nullptr : It->getValue();
- }
+ bool shouldIgnoreFile(StringRef Filename);
llvm::ErrorOr<const CachedFileSystemEntry *>
getOrCreateFileSystemEntry(const StringRef Filename);
+ /// The global cache shared between worker threads.
DependencyScanningFilesystemSharedCache &SharedCache;
/// The local cache is used by the worker thread to cache file system queries
/// locally instead of querying the global cache every time.
- llvm::StringMap<const CachedFileSystemEntry *, llvm::BumpPtrAllocator> Cache;
+ DependencyScanningFilesystemLocalCache Cache;
/// The optional mapping structure which records information about the
/// excluded conditional directive skip mappings that are used by the
/// currently active preprocessor.
ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings;
+ /// The set of files that should not be minimized.
+ llvm::StringSet<> IgnoredFiles;
};
} // end namespace dependencies
diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
index 1c106ed4b765..f88dc472c80b 100644
--- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
+++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningTool.h
@@ -22,51 +22,45 @@ namespace dependencies{
/// The full dependencies and module graph for a specific input.
struct FullDependencies {
- /// The name of the C++20 module this translation unit exports. This may
- /// include `:` for C++20 module partitons.
+ /// The identifier of the C++20 module this translation unit exports.
///
- /// If the translation unit is not a module then this will be empty.
- std::string ExportedModuleName;
-
- /// The context hash represents the set of compiler options that may make one
- /// version of a module incompatible with another. This includes things like
- /// language mode, predefined macros, header search paths, etc...
- ///
- /// Modules with the same name but a different \c ContextHash should be
- /// treated as separate modules for the purpose of a build.
- std::string ContextHash;
+ /// If the translation unit is not a module then \c ID.ModuleName is empty.
+ ModuleID ID;
/// A collection of absolute paths to files that this translation unit
/// directly depends on, not including transitive dependencies.
std::vector<std::string> FileDeps;
+ /// A collection of prebuilt modules this translation unit directly depends
+ /// on, not including transitive dependencies.
+ std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
+
/// A list of modules this translation unit directly depends on, not including
/// transitive dependencies.
///
/// This may include modules with a different context hash when it can be
/// determined that the differences are benign for this compilation.
- std::vector<ClangModuleDep> ClangModuleDeps;
-
- /// A partial addtional set of command line arguments that can be used to
- /// build this translation unit.
- ///
- /// Call \c getFullAdditionalCommandLine() to get a command line suitable for
- /// appending to the original command line to pass to clang.
- std::vector<std::string> AdditionalNonPathCommandLine;
+ std::vector<ModuleID> ClangModuleDeps;
- /// Gets the full addtional command line suitable for appending to the
- /// original command line to pass to clang.
+ /// Get additional arguments suitable for appending to the original Clang
+ /// command line.
///
- /// \param LookupPCMPath this function is called to fill in `-fmodule-file=`
- /// flags and for the `-o` flag. It needs to return a
- /// path for where the PCM for the given module is to
+ /// \param LookupPCMPath This function is called to fill in "-fmodule-file="
+ /// arguments and the "-o" argument. It needs to return
+ /// a path for where the PCM for the given module is to
/// be located.
- /// \param LookupModuleDeps this fucntion is called to collect the full
+ /// \param LookupModuleDeps This function is called to collect the full
/// transitive set of dependencies for this
- /// compilation.
- std::vector<std::string> getAdditionalCommandLine(
- std::function<StringRef(ClangModuleDep)> LookupPCMPath,
- std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const;
+ /// compilation and fill in "-fmodule-map-file="
+ /// arguments.
+ std::vector<std::string> getAdditionalArgs(
+ std::function<StringRef(ModuleID)> LookupPCMPath,
+ std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const;
+
+ /// Get additional arguments suitable for appending to the original Clang
+ /// command line, excluding arguments containing modules-related paths:
+ /// "-fmodule-file=", "-fmodule-map-file=".
+ std::vector<std::string> getAdditionalArgsWithoutModulePaths() const;
};
struct FullDependenciesResult {
@@ -91,15 +85,14 @@ public:
getDependencyFile(const tooling::CompilationDatabase &Compilations,
StringRef CWD);
- /// Collect the full module depenedency graph for the input, ignoring any
+ /// Collect the full module dependency graph for the input, ignoring any
/// modules which have already been seen.
///
- /// \param AlreadySeen this is used to not report modules that have previously
- /// been reported. Use the same `llvm::StringSet<>` for all
- /// calls to `getFullDependencies` for a single
- /// `DependencyScanningTool` for a single build. Use a
- /// different one for different tools, and clear it between
- /// builds.
+ /// \param AlreadySeen This stores modules which have previously been
+ /// reported. Use the same instance for all calls to this
+ /// function for a single \c DependencyScanningTool in a
+ /// single build. Use a different one for different tools,
+ /// and clear it between builds.
///
/// \returns a \c StringError with the diagnostic output if clang errors
/// occurred, \c FullDependencies otherwise.
diff --git a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
index 689119330c41..5903ad13c1d8 100644
--- a/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
+++ b/clang/include/clang/Tooling/DependencyScanning/DependencyScanningWorker.h
@@ -34,8 +34,12 @@ class DependencyConsumer {
public:
virtual ~DependencyConsumer() {}
- virtual void handleFileDependency(const DependencyOutputOptions &Opts,
- StringRef Filename) = 0;
+ virtual void
+ handleDependencyOutputOpts(const DependencyOutputOptions &Opts) = 0;
+
+ virtual void handleFileDependency(StringRef Filename) = 0;
+
+ virtual void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) = 0;
virtual void handleModuleDependency(ModuleDeps MD) = 0;
diff --git a/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h b/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
index c490bb38c167..a9f2b4d0c6fc 100644
--- a/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
+++ b/clang/include/clang/Tooling/DependencyScanning/ModuleDepCollector.h
@@ -12,6 +12,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/PPCallbacks.h"
@@ -28,16 +29,21 @@ namespace dependencies {
class DependencyConsumer;
-/// This is used to refer to a specific module.
-///
-/// See \c ModuleDeps for details about what these members mean.
-struct ClangModuleDep {
+/// Modular dependency that has already been built prior to the dependency scan.
+struct PrebuiltModuleDep {
std::string ModuleName;
- std::string ContextHash;
+ std::string PCMFile;
+ std::string ModuleMapFile;
+
+ explicit PrebuiltModuleDep(const Module *M)
+ : ModuleName(M->getTopLevelModuleName()),
+ PCMFile(M->getASTFile()->getName()),
+ ModuleMapFile(M->PresumedModuleMapFile) {}
};
-struct ModuleDeps {
- /// The name of the module. This may include `:` for C++20 module partitons,
+/// This is used to identify a specific module.
+struct ModuleID {
+ /// The name of the module. This may include `:` for C++20 module partitions,
/// or a header-name for C++20 header units.
std::string ModuleName;
@@ -49,6 +55,24 @@ struct ModuleDeps {
/// treated as separate modules for the purpose of a build.
std::string ContextHash;
+ bool operator==(const ModuleID &Other) const {
+ return ModuleName == Other.ModuleName && ContextHash == Other.ContextHash;
+ }
+};
+
+struct ModuleIDHasher {
+ std::size_t operator()(const ModuleID &MID) const {
+ return llvm::hash_combine(MID.ModuleName, MID.ContextHash);
+ }
+};
+
+struct ModuleDeps {
+ /// The identifier of the module.
+ ModuleID ID;
+
+ /// Whether this is a "system" module.
+ bool IsSystem;
+
/// The path to the modulemap file which defines this module.
///
/// This can be used to explicitly build this module. This file will
@@ -62,50 +86,60 @@ struct ModuleDeps {
/// on, not including transitive dependencies.
llvm::StringSet<> FileDeps;
- /// A list of modules this module directly depends on, not including
- /// transitive dependencies.
+ /// A collection of prebuilt modular dependencies this module directly depends
+ /// on, not including transitive dependencies.
+ std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
+
+ /// A list of module identifiers this module directly depends on, not
+ /// including transitive dependencies.
///
/// This may include modules with a different context hash when it can be
/// determined that the differences are benign for this compilation.
- std::vector<ClangModuleDep> ClangModuleDeps;
-
- /// A partial command line that can be used to build this module.
- ///
- /// Call \c getFullCommandLine() to get a command line suitable for passing to
- /// clang.
- std::vector<std::string> NonPathCommandLine;
+ std::vector<ModuleID> ClangModuleDeps;
// Used to track which modules that were discovered were directly imported by
// the primary TU.
bool ImportedByMainFile = false;
- /// Gets the full command line suitable for passing to clang.
+ /// Compiler invocation that can be used to build this module (without paths).
+ CompilerInvocation Invocation;
+
+ /// Gets the canonical command line suitable for passing to clang.
///
- /// \param LookupPCMPath this function is called to fill in `-fmodule-file=`
- /// flags and for the `-o` flag. It needs to return a
- /// path for where the PCM for the given module is to
+ /// \param LookupPCMPath This function is called to fill in "-fmodule-file="
+ /// arguments and the "-o" argument. It needs to return
+ /// a path for where the PCM for the given module is to
/// be located.
- /// \param LookupModuleDeps this fucntion is called to collect the full
+ /// \param LookupModuleDeps This function is called to collect the full
/// transitive set of dependencies for this
- /// compilation.
- std::vector<std::string> getFullCommandLine(
- std::function<StringRef(ClangModuleDep)> LookupPCMPath,
- std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const;
+ /// compilation and fill in "-fmodule-map-file="
+ /// arguments.
+ std::vector<std::string> getCanonicalCommandLine(
+ std::function<StringRef(ModuleID)> LookupPCMPath,
+ std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const;
+
+ /// Gets the canonical command line suitable for passing to clang, excluding
+ /// arguments containing modules-related paths: "-fmodule-file=", "-o",
+ /// "-fmodule-map-file=".
+ std::vector<std::string> getCanonicalCommandLineWithoutModulePaths() const;
};
namespace detail {
-/// Append the `-fmodule-file=` and `-fmodule-map-file=` arguments for the
-/// modules in \c Modules transitively, along with other needed arguments to
-/// use explicitly built modules.
-void appendCommonModuleArguments(
- llvm::ArrayRef<ClangModuleDep> Modules,
- std::function<StringRef(ClangModuleDep)> LookupPCMPath,
- std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps,
- std::vector<std::string> &Result);
+/// Collect the paths of PCM and module map files for the modules in \c Modules
+/// transitively.
+void collectPCMAndModuleMapPaths(
+ llvm::ArrayRef<ModuleID> Modules,
+ std::function<StringRef(ModuleID)> LookupPCMPath,
+ std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps,
+ std::vector<std::string> &PCMPaths, std::vector<std::string> &ModMapPaths);
} // namespace detail
class ModuleDepCollector;
+/// Callback that records textual includes and direct modular includes/imports
+/// during preprocessing. At the end of the main file, it also collects
+/// transitive modular dependencies and passes everything to the
+/// \c DependencyConsumer of the parent \c ModuleDepCollector.
class ModuleDepCollectorPP final : public PPCallbacks {
public:
ModuleDepCollectorPP(CompilerInstance &I, ModuleDepCollector &MDC)
@@ -126,22 +160,38 @@ public:
void EndOfMainFile() override;
private:
+ /// The compiler instance for the current translation unit.
CompilerInstance &Instance;
+ /// The parent dependency collector.
ModuleDepCollector &MDC;
- llvm::DenseSet<const Module *> DirectDeps;
+ /// Working set of direct modular dependencies.
+ llvm::DenseSet<const Module *> DirectModularDeps;
+ /// Working set of direct modular dependencies that have already been built.
+ llvm::DenseSet<const Module *> DirectPrebuiltModularDeps;
void handleImport(const Module *Imported);
- void handleTopLevelModule(const Module *M);
+
+ /// Adds direct modular dependencies that have already been built to the
+ /// ModuleDeps instance.
+ void addDirectPrebuiltModuleDeps(const Module *M, ModuleDeps &MD);
+
+ /// Traverses the previously collected direct modular dependencies to discover
+ /// transitive modular dependencies and fills the parent \c ModuleDepCollector
+ /// with both.
+ ModuleID handleTopLevelModule(const Module *M);
void addAllSubmoduleDeps(const Module *M, ModuleDeps &MD,
llvm::DenseSet<const Module *> &AddedModules);
void addModuleDep(const Module *M, ModuleDeps &MD,
llvm::DenseSet<const Module *> &AddedModules);
};
+/// Collects modular and non-modular dependencies of the main file by attaching
+/// \c ModuleDepCollectorPP to the preprocessor.
class ModuleDepCollector final : public DependencyCollector {
public:
ModuleDepCollector(std::unique_ptr<DependencyOutputOptions> Opts,
- CompilerInstance &I, DependencyConsumer &C);
+ CompilerInstance &I, DependencyConsumer &C,
+ CompilerInvocation &&OriginalCI);
void attachToPreprocessor(Preprocessor &PP) override;
void attachToASTReader(ASTReader &R) override;
@@ -149,13 +199,32 @@ public:
private:
friend ModuleDepCollectorPP;
+ /// The compiler instance for the current translation unit.
CompilerInstance &Instance;
+ /// The consumer of collected dependency information.
DependencyConsumer &Consumer;
+ /// Path to the main source file.
std::string MainFile;
+ /// Hash identifying the compilation conditions of the current TU.
std::string ContextHash;
- std::vector<std::string> MainDeps;
- std::unordered_map<std::string, ModuleDeps> Deps;
+ /// Non-modular file dependencies. This includes the main source file and
+ /// textually included header files.
+ std::vector<std::string> FileDeps;
+ /// Direct and transitive modular dependencies of the main source file.
+ std::unordered_map<const Module *, ModuleDeps> ModularDeps;
+ /// Options that control the dependency output generation.
std::unique_ptr<DependencyOutputOptions> Opts;
+ /// The original Clang invocation passed to dependency scanner.
+ CompilerInvocation OriginalInvocation;
+
+  /// Checks whether the module is known to be prebuilt.
+ bool isPrebuiltModule(const Module *M);
+
+ /// Constructs a CompilerInvocation that can be used to build the given
+ /// module, excluding paths to discovered modular dependencies that are yet to
+ /// be built.
+ CompilerInvocation
+ makeInvocationForModuleBuildWithoutPaths(const ModuleDeps &Deps) const;
};
} // end namespace dependencies
diff --git a/clang/include/clang/Tooling/DiagnosticsYaml.h b/clang/include/clang/Tooling/DiagnosticsYaml.h
index 38fbcfc1da95..3f257d84f813 100644
--- a/clang/include/clang/Tooling/DiagnosticsYaml.h
+++ b/clang/include/clang/Tooling/DiagnosticsYaml.h
@@ -54,6 +54,7 @@ template <> struct MappingTraits<clang::tooling::DiagnosticMessage> {
<< llvm::toString(std::move(Err)) << "\n";
}
}
+ Io.mapOptional("Ranges", M.Ranges);
}
};
@@ -67,12 +68,11 @@ template <> struct MappingTraits<clang::tooling::Diagnostic> {
NormalizedDiagnostic(const IO &, const clang::tooling::Diagnostic &D)
: DiagnosticName(D.DiagnosticName), Message(D.Message), Notes(D.Notes),
- DiagLevel(D.DiagLevel), BuildDirectory(D.BuildDirectory),
- Ranges(D.Ranges) {}
+ DiagLevel(D.DiagLevel), BuildDirectory(D.BuildDirectory) {}
clang::tooling::Diagnostic denormalize(const IO &) {
return clang::tooling::Diagnostic(DiagnosticName, Message, Notes,
- DiagLevel, BuildDirectory, Ranges);
+ DiagLevel, BuildDirectory);
}
std::string DiagnosticName;
@@ -80,7 +80,6 @@ template <> struct MappingTraits<clang::tooling::Diagnostic> {
SmallVector<clang::tooling::DiagnosticMessage, 1> Notes;
clang::tooling::Diagnostic::Level DiagLevel;
std::string BuildDirectory;
- SmallVector<clang::tooling::FileByteRange, 1> Ranges;
};
static void mapping(IO &Io, clang::tooling::Diagnostic &D) {
@@ -91,7 +90,6 @@ template <> struct MappingTraits<clang::tooling::Diagnostic> {
Io.mapOptional("Notes", Keys->Notes);
Io.mapOptional("Level", Keys->DiagLevel);
Io.mapOptional("BuildDirectory", Keys->BuildDirectory);
- Io.mapOptional("Ranges", Keys->Ranges);
}
};
@@ -108,6 +106,7 @@ template <> struct ScalarEnumerationTraits<clang::tooling::Diagnostic::Level> {
static void enumeration(IO &IO, clang::tooling::Diagnostic::Level &Value) {
IO.enumCase(Value, "Warning", clang::tooling::Diagnostic::Warning);
IO.enumCase(Value, "Error", clang::tooling::Diagnostic::Error);
+ IO.enumCase(Value, "Remark", clang::tooling::Diagnostic::Remark);
}
};
diff --git a/clang/include/clang/Tooling/NodeIntrospection.h b/clang/include/clang/Tooling/NodeIntrospection.h
new file mode 100644
index 000000000000..91552cad2eca
--- /dev/null
+++ b/clang/include/clang/Tooling/NodeIntrospection.h
@@ -0,0 +1,101 @@
+//===- NodeIntrospection.h ------------------------------------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the NodeIntrospection.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_NODEINTROSPECTION_H
+#define LLVM_CLANG_TOOLING_NODEINTROSPECTION_H
+
+#include "clang/AST/ASTTypeTraits.h"
+#include "clang/AST/DeclarationName.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include <set>
+
+namespace clang {
+
+class Stmt;
+class Decl;
+class CXXCtorInitializer;
+class NestedNameSpecifierLoc;
+class TemplateArgumentLoc;
+class CXXBaseSpecifier;
+struct DeclarationNameInfo;
+
+namespace tooling {
+
+class LocationCall;
+using SharedLocationCall = llvm::IntrusiveRefCntPtr<LocationCall>;
+
+class LocationCall : public llvm::ThreadSafeRefCountedBase<LocationCall> {
+public:
+ enum LocationCallFlags { NoFlags, ReturnsPointer, IsCast };
+ LocationCall(SharedLocationCall on, std::string name,
+ LocationCallFlags flags = NoFlags)
+ : m_flags(flags), m_on(std::move(on)), m_name(std::move(name)) {}
+
+ LocationCall *on() const { return m_on.get(); }
+ StringRef name() const { return m_name; }
+ bool returnsPointer() const { return m_flags & ReturnsPointer; }
+ bool isCast() const { return m_flags & IsCast; }
+
+private:
+ LocationCallFlags m_flags;
+ SharedLocationCall m_on;
+ std::string m_name;
+};
+
+class LocationCallFormatterCpp {
+public:
+ static void print(const LocationCall &Call, llvm::raw_ostream &OS);
+ static std::string format(const LocationCall &Call);
+};
+
+namespace internal {
+struct RangeLessThan {
+ bool operator()(std::pair<SourceRange, SharedLocationCall> const &LHS,
+ std::pair<SourceRange, SharedLocationCall> const &RHS) const;
+ bool
+ operator()(std::pair<SourceLocation, SharedLocationCall> const &LHS,
+ std::pair<SourceLocation, SharedLocationCall> const &RHS) const;
+};
+
+} // namespace internal
+
+// Note that this container stores unique results in a deterministic order, but
+// the location calls are in an unspecified order. Clients which desire
+// a particular order for the location calls, such as alphabetical,
+// should sort results after retrieval, because the order is dependent
+// on how the LocationCalls are formatted.
+template <typename T, typename U>
+using UniqueMultiMap = std::set<std::pair<T, U>, internal::RangeLessThan>;
+
+using SourceLocationMap = UniqueMultiMap<SourceLocation, SharedLocationCall>;
+using SourceRangeMap = UniqueMultiMap<SourceRange, SharedLocationCall>;
+
+struct NodeLocationAccessors {
+ SourceLocationMap LocationAccessors;
+ SourceRangeMap RangeAccessors;
+};
+
+namespace NodeIntrospection {
+bool hasIntrospectionSupport();
+NodeLocationAccessors GetLocations(clang::Stmt const *Object);
+NodeLocationAccessors GetLocations(clang::Decl const *Object);
+NodeLocationAccessors GetLocations(clang::CXXCtorInitializer const *Object);
+NodeLocationAccessors GetLocations(clang::NestedNameSpecifierLoc const &);
+NodeLocationAccessors GetLocations(clang::TemplateArgumentLoc const &);
+NodeLocationAccessors GetLocations(clang::CXXBaseSpecifier const *);
+NodeLocationAccessors GetLocations(clang::TypeLoc const &);
+NodeLocationAccessors GetLocations(clang::DeclarationNameInfo const &);
+NodeLocationAccessors GetLocations(clang::DynTypedNode const &Node);
+} // namespace NodeIntrospection
+} // namespace tooling
+} // namespace clang
+#endif
diff --git a/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h b/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
index c0f995d85c14..63d46abc2034 100644
--- a/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
+++ b/clang/include/clang/Tooling/Refactoring/RecursiveSymbolVisitor.h
@@ -122,6 +122,17 @@ public:
return BaseType::TraverseNestedNameSpecifierLoc(NNS);
}
+ bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
+ for (const DesignatedInitExpr::Designator &D : E->designators()) {
+ if (D.isFieldDesignator() && D.getField()) {
+ const FieldDecl *Decl = D.getField();
+ if (!visit(Decl, D.getFieldLoc(), D.getFieldLoc()))
+ return false;
+ }
+ }
+ return true;
+ }
+
private:
const SourceManager &SM;
const LangOptions &LangOpts;
diff --git a/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h b/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
index 0c6e38af381f..57dffa945acc 100644
--- a/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
+++ b/clang/include/clang/Tooling/Refactoring/RefactoringActionRule.h
@@ -12,7 +12,6 @@
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
-#include <vector>
namespace clang {
namespace tooling {
diff --git a/clang/include/clang/Tooling/Syntax/Tokens.h b/clang/include/clang/Tooling/Syntax/Tokens.h
index 98320bd54d6f..e4bc1553c2d6 100644
--- a/clang/include/clang/Tooling/Syntax/Tokens.h
+++ b/clang/include/clang/Tooling/Syntax/Tokens.h
@@ -34,6 +34,7 @@
#include "clang/Basic/TokenKinds.h"
#include "clang/Lex/Token.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Compiler.h"
@@ -192,8 +193,13 @@ public:
return ExpandedTokens;
}
+ /// Builds a cache to make future calls to expandedToken(SourceRange) faster.
+  /// Creates an index only once. Further calls to it will be no-ops.
+ void indexExpandedTokens();
+
/// Returns the subrange of expandedTokens() corresponding to the closed
/// token range R.
+  /// Consider calling indexExpandedTokens() beforehand for faster lookups.
llvm::ArrayRef<syntax::Token> expandedTokens(SourceRange R) const;
/// Returns the subrange of spelled tokens corresponding to AST node spanning
@@ -366,6 +372,8 @@ private:
/// same stream as 'clang -E' (excluding the preprocessor directives like
/// #file, etc.).
std::vector<syntax::Token> ExpandedTokens;
+ // Index of ExpandedTokens for faster lookups by SourceLocation.
+ llvm::DenseMap<SourceLocation, unsigned> ExpandedTokIndex;
llvm::DenseMap<FileID, MarkedFile> Files;
// The value is never null, pointer instead of reference to avoid disabling
// implicit assignment operator.
diff --git a/clang/include/clang/Tooling/Tooling.h b/clang/include/clang/Tooling/Tooling.h
index 8b3b2e5ad002..73d09662562b 100644
--- a/clang/include/clang/Tooling/Tooling.h
+++ b/clang/include/clang/Tooling/Tooling.h
@@ -66,6 +66,14 @@ namespace tooling {
class CompilationDatabase;
+/// Retrieves the flags of the `-cc1` job in `Compilation` that has only source
+/// files as its inputs.
+/// Returns nullptr if there are no such jobs or multiple of them. Note that
+/// offloading jobs are ignored.
+const llvm::opt::ArgStringList *
+getCC1Arguments(DiagnosticsEngine *Diagnostics,
+ driver::Compilation *Compilation);
+
/// Interface to process a clang::CompilerInvocation.
///
/// If your tool is based on FrontendAction, you should be deriving from
diff --git a/clang/include/clang/Tooling/Transformer/Parsing.h b/clang/include/clang/Tooling/Transformer/Parsing.h
index 8e51f595cd5b..b143f63d8ca8 100644
--- a/clang/include/clang/Tooling/Transformer/Parsing.h
+++ b/clang/include/clang/Tooling/Transformer/Parsing.h
@@ -21,7 +21,6 @@
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "llvm/Support/Error.h"
#include <functional>
-#include <string>
namespace clang {
namespace transformer {
diff --git a/clang/include/clang/Tooling/Transformer/RangeSelector.h b/clang/include/clang/Tooling/Transformer/RangeSelector.h
index f17fb8c7b5c6..8ff31f7a0342 100644
--- a/clang/include/clang/Tooling/Transformer/RangeSelector.h
+++ b/clang/include/clang/Tooling/Transformer/RangeSelector.h
@@ -73,9 +73,9 @@ RangeSelector statement(std::string ID);
/// binding in the match result.
RangeSelector member(std::string ID);
-/// Given a node with a "name", (like \c NamedDecl, \c DeclRefExpr or \c
-/// CxxCtorInitializer) selects the name's token. Only selects the final
-/// identifier of a qualified name, but not any qualifiers or template
+/// Given a node with a "name", (like \c NamedDecl, \c DeclRefExpr, \c
+/// CxxCtorInitializer, and \c TypeLoc) selects the name's token. Only selects
+/// the final identifier of a qualified name, but not any qualifiers or template
/// arguments. For example, for `::foo::bar::baz` and `::foo::bar::baz<int>`,
/// it selects only `baz`.
///
diff --git a/clang/include/clang/module.modulemap b/clang/include/clang/module.modulemap
index 332e533f0347..33fcf9dc7576 100644
--- a/clang/include/clang/module.modulemap
+++ b/clang/include/clang/module.modulemap
@@ -40,11 +40,11 @@ module Clang_Basic {
textual header "Basic/BuiltinsHexagon.def"
textual header "Basic/BuiltinsHexagonDep.def"
textual header "Basic/BuiltinsHexagonMapCustomDep.def"
- textual header "Basic/BuiltinsLe64.def"
textual header "Basic/BuiltinsMips.def"
textual header "Basic/BuiltinsNEON.def"
textual header "Basic/BuiltinsNVPTX.def"
textual header "Basic/BuiltinsPPC.def"
+ textual header "Basic/BuiltinsRISCV.def"
textual header "Basic/BuiltinsSVE.def"
textual header "Basic/BuiltinsSystemZ.def"
textual header "Basic/BuiltinsWebAssembly.def"
@@ -63,7 +63,9 @@ module Clang_Basic {
textual header "Basic/OpenMPKinds.def"
textual header "Basic/OperatorKinds.def"
textual header "Basic/PPCTypes.def"
+ textual header "Basic/RISCVVTypes.def"
textual header "Basic/Sanitizers.def"
+ textual header "Basic/TargetCXXABI.def"
textual header "Basic/TokenKinds.def"
textual header "Basic/X86Target.def"
diff --git a/clang/lib/APINotes/APINotesYAMLCompiler.cpp b/clang/lib/APINotes/APINotesYAMLCompiler.cpp
index a4120120a01c..75100fde59b8 100644
--- a/clang/lib/APINotes/APINotesYAMLCompiler.cpp
+++ b/clang/lib/APINotes/APINotesYAMLCompiler.cpp
@@ -551,7 +551,9 @@ struct Module {
llvm::Optional<bool> SwiftInferImportAsMember = {llvm::None};
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void dump() /*const*/;
+#endif
};
} // namespace
@@ -571,10 +573,12 @@ template <> struct MappingTraits<Module> {
} // namespace yaml
} // namespace llvm
-void Module::dump() {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void Module::dump() {
llvm::yaml::Output OS(llvm::errs());
OS << *this;
}
+#endif
namespace {
bool parseAPINotes(StringRef YI, Module &M, llvm::SourceMgr::DiagHandlerTy Diag,
diff --git a/clang/lib/ARCMigrate/FileRemapper.cpp b/clang/lib/ARCMigrate/FileRemapper.cpp
index f536af1795ed..92027fe4f1f4 100644
--- a/clang/lib/ARCMigrate/FileRemapper.cpp
+++ b/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -63,7 +63,7 @@ bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> fileBuf =
- llvm::MemoryBuffer::getFile(infoFile);
+ llvm::MemoryBuffer::getFile(infoFile, /*IsText=*/true);
if (!fileBuf)
return report("Error opening file: " + infoFile, Diag);
@@ -121,7 +121,7 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
std::error_code EC;
std::string infoFile = std::string(outputPath);
- llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::OF_None);
+ llvm::raw_fd_ostream infoOut(infoFile, EC, llvm::sys::fs::OF_Text);
if (EC)
return report(EC.message(), Diag);
@@ -142,9 +142,10 @@ bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
SmallString<64> tempPath;
int fd;
- if (fs::createTemporaryFile(path::filename(origFE->getName()),
- path::extension(origFE->getName()).drop_front(), fd,
- tempPath))
+ if (fs::createTemporaryFile(
+ path::filename(origFE->getName()),
+ path::extension(origFE->getName()).drop_front(), fd, tempPath,
+ llvm::sys::fs::OF_Text))
return report("Could not create file: " + tempPath.str(), Diag);
llvm::raw_fd_ostream newOut(fd, /*shouldClose=*/true);
diff --git a/clang/lib/ARCMigrate/ObjCMT.cpp b/clang/lib/ARCMigrate/ObjCMT.cpp
index 68a51a49c718..c8069b51567c 100644
--- a/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -613,7 +613,7 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
continue;
HasAtleastOneRequiredProperty = true;
DeclContext::lookup_result R = IDecl->lookup(Property->getDeclName());
- if (R.size() == 0) {
+ if (R.empty()) {
// Relax the rule and look into class's implementation for a synthesize
// or dynamic declaration. Class is implementing a property coming from
// another protocol. This still makes the target protocol as conforming.
@@ -621,14 +621,12 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
Property->getDeclName().getAsIdentifierInfo(),
Property->getQueryKind()))
return false;
- }
- else if (ObjCPropertyDecl *ClassProperty = dyn_cast<ObjCPropertyDecl>(R[0])) {
- if ((ClassProperty->getPropertyAttributes()
- != Property->getPropertyAttributes()) ||
- !Ctx.hasSameType(ClassProperty->getType(), Property->getType()))
- return false;
- }
- else
+ } else if (auto *ClassProperty = R.find_first<ObjCPropertyDecl>()) {
+ if ((ClassProperty->getPropertyAttributes() !=
+ Property->getPropertyAttributes()) ||
+ !Ctx.hasSameType(ClassProperty->getType(), Property->getType()))
+ return false;
+ } else
return false;
}
@@ -645,12 +643,12 @@ ClassImplementsAllMethodsAndProperties(ASTContext &Ctx,
if (MD->getImplementationControl() == ObjCMethodDecl::Optional)
continue;
DeclContext::lookup_result R = ImpDecl->lookup(MD->getDeclName());
- if (R.size() == 0)
+ if (R.empty())
return false;
bool match = false;
HasAtleastOneRequiredMethod = true;
- for (unsigned I = 0, N = R.size(); I != N; ++I)
- if (ObjCMethodDecl *ImpMD = dyn_cast<ObjCMethodDecl>(R[0]))
+ for (NamedDecl *ND : R)
+ if (ObjCMethodDecl *ImpMD = dyn_cast<ObjCMethodDecl>(ND))
if (Ctx.ObjCMethodsAreEqual(MD, ImpMD)) {
match = true;
break;
diff --git a/clang/lib/ARCMigrate/PlistReporter.cpp b/clang/lib/ARCMigrate/PlistReporter.cpp
index d01563b2974d..c233d6bd9002 100644
--- a/clang/lib/ARCMigrate/PlistReporter.cpp
+++ b/clang/lib/ARCMigrate/PlistReporter.cpp
@@ -56,7 +56,7 @@ void arcmt::writeARCDiagsToPlist(const std::string &outPath,
}
std::error_code EC;
- llvm::raw_fd_ostream o(outPath, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream o(outPath, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
llvm::errs() << "error: could not create file: " << outPath << '\n';
return;
diff --git a/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
index 5d0cfb8a8b9c..e9c21b8106d7 100644
--- a/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
+++ b/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -52,7 +52,7 @@ static bool isEmptyARCMTMacroStatement(NullStmt *S,
if (AfterMacroLoc == SemiLoc)
return true;
- int RelOffs = 0;
+ SourceLocation::IntTy RelOffs = 0;
if (!SM.isInSameSLocAddrSpace(AfterMacroLoc, SemiLoc, &RelOffs))
return false;
if (RelOffs < 0)
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index cb7f00abf9e9..e102a3ba508d 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -56,8 +56,8 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/ObjCRuntime.h"
-#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
@@ -84,6 +84,7 @@
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MD5.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -879,10 +880,15 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
return CanonTTP;
}
+TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
+ auto Kind = getTargetInfo().getCXXABI().getKind();
+ return getLangOpts().CXXABI.getValueOr(Kind);
+}
+
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
if (!LangOpts.CPlusPlus) return nullptr;
- switch (T.getCXXABI().getKind()) {
+ switch (getCXXABIKind()) {
case TargetCXXABI::AppleARM64:
case TargetCXXABI::Fuchsia:
case TargetCXXABI::GenericARM: // Same as Itanium at this level
@@ -930,6 +936,11 @@ static const LangASMap *getAddressSpaceMap(const TargetInfo &T,
7, // cuda_device
8, // cuda_constant
9, // cuda_shared
+ 1, // sycl_global
+ 5, // sycl_global_device
+ 6, // sycl_global_host
+ 3, // sycl_local
+ 0, // sycl_private
10, // ptr32_sptr
11, // ptr32_uptr
12 // ptr64
@@ -955,23 +966,22 @@ static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
IdentifierTable &idents, SelectorTable &sels,
- Builtin::Context &builtins)
+ Builtin::Context &builtins, TranslationUnitKind TUKind)
: ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
TemplateSpecializationTypes(this_()),
DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
SubstTemplateTemplateParmPacks(this_()),
CanonTemplateTemplateParms(this_()), SourceMgr(SM), LangOpts(LOpts),
- SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
+ NoSanitizeL(new NoSanitizeList(LangOpts.NoSanitizeFiles, SM)),
XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
LangOpts.XRayNeverInstrumentFiles,
LangOpts.XRayAttrListFiles, SM)),
ProfList(new ProfileList(LangOpts.ProfileListFiles, SM)),
PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
- BuiltinInfo(builtins), DeclarationNames(*this), Comments(SM),
- CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
+ BuiltinInfo(builtins), TUKind(TUKind), DeclarationNames(*this),
+ Comments(SM), CommentCommandTraits(BumpAlloc, LOpts.CommentOpts),
CompCategories(this_()), LastSDM(nullptr, 0) {
- TUDecl = TranslationUnitDecl::Create(*this);
- TraversalScope = {TUDecl};
+ addTranslationUnitDecl();
}
ASTContext::~ASTContext() {
@@ -1185,9 +1195,10 @@ ExternCContextDecl *ASTContext::getExternCContextDecl() const {
BuiltinTemplateDecl *
ASTContext::buildBuiltinTemplateDecl(BuiltinTemplateKind BTK,
const IdentifierInfo *II) const {
- auto *BuiltinTemplate = BuiltinTemplateDecl::Create(*this, TUDecl, II, BTK);
+ auto *BuiltinTemplate =
+ BuiltinTemplateDecl::Create(*this, getTranslationUnitDecl(), II, BTK);
BuiltinTemplate->setImplicit();
- TUDecl->addDecl(BuiltinTemplate);
+ getTranslationUnitDecl()->addDecl(BuiltinTemplate);
return BuiltinTemplate;
}
@@ -1436,6 +1447,12 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
#include "clang/Basic/PPCTypes.def"
}
+ if (Target.hasRISCVVTypes()) {
+#define RVV_TYPE(Name, Id, SingletonId) \
+ InitBuiltinType(SingletonId, BuiltinType::Id);
+#include "clang/Basic/RISCVVTypes.def"
+ }
+
// Builtin type for __objc_yes and __objc_no
ObjCBuiltinBoolTy = (Target.useSignedCharForObjCBool() ?
SignedCharTy : BoolTy);
@@ -1445,7 +1462,7 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
ObjCSuperType = QualType();
// void * type
- if (LangOpts.OpenCLVersion >= 200) {
+ if (LangOpts.OpenCLGenericAddressSpace) {
auto Q = VoidTy.getQualifiers();
Q.setAddressSpace(LangAS::opencl_generic);
VoidPtrTy = getPointerType(getCanonicalType(
@@ -1468,7 +1485,7 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
// MSVC predeclares struct _GUID, and we need it to create MSGuidDecls.
if (LangOpts.MicrosoftExt || LangOpts.Borland) {
MSGuidTagDecl = buildImplicitRecord("_GUID");
- TUDecl->addDecl(MSGuidTagDecl);
+ getTranslationUnitDecl()->addDecl(MSGuidTagDecl);
}
}
@@ -1554,6 +1571,21 @@ ASTContext::setInstantiatedFromUsingDecl(NamedDecl *Inst, NamedDecl *Pattern) {
InstantiatedFromUsingDecl[Inst] = Pattern;
}
+UsingEnumDecl *
+ASTContext::getInstantiatedFromUsingEnumDecl(UsingEnumDecl *UUD) {
+ auto Pos = InstantiatedFromUsingEnumDecl.find(UUD);
+ if (Pos == InstantiatedFromUsingEnumDecl.end())
+ return nullptr;
+
+ return Pos->second;
+}
+
+void ASTContext::setInstantiatedFromUsingEnumDecl(UsingEnumDecl *Inst,
+ UsingEnumDecl *Pattern) {
+ assert(!InstantiatedFromUsingEnumDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingEnumDecl[Inst] = Pattern;
+}
+
UsingShadowDecl *
ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
@@ -1776,6 +1808,13 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
}
}
+ // Some targets have a hard limitation on the maximum requestable alignment in
+ // aligned attribute for static variables.
+ const unsigned MaxAlignedAttr = getTargetInfo().getMaxAlignedAttribute();
+ const auto *VD = dyn_cast<VarDecl>(D);
+ if (MaxAlignedAttr && VD && VD->getStorageClass() == SC_Static)
+ Align = std::min(Align, MaxAlignedAttr);
+
return toCharUnitsFromBits(Align);
}
@@ -2167,6 +2206,18 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Size; \
break;
#include "clang/Basic/PPCTypes.def"
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, ElKind, ElBits, NF, IsSigned, \
+ IsFP) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = ElBits; \
+ break;
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, ElKind) \
+ case BuiltinType::Id: \
+ Width = 0; \
+ Align = 8; \
+ break;
+#include "clang/Basic/RISCVVTypes.def"
}
break;
case Type::ObjCObjectPointer:
@@ -2422,7 +2473,7 @@ unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
// The preferred alignment of member pointers is that of a pointer.
if (T->isMemberPointerType())
return getPreferredTypeAlign(getPointerDiffType().getTypePtr());
-
+
if (!Target->allowsLargerPreferedTypeAlignment())
return ABIAlign;
@@ -3811,6 +3862,19 @@ ASTContext::getBuiltinVectorTypeInfo(const BuiltinType *Ty) const {
return SVE_ELTTY(BFloat16Ty, 8, 3);
case BuiltinType::SveBFloat16x4:
return SVE_ELTTY(BFloat16Ty, 8, 4);
+#define RVV_VECTOR_TYPE_INT(Name, Id, SingletonId, NumEls, ElBits, NF, \
+ IsSigned) \
+ case BuiltinType::Id: \
+ return {getIntTypeForBitwidth(ElBits, IsSigned), \
+ llvm::ElementCount::getScalable(NumEls), NF};
+#define RVV_VECTOR_TYPE_FLOAT(Name, Id, SingletonId, NumEls, ElBits, NF) \
+ case BuiltinType::Id: \
+ return {ElBits == 16 ? Float16Ty : (ElBits == 32 ? FloatTy : DoubleTy), \
+ llvm::ElementCount::getScalable(NumEls), NF};
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
+ case BuiltinType::Id: \
+ return {BoolTy, llvm::ElementCount::getScalable(NumEls), 1};
+#include "clang/Basic/RISCVVTypes.def"
}
}
@@ -3837,6 +3901,20 @@ QualType ASTContext::getScalableVectorType(QualType EltTy,
if (EltTy->isBooleanType() && NumElts == NumEls) \
return SingletonId;
#include "clang/Basic/AArch64SVEACLETypes.def"
+ } else if (Target->hasRISCVVTypes()) {
+ uint64_t EltTySize = getTypeSize(EltTy);
+#define RVV_VECTOR_TYPE(Name, Id, SingletonId, NumEls, ElBits, NF, IsSigned, \
+ IsFP) \
+ if (!EltTy->isBooleanType() && \
+ ((EltTy->hasIntegerRepresentation() && \
+ EltTy->hasSignedIntegerRepresentation() == IsSigned) || \
+ (EltTy->hasFloatingRepresentation() && IsFP)) && \
+ EltTySize == ElBits && NumElts == NumEls) \
+ return SingletonId;
+#define RVV_PREDICATE_TYPE(Name, Id, SingletonId, NumEls) \
+ if (EltTy->isBooleanType() && NumElts == NumEls) \
+ return SingletonId;
+#include "clang/Basic/RISCVVTypes.def"
}
return QualType();
}
@@ -5705,29 +5783,29 @@ QualType ASTContext::getUnqualifiedArrayType(QualType type,
/// Attempt to unwrap two types that may both be array types with the same bound
/// (or both be array types of unknown bound) for the purpose of comparing the
/// cv-decomposition of two types per C++ [conv.qual].
-bool ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
- bool UnwrappedAny = false;
+void ASTContext::UnwrapSimilarArrayTypes(QualType &T1, QualType &T2) {
while (true) {
auto *AT1 = getAsArrayType(T1);
- if (!AT1) return UnwrappedAny;
+ if (!AT1)
+ return;
auto *AT2 = getAsArrayType(T2);
- if (!AT2) return UnwrappedAny;
+ if (!AT2)
+ return;
// If we don't have two array types with the same constant bound nor two
// incomplete array types, we've unwrapped everything we can.
if (auto *CAT1 = dyn_cast<ConstantArrayType>(AT1)) {
auto *CAT2 = dyn_cast<ConstantArrayType>(AT2);
if (!CAT2 || CAT1->getSize() != CAT2->getSize())
- return UnwrappedAny;
+ return;
} else if (!isa<IncompleteArrayType>(AT1) ||
!isa<IncompleteArrayType>(AT2)) {
- return UnwrappedAny;
+ return;
}
T1 = AT1->getElementType();
T2 = AT2->getElementType();
- UnwrappedAny = true;
}
}
@@ -5840,9 +5918,8 @@ ASTContext::getNameForTemplate(TemplateName Name,
} else {
DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
// DNInfo work in progress: FIXME: source locations?
- DeclarationNameLoc DNLoc;
- DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
- DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ DeclarationNameLoc DNLoc =
+ DeclarationNameLoc::makeCXXOperatorNameLoc(SourceRange());
return DeclarationNameInfo(DName, NameLoc, DNLoc);
}
}
@@ -6545,7 +6622,7 @@ QualType ASTContext::getCFConstantStringType() const {
QualType ASTContext::getObjCSuperType() const {
if (ObjCSuperType.isNull()) {
RecordDecl *ObjCSuperTypeDecl = buildImplicitRecord("objc_super");
- TUDecl->addDecl(ObjCSuperTypeDecl);
+ getTranslationUnitDecl()->addDecl(ObjCSuperTypeDecl);
ObjCSuperType = getTagDeclType(ObjCSuperTypeDecl);
}
return ObjCSuperType;
@@ -7213,13 +7290,15 @@ static char getObjCEncodingForPrimitiveType(const ASTContext *C,
#define SVE_TYPE(Name, Id, SingletonId) \
case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
- {
- DiagnosticsEngine &Diags = C->getDiagnostics();
- unsigned DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "cannot yet @encode type %0");
- Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy());
- return ' ';
- }
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+ {
+ DiagnosticsEngine &Diags = C->getDiagnostics();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet @encode type %0");
+ Diags.Report(DiagID) << BT->getName(C->getPrintingPolicy());
+ return ' ';
+ }
case BuiltinType::ObjCId:
case BuiltinType::ObjCClass:
@@ -7306,6 +7385,40 @@ static void EncodeBitField(const ASTContext *Ctx, std::string& S,
S += llvm::utostr(FD->getBitWidthValue(*Ctx));
}
+// Helper function for determining whether the encoded type string would include
+// a template specialization type.
+static bool hasTemplateSpecializationInEncodedString(const Type *T,
+ bool VisitBasesAndFields) {
+ T = T->getBaseElementTypeUnsafe();
+
+ if (auto *PT = T->getAs<PointerType>())
+ return hasTemplateSpecializationInEncodedString(
+ PT->getPointeeType().getTypePtr(), false);
+
+ auto *CXXRD = T->getAsCXXRecordDecl();
+
+ if (!CXXRD)
+ return false;
+
+ if (isa<ClassTemplateSpecializationDecl>(CXXRD))
+ return true;
+
+ if (!CXXRD->hasDefinition() || !VisitBasesAndFields)
+ return false;
+
+ for (auto B : CXXRD->bases())
+ if (hasTemplateSpecializationInEncodedString(B.getType().getTypePtr(),
+ true))
+ return true;
+
+ for (auto *FD : CXXRD->fields())
+ if (hasTemplateSpecializationInEncodedString(FD->getType().getTypePtr(),
+ true))
+ return true;
+
+ return false;
+}
+
// FIXME: Use SmallString for accumulating string.
void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
const ObjCEncOptions Options,
@@ -7398,6 +7511,15 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
S += '@';
return;
}
+ // If the encoded string for the class includes template names, just emit
+ // "^v" for pointers to the class.
+ if (getLangOpts().CPlusPlus &&
+ (!getLangOpts().EncodeCXXClassTemplateSpec &&
+ hasTemplateSpecializationInEncodedString(
+ RTy, Options.ExpandPointedToStructures()))) {
+ S += "^v";
+ return;
+ }
// fall through...
}
S += '^';
@@ -7876,19 +7998,21 @@ static TypedefDecl *CreateVoidPtrBuiltinVaListDecl(const ASTContext *Context) {
static TypedefDecl *
CreateAArch64ABIBuiltinVaListDecl(const ASTContext *Context) {
- // struct __va_list
RecordDecl *VaListTagDecl = Context->buildImplicitRecord("__va_list");
- if (Context->getLangOpts().CPlusPlus) {
- // namespace std { struct __va_list {
- NamespaceDecl *NS;
- NS = NamespaceDecl::Create(const_cast<ASTContext &>(*Context),
- Context->getTranslationUnitDecl(),
- /*Inline*/ false, SourceLocation(),
- SourceLocation(), &Context->Idents.get("std"),
- /*PrevDecl*/ nullptr);
- NS->setImplicit();
- VaListTagDecl->setDeclContext(NS);
- }
+ // namespace std { struct __va_list {
+ // Note that we create the namespace even in C. This is intentional so that
+ // the type is consistent between C and C++, which is important in cases where
+ // the types need to match between translation units (e.g. with
+ // -fsanitize=cfi-icall). Ideally we wouldn't have created this namespace at
+ // all, but it's now part of the ABI (e.g. in mangled names), so we can't
+ // change it.
+ auto *NS = NamespaceDecl::Create(
+ const_cast<ASTContext &>(*Context), Context->getTranslationUnitDecl(),
+ /*Inline*/ false, SourceLocation(), SourceLocation(),
+ &Context->Idents.get("std"),
+ /*PrevDecl*/ nullptr);
+ NS->setImplicit();
+ VaListTagDecl->setDeclContext(NS);
VaListTagDecl->startDefinition();
@@ -8546,6 +8670,14 @@ bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
return false;
}
+/// getSVETypeSize - Return SVE vector or predicate register size.
+static uint64_t getSVETypeSize(ASTContext &Context, const BuiltinType *Ty) {
+ assert(Ty->isVLSTBuiltinType() && "Invalid SVE Type");
+ return Ty->getKind() == BuiltinType::SveBool
+ ? Context.getLangOpts().ArmSveVectorBits / Context.getCharWidth()
+ : Context.getLangOpts().ArmSveVectorBits;
+}
+
bool ASTContext::areCompatibleSveTypes(QualType FirstType,
QualType SecondType) {
assert(((FirstType->isSizelessBuiltinType() && SecondType->isVectorType()) ||
@@ -8563,7 +8695,7 @@ bool ASTContext::areCompatibleSveTypes(QualType FirstType,
return VT->getElementType().getCanonicalType() ==
FirstType->getSveEltType(*this);
else if (VT->getVectorKind() == VectorType::GenericVector)
- return getTypeSize(SecondType) == getLangOpts().ArmSveVectorBits &&
+ return getTypeSize(SecondType) == getSVETypeSize(*this, BT) &&
hasSameType(VT->getElementType(),
getBuiltinVectorTypeInfo(BT).ElementType);
}
@@ -8582,7 +8714,8 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
"Expected SVE builtin type and vector type!");
auto IsLaxCompatible = [this](QualType FirstType, QualType SecondType) {
- if (!FirstType->getAs<BuiltinType>())
+ const auto *BT = FirstType->getAs<BuiltinType>();
+ if (!BT)
return false;
const auto *VecTy = SecondType->getAs<VectorType>();
@@ -8592,13 +8725,19 @@ bool ASTContext::areLaxCompatibleSveTypes(QualType FirstType,
const LangOptions::LaxVectorConversionKind LVCKind =
getLangOpts().getLaxVectorConversions();
+ // Cannot convert between SVE predicates and SVE vectors because of their
+ // different sizes.
+ if (BT->getKind() == BuiltinType::SveBool &&
+ VecTy->getVectorKind() == VectorType::SveFixedLengthDataVector)
+ return false;
+
// If __ARM_FEATURE_SVE_BITS != N do not allow GNU vector lax conversion.
// "Whenever __ARM_FEATURE_SVE_BITS==N, GNUT implicitly
// converts to VLAT and VLAT implicitly converts to GNUT."
// ACLE Spec Version 00bet6, 3.7.3.2. Behavior common to vectors and
// predicates.
if (VecTy->getVectorKind() == VectorType::GenericVector &&
- getTypeSize(SecondType) != getLangOpts().ArmSveVectorBits)
+ getTypeSize(SecondType) != getSVETypeSize(*this, BT))
return false;
// If -flax-vector-conversions=all is specified, the types are
@@ -10000,7 +10139,12 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
VTy->getNumElements(), VTy->getVectorKind());
- // For enums, we return the unsigned version of the base type.
+ // For _ExtInt, return an unsigned _ExtInt with same width.
+ if (const auto *EITy = T->getAs<ExtIntType>())
+ return getExtIntType(/*IsUnsigned=*/true, EITy->getNumBits());
+
+ // For enums, get the underlying integer type of the enum, and let the
+ // general integer-type sign-changing code handle it.
if (const auto *ETy = T->getAs<EnumType>())
T = ETy->getDecl()->getIntegerType();
@@ -10053,6 +10197,74 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) const {
}
}
+QualType ASTContext::getCorrespondingSignedType(QualType T) const {
+ assert((T->hasUnsignedIntegerRepresentation() ||
+ T->isUnsignedFixedPointType()) &&
+ "Unexpected type");
+
+ // Turn <4 x unsigned int> -> <4 x signed int>
+ if (const auto *VTy = T->getAs<VectorType>())
+ return getVectorType(getCorrespondingSignedType(VTy->getElementType()),
+ VTy->getNumElements(), VTy->getVectorKind());
+
+ // For _ExtInt, return a signed _ExtInt with same width.
+ if (const auto *EITy = T->getAs<ExtIntType>())
+ return getExtIntType(/*IsUnsigned=*/false, EITy->getNumBits());
+
+ // For enums, get the underlying integer type of the enum, and let the
+ // general integer-type sign-changing code handle it.
+ if (const auto *ETy = T->getAs<EnumType>())
+ T = ETy->getDecl()->getIntegerType();
+
+ switch (T->castAs<BuiltinType>()->getKind()) {
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return SignedCharTy;
+ case BuiltinType::UShort:
+ return ShortTy;
+ case BuiltinType::UInt:
+ return IntTy;
+ case BuiltinType::ULong:
+ return LongTy;
+ case BuiltinType::ULongLong:
+ return LongLongTy;
+ case BuiltinType::UInt128:
+ return Int128Ty;
+ // wchar_t is special. It is either unsigned or not, but when it's unsigned,
+ // there's no matching "signed wchar_t". Therefore we return the signed
+ // version of its underlying type instead.
+ case BuiltinType::WChar_U:
+ return getSignedWCharType();
+
+ case BuiltinType::UShortAccum:
+ return ShortAccumTy;
+ case BuiltinType::UAccum:
+ return AccumTy;
+ case BuiltinType::ULongAccum:
+ return LongAccumTy;
+ case BuiltinType::SatUShortAccum:
+ return SatShortAccumTy;
+ case BuiltinType::SatUAccum:
+ return SatAccumTy;
+ case BuiltinType::SatULongAccum:
+ return SatLongAccumTy;
+ case BuiltinType::UShortFract:
+ return ShortFractTy;
+ case BuiltinType::UFract:
+ return FractTy;
+ case BuiltinType::ULongFract:
+ return LongFractTy;
+ case BuiltinType::SatUShortFract:
+ return SatShortFractTy;
+ case BuiltinType::SatUFract:
+ return SatFractTy;
+ case BuiltinType::SatULongFract:
+ return SatLongFractTy;
+ default:
+ llvm_unreachable("Unexpected unsigned integer or fixed point type");
+ }
+}
+
ASTMutationListener::~ASTMutationListener() = default;
void ASTMutationListener::DeducedReturnType(const FunctionDecl *FD,
@@ -10173,6 +10385,11 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
// Read the base type.
switch (*Str++) {
default: llvm_unreachable("Unknown builtin type letter!");
+ case 'x':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'x'!");
+ Type = Context.Float16Ty;
+ break;
case 'y':
assert(HowLong == 0 && !Signed && !Unsigned &&
"Bad modifiers used with 'y'!");
@@ -10556,7 +10773,10 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
return GVA_StrongODR;
// Single source offloading languages like CUDA/HIP need to be able to
// access static device variables from host code of the same compilation
- // unit. This is done by externalizing the static variable.
+ // unit. This is done by externalizing the static variable with a name
+ // shared between the host and device compilations; the name is the same
+ // within a compilation unit but differs between different compilation
+ // units.
if (Context.shouldExternalizeStaticVar(D))
return GVA_StrongExternal;
}
@@ -10799,6 +11019,9 @@ void ASTContext::forEachMultiversionedFunctionVersion(
assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
FD = FD->getMostRecentDecl();
+ // FIXME: The order of traversal here matters and depends on the order of
+ // lookup results, which happens to be (mostly) oldest-to-newest, but we
+ // shouldn't rely on that.
for (auto *CurDecl :
FD->getDeclContext()->getRedeclContext()->lookup(FD->getDeclName())) {
FunctionDecl *CurFD = CurDecl->getAsFunction()->getMostRecentDecl();
@@ -10889,6 +11112,33 @@ MangleContext *ASTContext::createMangleContext(const TargetInfo *T) {
llvm_unreachable("Unsupported ABI");
}
+MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
+ assert(T.getCXXABI().getKind() != TargetCXXABI::Microsoft &&
+ "Device mangle context does not support Microsoft mangling.");
+ switch (T.getCXXABI().getKind()) {
+ case TargetCXXABI::AppleARM64:
+ case TargetCXXABI::Fuchsia:
+ case TargetCXXABI::GenericAArch64:
+ case TargetCXXABI::GenericItanium:
+ case TargetCXXABI::GenericARM:
+ case TargetCXXABI::GenericMIPS:
+ case TargetCXXABI::iOS:
+ case TargetCXXABI::WebAssembly:
+ case TargetCXXABI::WatchOS:
+ case TargetCXXABI::XL:
+ return ItaniumMangleContext::create(
+ *this, getDiagnostics(),
+ [](ASTContext &, const NamedDecl *ND) -> llvm::Optional<unsigned> {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
+ return RD->getDeviceLambdaManglingNumber();
+ return llvm::None;
+ });
+ case TargetCXXABI::Microsoft:
+ return MicrosoftMangleContext::create(*this, getDiagnostics());
+ }
+ llvm_unreachable("Unsupported ABI");
+}
+
CXXABI::~CXXABI() = default;
size_t ASTContext::getSideTableAllocatedMemory() const {
@@ -11436,16 +11686,112 @@ operator<<(const StreamingDiagnostic &DB,
}
bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
- return !getLangOpts().GPURelocatableDeviceCode &&
- ((D->hasAttr<CUDADeviceAttr>() &&
- !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
- (D->hasAttr<CUDAConstantAttr>() &&
- !D->getAttr<CUDAConstantAttr>()->isImplicit())) &&
- isa<VarDecl>(D) && cast<VarDecl>(D)->isFileVarDecl() &&
- cast<VarDecl>(D)->getStorageClass() == SC_Static;
+ bool IsStaticVar =
+ isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
+ bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
+ !D->getAttr<CUDADeviceAttr>()->isImplicit()) ||
+ (D->hasAttr<CUDAConstantAttr>() &&
+ !D->getAttr<CUDAConstantAttr>()->isImplicit());
+ // CUDA/HIP: static managed variables need to be externalized since they
+ // are declarations in IR and therefore cannot have internal linkage.
+ return IsStaticVar &&
+ (D->hasAttr<HIPManagedAttr>() || IsExplicitDeviceVar);
}
bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
return mayExternalizeStaticVar(D) &&
- CUDAStaticDeviceVarReferencedByHost.count(cast<VarDecl>(D));
+ (D->hasAttr<HIPManagedAttr>() ||
+ CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
+}
+
+StringRef ASTContext::getCUIDHash() const {
+ if (!CUIDHash.empty())
+ return CUIDHash;
+ if (LangOpts.CUID.empty())
+ return StringRef();
+ CUIDHash = llvm::utohexstr(llvm::MD5Hash(LangOpts.CUID), /*LowerCase=*/true);
+ return CUIDHash;
+}
+
+// Get the closest named parent, so we can order the SYCL naming decls in a
+// context where mangling is meaningful.
+static const DeclContext *GetNamedParent(const CXXRecordDecl *RD) {
+ const DeclContext *DC = RD->getDeclContext();
+
+ while (!isa<NamedDecl, TranslationUnitDecl>(DC))
+ DC = DC->getParent();
+ return DC;
+}
+
+void ASTContext::AddSYCLKernelNamingDecl(const CXXRecordDecl *RD) {
+ assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
+ RD = RD->getCanonicalDecl();
+ const DeclContext *DC = GetNamedParent(RD);
+
+ assert(RD->getLocation().isValid() &&
+ "Invalid location on kernel naming decl");
+
+ (void)SYCLKernelNamingTypes[DC].insert(RD);
+}
+
+bool ASTContext::IsSYCLKernelNamingDecl(const NamedDecl *ND) const {
+ assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
+ const auto *RD = dyn_cast<CXXRecordDecl>(ND);
+ if (!RD)
+ return false;
+ RD = RD->getCanonicalDecl();
+ const DeclContext *DC = GetNamedParent(RD);
+
+ auto Itr = SYCLKernelNamingTypes.find(DC);
+
+ if (Itr == SYCLKernelNamingTypes.end())
+ return false;
+
+ return Itr->getSecond().count(RD);
+}
+
+// Filters the Decls list to those that share the lambda mangling with the
+// passed RD.
+void ASTContext::FilterSYCLKernelNamingDecls(
+ const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<const CXXRecordDecl *> &Decls) {
+
+ if (!SYCLKernelFilterContext)
+ SYCLKernelFilterContext.reset(
+ ItaniumMangleContext::create(*this, getDiagnostics()));
+
+ llvm::SmallString<128> LambdaSig;
+ llvm::raw_svector_ostream Out(LambdaSig);
+ SYCLKernelFilterContext->mangleLambdaSig(RD, Out);
+
+ llvm::erase_if(Decls, [this, &LambdaSig](const CXXRecordDecl *LocalRD) {
+ llvm::SmallString<128> LocalLambdaSig;
+ llvm::raw_svector_ostream LocalOut(LocalLambdaSig);
+ SYCLKernelFilterContext->mangleLambdaSig(LocalRD, LocalOut);
+ return LambdaSig != LocalLambdaSig;
+ });
+}
+
+unsigned ASTContext::GetSYCLKernelNamingIndex(const NamedDecl *ND) {
+ assert(getLangOpts().isSYCL() && "Only valid for SYCL programs");
+ assert(IsSYCLKernelNamingDecl(ND) &&
+ "Lambda not involved in mangling asked for a naming index?");
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(ND)->getCanonicalDecl();
+ const DeclContext *DC = GetNamedParent(RD);
+
+ auto Itr = SYCLKernelNamingTypes.find(DC);
+ assert(Itr != SYCLKernelNamingTypes.end() && "Not a valid DeclContext?");
+
+ const llvm::SmallPtrSet<const CXXRecordDecl *, 4> &Set = Itr->getSecond();
+
+ llvm::SmallVector<const CXXRecordDecl *> Decls{Set.begin(), Set.end()};
+
+ FilterSYCLKernelNamingDecls(RD, Decls);
+
+ llvm::sort(Decls, [](const CXXRecordDecl *LHS, const CXXRecordDecl *RHS) {
+ return LHS->getLambdaManglingNumber() < RHS->getLambdaManglingNumber();
+ });
+
+ return llvm::find(Decls, RD) - Decls.begin();
}
diff --git a/clang/lib/AST/ASTDiagnostic.cpp b/clang/lib/AST/ASTDiagnostic.cpp
index 2bc731717b98..dc22481d0a84 100644
--- a/clang/lib/AST/ASTDiagnostic.cpp
+++ b/clang/lib/AST/ASTDiagnostic.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/TemplateBase.h"
#include "clang/AST/Type.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -1756,7 +1757,7 @@ class TemplateDiff {
if (FromIntType->isBooleanType()) {
OS << ((FromInt == 0) ? "false" : "true");
} else {
- OS << FromInt.toString(10);
+ OS << toString(FromInt, 10);
}
return;
}
@@ -1800,7 +1801,7 @@ class TemplateDiff {
if (IntType->isBooleanType()) {
OS << ((Val == 0) ? "false" : "true");
} else {
- OS << Val.toString(10);
+ OS << toString(Val, 10);
}
} else if (E) {
PrintExpr(E);
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 085c50c0667b..787e02029dae 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -322,6 +322,21 @@ namespace clang {
}
}
+ void updateLookupTableForTemplateParameters(TemplateParameterList &Params,
+ DeclContext *OldDC) {
+ ASTImporterLookupTable *LT = Importer.SharedState->getLookupTable();
+ if (!LT)
+ return;
+
+ for (NamedDecl *TP : Params)
+ LT->update(TP, OldDC);
+ }
+
+ void updateLookupTableForTemplateParameters(TemplateParameterList &Params) {
+ updateLookupTableForTemplateParameters(
+ Params, Importer.getToContext().getTranslationUnitDecl());
+ }
+
public:
explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) {}
@@ -358,6 +373,8 @@ namespace clang {
ExpectedType VisitDecltypeType(const DecltypeType *T);
ExpectedType VisitUnaryTransformType(const UnaryTransformType *T);
ExpectedType VisitAutoType(const AutoType *T);
+ ExpectedType VisitDeducedTemplateSpecializationType(
+ const DeducedTemplateSpecializationType *T);
ExpectedType VisitInjectedClassNameType(const InjectedClassNameType *T);
// FIXME: DependentDecltypeType
ExpectedType VisitRecordType(const RecordType *T);
@@ -479,6 +496,7 @@ namespace clang {
ExpectedDecl VisitAccessSpecDecl(AccessSpecDecl *D);
ExpectedDecl VisitStaticAssertDecl(StaticAssertDecl *D);
ExpectedDecl VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ ExpectedDecl VisitBindingDecl(BindingDecl *D);
ExpectedDecl VisitNamespaceDecl(NamespaceDecl *D);
ExpectedDecl VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
ExpectedDecl VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
@@ -510,6 +528,8 @@ namespace clang {
ExpectedDecl VisitUsingDecl(UsingDecl *D);
ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D);
ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ ExpectedDecl ImportUsingShadowDecls(BaseUsingDecl *D, BaseUsingDecl *ToSI);
+ ExpectedDecl VisitUsingEnumDecl(UsingEnumDecl *D);
ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
ExpectedDecl VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
ExpectedDecl VisitBuiltinTemplateDecl(BuiltinTemplateDecl *D);
@@ -574,6 +594,7 @@ namespace clang {
// Importing expressions
ExpectedStmt VisitExpr(Expr *E);
+ ExpectedStmt VisitSourceLocExpr(SourceLocExpr *E);
ExpectedStmt VisitVAArgExpr(VAArgExpr *E);
ExpectedStmt VisitChooseExpr(ChooseExpr *E);
ExpectedStmt VisitGNUNullExpr(GNUNullExpr *E);
@@ -1037,6 +1058,10 @@ ExpectedType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
case BuiltinType::Id: \
return Importer.getToContext().Id##Ty;
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ return Importer.getToContext().SingletonId;
+#include "clang/Basic/RISCVVTypes.def"
#define SHARED_SINGLETON_TYPE(Expansion)
#define BUILTIN_TYPE(Id, SingletonId) \
case BuiltinType::Id: return Importer.getToContext().SingletonId;
@@ -1371,6 +1396,20 @@ ExpectedType ASTNodeImporter::VisitAutoType(const AutoType *T) {
ToTemplateArgs);
}
+ExpectedType ASTNodeImporter::VisitDeducedTemplateSpecializationType(
+ const DeducedTemplateSpecializationType *T) {
+ // FIXME: Make sure that the "to" context supports C++17!
+ Expected<TemplateName> ToTemplateNameOrErr = import(T->getTemplateName());
+ if (!ToTemplateNameOrErr)
+ return ToTemplateNameOrErr.takeError();
+ ExpectedType ToDeducedTypeOrErr = import(T->getDeducedType());
+ if (!ToDeducedTypeOrErr)
+ return ToDeducedTypeOrErr.takeError();
+
+ return Importer.getToContext().getDeducedTemplateSpecializationType(
+ *ToTemplateNameOrErr, *ToDeducedTypeOrErr, T->isDependentType());
+}
+
ExpectedType ASTNodeImporter::VisitInjectedClassNameType(
const InjectedClassNameType *T) {
Expected<CXXRecordDecl *> ToDeclOrErr = import(T->getDecl());
@@ -2253,6 +2292,35 @@ ExpectedDecl ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
return ToD;
}
+ExpectedDecl ASTNodeImporter::VisitBindingDecl(BindingDecl *D) {
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToND;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToND, Loc))
+ return std::move(Err);
+ if (ToND)
+ return ToND;
+
+ BindingDecl *ToD;
+ if (GetImportedOrCreateDecl(ToD, D, Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo()))
+ return ToD;
+
+ Error Err = Error::success();
+ QualType ToType = importChecked(Err, D->getType());
+ Expr *ToBinding = importChecked(Err, D->getBinding());
+ ValueDecl *ToDecomposedDecl = importChecked(Err, D->getDecomposedDecl());
+ if (Err)
+ return std::move(Err);
+
+ ToD->setBinding(ToType, ToBinding);
+ ToD->setDecomposedDecl(ToDecomposedDecl);
+ addDeclToContexts(D, ToD);
+
+ return ToD;
+}
+
ExpectedDecl ASTNodeImporter::VisitAccessSpecDecl(AccessSpecDecl *D) {
ExpectedSLoc LocOrErr = import(D->getLocation());
if (!LocOrErr)
@@ -2586,6 +2654,8 @@ ASTNodeImporter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
ToAlias->setAccess(D->getAccess());
ToAlias->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToAlias);
+ if (DC != Importer.getToContext().getTranslationUnitDecl())
+ updateLookupTableForTemplateParameters(*ToTemplateParameters);
return ToAlias;
}
@@ -2711,7 +2781,20 @@ ExpectedDecl ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
D2->setBraceRange(ToBraceRange);
D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
- LexicalDC->addDeclInternal(D2);
+ addDeclToContexts(D, D2);
+
+ if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
+ TemplateSpecializationKind SK = MemberInfo->getTemplateSpecializationKind();
+ EnumDecl *FromInst = D->getInstantiatedFromMemberEnum();
+ if (Expected<EnumDecl *> ToInstOrErr = import(FromInst))
+ D2->setInstantiationOfMemberEnum(*ToInstOrErr, SK);
+ else
+ return ToInstOrErr.takeError();
+ if (ExpectedSLoc POIOrErr = import(MemberInfo->getPointOfInstantiation()))
+ D2->getMemberSpecializationInfo()->setPointOfInstantiation(*POIOrErr);
+ else
+ return POIOrErr.takeError();
+ }
// Import the definition
if (D->isCompleteDefinition())
@@ -2848,6 +2931,8 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return CDeclOrErr.takeError();
D2CXX->setLambdaMangling(DCXX->getLambdaManglingNumber(), *CDeclOrErr,
DCXX->hasKnownLambdaInternalLinkage());
+ D2CXX->setDeviceLambdaManglingNumber(
+ DCXX->getDeviceLambdaManglingNumber());
} else if (DCXX->isInjectedClassName()) {
// We have to be careful to do a similar dance to the one in
// Sema::ActOnStartCXXMemberDeclarations
@@ -3421,11 +3506,13 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
} else if (auto *Guide = dyn_cast<CXXDeductionGuideDecl>(D)) {
ExplicitSpecifier ESpec =
importExplicitSpecifier(Err, Guide->getExplicitSpecifier());
+ CXXConstructorDecl *Ctor =
+ importChecked(Err, Guide->getCorrespondingConstructor());
if (Err)
return std::move(Err);
if (GetImportedOrCreateDecl<CXXDeductionGuideDecl>(
ToFunction, D, Importer.getToContext(), DC, ToInnerLocStart, ESpec,
- NameInfo, T, TInfo, ToEndLoc))
+ NameInfo, T, TInfo, ToEndLoc, Ctor))
return ToFunction;
cast<CXXDeductionGuideDecl>(ToFunction)
->setIsCopyDeductionCandidate(Guide->isCopyDeductionCandidate());
@@ -3465,6 +3552,8 @@ ExpectedDecl ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
for (auto *Param : Parameters) {
Param->setOwningFunction(ToFunction);
ToFunction->addDeclInternal(Param);
+ if (ASTImporterLookupTable *LT = Importer.SharedState->getLookupTable())
+ LT->update(Param, Importer.getToContext().getTranslationUnitDecl());
}
ToFunction->setParams(Parameters);
@@ -3625,6 +3714,10 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
auto ToInitializer = importChecked(Err, D->getInClassInitializer());
if (Err)
return std::move(Err);
+ const Type *ToCapturedVLAType = nullptr;
+ if (Error Err = Importer.importInto(
+ ToCapturedVLAType, cast_or_null<Type>(D->getCapturedVLAType())))
+ return std::move(Err);
FieldDecl *ToField;
if (GetImportedOrCreateDecl(ToField, D, Importer.getToContext(), DC,
@@ -3638,6 +3731,8 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
if (ToInitializer)
ToField->setInClassInitializer(ToInitializer);
ToField->setImplicit(D->isImplicit());
+ if (ToCapturedVLAType)
+ ToField->setCapturedVLAType(cast<VariableArrayType>(ToCapturedVLAType));
LexicalDC->addDeclInternal(ToField);
return ToField;
}
@@ -4003,15 +4098,28 @@ ExpectedDecl ASTNodeImporter::VisitVarDecl(VarDecl *D) {
if (Err)
return std::move(Err);
- // Create the imported variable.
VarDecl *ToVar;
- if (GetImportedOrCreateDecl(ToVar, D, Importer.getToContext(), DC,
- ToInnerLocStart, Loc,
- Name.getAsIdentifierInfo(),
- ToType, ToTypeSourceInfo,
- D->getStorageClass()))
- return ToVar;
+ if (auto *FromDecomp = dyn_cast<DecompositionDecl>(D)) {
+ SmallVector<BindingDecl *> Bindings(FromDecomp->bindings().size());
+ if (Error Err =
+ ImportArrayChecked(FromDecomp->bindings(), Bindings.begin()))
+ return std::move(Err);
+ DecompositionDecl *ToDecomp;
+ if (GetImportedOrCreateDecl(
+ ToDecomp, FromDecomp, Importer.getToContext(), DC, ToInnerLocStart,
+ Loc, ToType, ToTypeSourceInfo, D->getStorageClass(), Bindings))
+ return ToDecomp;
+ ToVar = ToDecomp;
+ } else {
+ // Create the imported variable.
+ if (GetImportedOrCreateDecl(ToVar, D, Importer.getToContext(), DC,
+ ToInnerLocStart, Loc,
+ Name.getAsIdentifierInfo(), ToType,
+ ToTypeSourceInfo, D->getStorageClass()))
+ return ToVar;
+ }
+ ToVar->setTSCSpec(D->getTSCSpec());
ToVar->setQualifierInfo(ToQualifierLoc);
ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
@@ -4517,6 +4625,19 @@ ExpectedDecl ASTNodeImporter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
return ToLinkageSpec;
}
+ExpectedDecl ASTNodeImporter::ImportUsingShadowDecls(BaseUsingDecl *D,
+ BaseUsingDecl *ToSI) {
+ for (UsingShadowDecl *FromShadow : D->shadows()) {
+ if (Expected<UsingShadowDecl *> ToShadowOrErr = import(FromShadow))
+ ToSI->addShadowDecl(*ToShadowOrErr);
+ else
+ // FIXME: We return error here but the definition is already created
+ // and available with lookups. How can we fix this?
+ return ToShadowOrErr.takeError();
+ }
+ return ToSI;
+}
+
ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
DeclContext *DC, *LexicalDC;
DeclarationName Name;
@@ -4556,15 +4677,44 @@ ExpectedDecl ASTNodeImporter::VisitUsingDecl(UsingDecl *D) {
return ToPatternOrErr.takeError();
}
- for (UsingShadowDecl *FromShadow : D->shadows()) {
- if (Expected<UsingShadowDecl *> ToShadowOrErr = import(FromShadow))
- ToUsing->addShadowDecl(*ToShadowOrErr);
+ return ImportUsingShadowDecls(D, ToUsing);
+}
+
+ExpectedDecl ASTNodeImporter::VisitUsingEnumDecl(UsingEnumDecl *D) {
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD = nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
+ if (ToD)
+ return ToD;
+
+ Error Err = Error::success();
+ auto ToUsingLoc = importChecked(Err, D->getUsingLoc());
+ auto ToEnumLoc = importChecked(Err, D->getEnumLoc());
+ auto ToEnumDecl = importChecked(Err, D->getEnumDecl());
+ if (Err)
+ return std::move(Err);
+
+ UsingEnumDecl *ToUsingEnum;
+ if (GetImportedOrCreateDecl(ToUsingEnum, D, Importer.getToContext(), DC,
+ ToUsingLoc, ToEnumLoc, Loc, ToEnumDecl))
+ return ToUsingEnum;
+
+ ToUsingEnum->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToUsingEnum);
+
+ if (UsingEnumDecl *FromPattern =
+ Importer.getFromContext().getInstantiatedFromUsingEnumDecl(D)) {
+ if (Expected<UsingEnumDecl *> ToPatternOrErr = import(FromPattern))
+ Importer.getToContext().setInstantiatedFromUsingEnumDecl(ToUsingEnum,
+ *ToPatternOrErr);
else
- // FIXME: We return error here but the definition is already created
- // and available with lookups. How to fix this?..
- return ToShadowOrErr.takeError();
+ return ToPatternOrErr.takeError();
}
- return ToUsing;
+
+ return ImportUsingShadowDecls(D, ToUsingEnum);
}
ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
@@ -4577,9 +4727,9 @@ ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
if (ToD)
return ToD;
- Expected<UsingDecl *> ToUsingOrErr = import(D->getUsingDecl());
- if (!ToUsingOrErr)
- return ToUsingOrErr.takeError();
+ Expected<BaseUsingDecl *> ToIntroducerOrErr = import(D->getIntroducer());
+ if (!ToIntroducerOrErr)
+ return ToIntroducerOrErr.takeError();
Expected<NamedDecl *> ToTargetOrErr = import(D->getTargetDecl());
if (!ToTargetOrErr)
@@ -4587,7 +4737,7 @@ ExpectedDecl ASTNodeImporter::VisitUsingShadowDecl(UsingShadowDecl *D) {
UsingShadowDecl *ToShadow;
if (GetImportedOrCreateDecl(ToShadow, D, Importer.getToContext(), DC, Loc,
- *ToUsingOrErr, *ToTargetOrErr))
+ Name, *ToIntroducerOrErr, *ToTargetOrErr))
return ToShadow;
ToShadow->setLexicalDeclContext(LexicalDC);
@@ -5058,6 +5208,11 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
auto FoundDecls = Importer.findDeclsInToCtx(DC, Name);
for (auto *FoundDecl : FoundDecls) {
if (auto *FoundProp = dyn_cast<ObjCPropertyDecl>(FoundDecl)) {
+ // Instance and class properties can share the same name but are different
+ // declarations.
+ if (FoundProp->isInstanceProperty() != D->isInstanceProperty())
+ continue;
+
// Check property types.
if (!Importer.IsStructurallyEquivalent(D->getType(),
FoundProp->getType())) {
@@ -5415,6 +5570,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
D2->setLexicalDeclContext(LexicalDC);
addDeclToContexts(D, D2);
+ updateLookupTableForTemplateParameters(**TemplateParamsOrErr);
if (FoundByLookup) {
auto *Recent =
@@ -5558,6 +5714,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Add this partial specialization to the class template.
ClassTemplate->AddPartialSpecialization(PartSpec2, InsertPos);
+ updateLookupTableForTemplateParameters(*ToTPList);
} else { // Not a partial specialization.
if (GetImportedOrCreateDecl(
D2, D, Importer.getToContext(), D->getTagKind(), DC,
@@ -5707,6 +5864,8 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateDecl(VarTemplateDecl *D) {
ToVarTD->setAccess(D->getAccess());
ToVarTD->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToVarTD);
+ if (DC != Importer.getToContext().getTranslationUnitDecl())
+ updateLookupTableForTemplateParameters(**TemplateParamsOrErr);
if (FoundByLookup) {
auto *Recent =
@@ -5832,6 +5991,9 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
D2 = ToPartial;
+ // FIXME: Use this update if VarTemplatePartialSpecializationDecl is fixed
+ // to adopt template parameters.
+ // updateLookupTableForTemplateParameters(**ToTPListOrErr);
} else { // Full specialization
if (GetImportedOrCreateDecl(D2, D, Importer.getToContext(), DC,
*BeginLocOrErr, *IdLocOrErr, VarTemplate,
@@ -5920,14 +6082,30 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
auto ParamsOrErr = import(D->getTemplateParameters());
if (!ParamsOrErr)
return ParamsOrErr.takeError();
+ TemplateParameterList *Params = *ParamsOrErr;
FunctionDecl *TemplatedFD;
if (Error Err = importInto(TemplatedFD, D->getTemplatedDecl()))
return std::move(Err);
+ // Template parameters of the ClassTemplateDecl and FunctionTemplateDecl are
+ // shared, if the FunctionTemplateDecl is a deduction guide for the class.
+ // At import the ClassTemplateDecl object is always created first (FIXME: is
+ // this really true?) because of the dependency, then the FunctionTemplateDecl.
+ // The DeclContext of the template parameters is changed when the
+ // FunctionTemplateDecl is created, but was set already when the class
+ // template was created. So here it is not the TU (default value) any more.
+ // FIXME: The DeclContext of the parameters is now set finally to the
+ // CXXDeductionGuideDecl object that was imported later. This may not be the
+ // same that is in the original AST, especially if there are multiple deduction
+ // guides.
+ DeclContext *OldParamDC = nullptr;
+ if (Params->size() > 0)
+ OldParamDC = Params->getParam(0)->getDeclContext();
+
FunctionTemplateDecl *ToFunc;
if (GetImportedOrCreateDecl(ToFunc, D, Importer.getToContext(), DC, Loc, Name,
- *ParamsOrErr, TemplatedFD))
+ Params, TemplatedFD))
return ToFunc;
TemplatedFD->setDescribedFunctionTemplate(ToFunc);
@@ -5935,6 +6113,7 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
ToFunc->setAccess(D->getAccess());
ToFunc->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(ToFunc);
+ updateLookupTableForTemplateParameters(*Params, OldParamDC);
if (FoundByLookup) {
auto *Recent =
@@ -6476,6 +6655,21 @@ ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
+ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) {
+ Error Err = Error::success();
+ auto BLoc = importChecked(Err, E->getBeginLoc());
+ auto RParenLoc = importChecked(Err, E->getEndLoc());
+ if (Err)
+ return std::move(Err);
+ auto ParentContextOrErr = Importer.ImportContext(E->getParentContext());
+ if (!ParentContextOrErr)
+ return ParentContextOrErr.takeError();
+
+ return new (Importer.getToContext())
+ SourceLocExpr(Importer.getToContext(), E->getIdentKind(), BLoc, RParenLoc,
+ *ParentContextOrErr);
+}
+
ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
Error Err = Error::success();
@@ -8145,28 +8339,37 @@ ASTImporter::Import(ExprWithCleanups::CleanupObject From) {
return make_error<ImportError>(ImportError::UnsupportedConstruct);
}
-Expected<QualType> ASTImporter::Import(QualType FromT) {
- if (FromT.isNull())
- return QualType{};
-
- const Type *FromTy = FromT.getTypePtr();
+Expected<const Type *> ASTImporter::Import(const Type *FromT) {
+ if (!FromT)
+ return FromT;
// Check whether we've already imported this type.
- llvm::DenseMap<const Type *, const Type *>::iterator Pos
- = ImportedTypes.find(FromTy);
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos =
+ ImportedTypes.find(FromT);
if (Pos != ImportedTypes.end())
- return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
+ return Pos->second;
// Import the type
ASTNodeImporter Importer(*this);
- ExpectedType ToTOrErr = Importer.Visit(FromTy);
+ ExpectedType ToTOrErr = Importer.Visit(FromT);
if (!ToTOrErr)
return ToTOrErr.takeError();
// Record the imported type.
- ImportedTypes[FromTy] = (*ToTOrErr).getTypePtr();
+ ImportedTypes[FromT] = ToTOrErr->getTypePtr();
+
+ return ToTOrErr->getTypePtr();
+}
+
+Expected<QualType> ASTImporter::Import(QualType FromT) {
+ if (FromT.isNull())
+ return QualType{};
- return ToContext.getQualifiedType(*ToTOrErr, FromT.getLocalQualifiers());
+ Expected<const Type *> ToTyOrErr = Import(FromT.getTypePtr());
+ if (!ToTyOrErr)
+ return ToTyOrErr.takeError();
+
+ return ToContext.getQualifiedType(*ToTyOrErr, FromT.getLocalQualifiers());
}
Expected<TypeSourceInfo *> ASTImporter::Import(TypeSourceInfo *FromTSI) {
@@ -8311,7 +8514,11 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// traverse of the 'to' context).
auto PosF = ImportedFromDecls.find(ToD);
if (PosF != ImportedFromDecls.end()) {
- SharedState->removeDeclFromLookup(ToD);
+ // In the case of TypedefNameDecl we create the Decl first and only
+ // then we import and set its DeclContext. So, the DC might not be set
+ // when we reach here.
+ if (ToD->getDeclContext())
+ SharedState->removeDeclFromLookup(ToD);
ImportedFromDecls.erase(PosF);
}
@@ -8837,10 +9044,6 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
assert(ToID.isValid() && "Unexpected invalid fileID was created.");
ImportedFileIDs[FromID] = ToID;
-
- if (FileIDImportHandler)
- FileIDImportHandler(ToID, FromID);
-
return ToID;
}
diff --git a/clang/lib/AST/ASTImporterLookupTable.cpp b/clang/lib/AST/ASTImporterLookupTable.cpp
index e17d6082dcdc..b78cc0c053f6 100644
--- a/clang/lib/AST/ASTImporterLookupTable.cpp
+++ b/clang/lib/AST/ASTImporterLookupTable.cpp
@@ -117,6 +117,19 @@ void ASTImporterLookupTable::remove(NamedDecl *ND) {
remove(ReDC, ND);
}
+void ASTImporterLookupTable::update(NamedDecl *ND, DeclContext *OldDC) {
+ assert(OldDC != ND->getDeclContext() &&
+ "DeclContext should be changed before update");
+ if (contains(ND->getDeclContext(), ND)) {
+ assert(!contains(OldDC, ND) &&
+ "Decl should not be found in the old context if already in the new");
+ return;
+ }
+
+ remove(OldDC, ND);
+ add(ND);
+}
+
ASTImporterLookupTable::LookupResult
ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const {
auto DCI = LookupTable.find(DC->getPrimaryContext());
@@ -131,6 +144,10 @@ ASTImporterLookupTable::lookup(DeclContext *DC, DeclarationName Name) const {
return NamesI->second;
}
+bool ASTImporterLookupTable::contains(DeclContext *DC, NamedDecl *ND) const {
+ return 0 < lookup(DC, ND->getDeclName()).count(ND);
+}
+
void ASTImporterLookupTable::dump(DeclContext *DC) const {
auto DCI = LookupTable.find(DC->getPrimaryContext());
if (DCI == LookupTable.end())
diff --git a/clang/lib/AST/ASTStructuralEquivalence.cpp b/clang/lib/AST/ASTStructuralEquivalence.cpp
index d004e443ae06..c4ff05ba9325 100644
--- a/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -86,6 +86,7 @@
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
@@ -1623,7 +1624,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
- << EC1->getDeclName() << EC1->getInitVal().toString(10);
+ << EC1->getDeclName() << toString(EC1->getInitVal(), 10);
Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
}
return false;
@@ -1639,9 +1640,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
- << EC2->getDeclName() << EC2->getInitVal().toString(10);
+ << EC2->getDeclName() << toString(EC2->getInitVal(), 10);
Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
- << EC1->getDeclName() << EC1->getInitVal().toString(10);
+ << EC1->getDeclName() << toString(EC1->getInitVal(), 10);
}
return false;
}
@@ -1653,7 +1654,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
diag::err_odr_tag_type_inconsistent))
<< Context.ToCtx.getTypeDeclType(D2);
Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
- << EC2->getDeclName() << EC2->getInitVal().toString(10);
+ << EC2->getDeclName() << toString(EC2->getInitVal(), 10);
Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
}
return false;
diff --git a/clang/lib/AST/ASTTypeTraits.cpp b/clang/lib/AST/ASTTypeTraits.cpp
index 8f9ceea656a3..4a033bf50bd4 100644
--- a/clang/lib/AST/ASTTypeTraits.cpp
+++ b/clang/lib/AST/ASTTypeTraits.cpp
@@ -140,9 +140,9 @@ ASTNodeKind ASTNodeKind::getFromNode(const OMPClause &C) {
void DynTypedNode::print(llvm::raw_ostream &OS,
const PrintingPolicy &PP) const {
if (const TemplateArgument *TA = get<TemplateArgument>())
- TA->print(PP, OS);
+ TA->print(PP, OS, /*IncludeType*/ true);
else if (const TemplateArgumentLoc *TAL = get<TemplateArgumentLoc>())
- TAL->getArgument().print(PP, OS);
+ TAL->getArgument().print(PP, OS, /*IncludeType*/ true);
else if (const TemplateName *TN = get<TemplateName>())
TN->print(OS, PP);
else if (const NestedNameSpecifier *NNS = get<NestedNameSpecifier>())
diff --git a/clang/lib/AST/AttrImpl.cpp b/clang/lib/AST/AttrImpl.cpp
index 09fdca67995f..662f86722fa3 100644
--- a/clang/lib/AST/AttrImpl.cpp
+++ b/clang/lib/AST/AttrImpl.cpp
@@ -141,57 +141,44 @@ void OMPDeclareTargetDeclAttr::printPrettyPragma(
OS << ' ' << ConvertMapTypeTyToStr(getMapType());
}
-llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
-OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
+llvm::Optional<OMPDeclareTargetDeclAttr *>
+OMPDeclareTargetDeclAttr::getActiveAttr(const ValueDecl *VD) {
if (!VD->hasAttrs())
return llvm::None;
unsigned Level = 0;
- const OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
- for (const auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
- if (Level < Attr->getLevel()) {
+ OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
+ for (auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
+ if (Level <= Attr->getLevel()) {
Level = Attr->getLevel();
FoundAttr = Attr;
}
}
if (FoundAttr)
- return FoundAttr->getMapType();
+ return FoundAttr;
+ return llvm::None;
+}
+llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
+OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
+ llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
+ if (ActiveAttr.hasValue())
+ return ActiveAttr.getValue()->getMapType();
return llvm::None;
}
llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy>
OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
- if (!VD->hasAttrs())
- return llvm::None;
- unsigned Level = 0;
- const OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
- for (const auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
- if (Level < Attr->getLevel()) {
- Level = Attr->getLevel();
- FoundAttr = Attr;
- }
- }
- if (FoundAttr)
- return FoundAttr->getDevType();
-
+ llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
+ if (ActiveAttr.hasValue())
+ return ActiveAttr.getValue()->getDevType();
return llvm::None;
}
llvm::Optional<SourceLocation>
OMPDeclareTargetDeclAttr::getLocation(const ValueDecl *VD) {
- if (!VD->hasAttrs())
- return llvm::None;
- unsigned Level = 0;
- const OMPDeclareTargetDeclAttr *FoundAttr = nullptr;
- for (const auto *Attr : VD->specific_attrs<OMPDeclareTargetDeclAttr>()) {
- if (Level < Attr->getLevel()) {
- Level = Attr->getLevel();
- FoundAttr = Attr;
- }
- }
- if (FoundAttr)
- return FoundAttr->getRange().getBegin();
-
+ llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
+ if (ActiveAttr.hasValue())
+ return ActiveAttr.getValue()->getRange().getBegin();
return llvm::None;
}
diff --git a/clang/lib/AST/CXXABI.h b/clang/lib/AST/CXXABI.h
index 31cb36918726..ca9424bcb7a4 100644
--- a/clang/lib/AST/CXXABI.h
+++ b/clang/lib/AST/CXXABI.h
@@ -22,8 +22,9 @@ class ASTContext;
class CXXConstructorDecl;
class DeclaratorDecl;
class Expr;
-class MemberPointerType;
+class MangleContext;
class MangleNumberingContext;
+class MemberPointerType;
/// Implements C++ ABI-specific semantic analysis functions.
class CXXABI {
@@ -75,6 +76,8 @@ public:
/// Creates an instance of a C++ ABI class.
CXXABI *CreateItaniumCXXABI(ASTContext &Ctx);
CXXABI *CreateMicrosoftCXXABI(ASTContext &Ctx);
+std::unique_ptr<MangleNumberingContext>
+createItaniumNumberingContext(MangleContext *);
}
#endif
diff --git a/clang/lib/AST/CXXInheritance.cpp b/clang/lib/AST/CXXInheritance.cpp
index c87bcf31d120..9027fa7a7515 100644
--- a/clang/lib/AST/CXXInheritance.cpp
+++ b/clang/lib/AST/CXXInheritance.cpp
@@ -386,9 +386,9 @@ static bool isOrdinaryMember(const NamedDecl *ND) {
static bool findOrdinaryMember(const CXXRecordDecl *RD, CXXBasePath &Path,
DeclarationName Name) {
- Path.Decls = RD->lookup(Name);
- for (NamedDecl *ND : Path.Decls)
- if (isOrdinaryMember(ND))
+ Path.Decls = RD->lookup(Name).begin();
+ for (DeclContext::lookup_iterator I = Path.Decls, E = I.end(); I != E; ++I)
+ if (isOrdinaryMember(*I))
return true;
return false;
@@ -453,9 +453,10 @@ std::vector<const NamedDecl *> CXXRecordDecl::lookupDependentName(
},
Paths, /*LookupInDependent=*/true))
return Results;
- for (const NamedDecl *ND : Paths.front().Decls) {
- if (isOrdinaryMember(ND) && Filter(ND))
- Results.push_back(ND);
+ for (DeclContext::lookup_iterator I = Paths.front().Decls, E = I.end();
+ I != E; ++I) {
+ if (isOrdinaryMember(*I) && Filter(*I))
+ Results.push_back(*I);
}
return Results;
}
diff --git a/clang/lib/AST/ComputeDependence.cpp b/clang/lib/AST/ComputeDependence.cpp
index 4026fdc76fd6..5648cf2103d6 100644
--- a/clang/lib/AST/ComputeDependence.cpp
+++ b/clang/lib/AST/ComputeDependence.cpp
@@ -556,6 +556,10 @@ ExprDependence clang::computeDependence(RecoveryExpr *E) {
return D;
}
+ExprDependence clang::computeDependence(SYCLUniqueStableNameExpr *E) {
+ return toExprDependence(E->getTypeSourceInfo()->getType()->getDependence());
+}
+
ExprDependence clang::computeDependence(PredefinedExpr *E) {
return toExprDependence(E->getType()->getDependence()) &
~ExprDependence::UnexpandedPack;
@@ -744,6 +748,10 @@ ExprDependence clang::computeDependence(CXXDefaultInitExpr *E) {
return E->getExpr()->getDependence();
}
+ExprDependence clang::computeDependence(CXXDefaultArgExpr *E) {
+ return E->getExpr()->getDependence();
+}
+
ExprDependence clang::computeDependence(LambdaExpr *E,
bool ContainsUnexpandedParameterPack) {
auto D = toExprDependence(E->getType()->getDependence());
diff --git a/clang/lib/AST/Decl.cpp b/clang/lib/AST/Decl.cpp
index feb9b0645ebc..8f2ecb7bcf2a 100644
--- a/clang/lib/AST/Decl.cpp
+++ b/clang/lib/AST/Decl.cpp
@@ -41,8 +41,8 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Linkage.h"
#include "clang/Basic/Module.h"
+#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/PartialDiagnostic.h"
-#include "clang/Basic/SanitizerBlacklist.h"
#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
@@ -102,7 +102,7 @@ bool Decl::isOutOfLine() const {
TranslationUnitDecl::TranslationUnitDecl(ASTContext &ctx)
: Decl(TranslationUnit, nullptr, SourceLocation()),
- DeclContext(TranslationUnit), Ctx(ctx) {}
+ DeclContext(TranslationUnit), redeclarable_base(ctx), Ctx(ctx) {}
//===----------------------------------------------------------------------===//
// NamedDecl Implementation
@@ -1078,6 +1078,28 @@ bool NamedDecl::isLinkageValid() const {
return L == getCachedLinkage();
}
+ReservedIdentifierStatus
+NamedDecl::isReserved(const LangOptions &LangOpts) const {
+ const IdentifierInfo *II = getIdentifier();
+
+ // This triggers at least for CXXLiteralIdentifiers, which we already checked
+ // at lexing time.
+ if (!II)
+ return ReservedIdentifierStatus::NotReserved;
+
+ ReservedIdentifierStatus Status = II->isReserved(LangOpts);
+ if (Status == ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope) {
+ // Check if we're at TU level or not.
+ if (isa<ParmVarDecl>(this) || isTemplateParameter())
+ return ReservedIdentifierStatus::NotReserved;
+ const DeclContext *DC = getDeclContext()->getRedeclContext();
+ if (!DC->isTranslationUnit())
+ return ReservedIdentifierStatus::NotReserved;
+ }
+
+ return Status;
+}
+
ObjCStringFormatFamily NamedDecl::getObjCFStringFormattingFamily() const {
StringRef name = getName();
if (name.empty()) return SFF_None;
@@ -1347,6 +1369,7 @@ LinkageInfo LinkageComputer::computeLVForDecl(const NamedDecl *D,
case Decl::NamespaceAlias:
case Decl::ParmVar:
case Decl::Using:
+ case Decl::UsingEnum:
case Decl::UsingShadow:
case Decl::UsingDirective:
return LinkageInfo::none();
@@ -1487,10 +1510,13 @@ LinkageInfo LinkageComputer::getLVForDecl(const NamedDecl *D,
}
LinkageInfo LinkageComputer::getDeclLinkageAndVisibility(const NamedDecl *D) {
- return getLVForDecl(D,
- LVComputationKind(usesTypeVisibility(D)
- ? NamedDecl::VisibilityForType
- : NamedDecl::VisibilityForValue));
+ NamedDecl::ExplicitVisibilityKind EK = usesTypeVisibility(D)
+ ? NamedDecl::VisibilityForType
+ : NamedDecl::VisibilityForValue;
+ LVComputationKind CK(EK);
+ return getLVForDecl(D, D->getASTContext().getLangOpts().IgnoreXCOFFVisibility
+ ? CK.forLinkageOnly()
+ : CK);
}
Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
@@ -1609,8 +1635,7 @@ void NamedDecl::printNestedNameSpecifier(raw_ostream &OS,
// Suppress inline namespace if it doesn't make the result ambiguous.
if (P.SuppressInlineNamespace && Ctx->isInlineNamespace() && NameInScope &&
- Ctx->lookup(NameInScope).size() ==
- Ctx->getParent()->lookup(NameInScope).size())
+ cast<NamespaceDecl>(Ctx)->isRedundantInlineQualifierFor(NameInScope))
continue;
// Skip non-named contexts such as linkage specifications and ExportDecls.
@@ -2509,6 +2534,14 @@ bool VarDecl::isNonEscapingByref() const {
return hasAttr<BlocksAttr>() && !NonParmVarDeclBits.EscapingByref;
}
+bool VarDecl::hasDependentAlignment() const {
+ QualType T = getType();
+ return T->isDependentType() || T->isUndeducedAutoType() ||
+ llvm::any_of(specific_attrs<AlignedAttr>(), [](const AlignedAttr *AA) {
+ return AA->isAlignmentDependent();
+ });
+}
+
VarDecl *VarDecl::getTemplateInstantiationPattern() const {
const VarDecl *VD = this;
@@ -3263,6 +3296,8 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
if (const auto *ABAA = getAttr<ArmBuiltinAliasAttr>()) {
BuiltinID = ABAA->getBuiltinName()->getBuiltinID();
+ } else if (const auto *BAA = getAttr<BuiltinAliasAttr>()) {
+ BuiltinID = BAA->getBuiltinName()->getBuiltinID();
} else if (const auto *A = getAttr<BuiltinAttr>()) {
BuiltinID = A->getID();
}
@@ -3273,7 +3308,7 @@ unsigned FunctionDecl::getBuiltinID(bool ConsiderWrapperFunctions) const {
// If the function is marked "overloadable", it has a different mangled name
// and is not the C library function.
if (!ConsiderWrapperFunctions && hasAttr<OverloadableAttr>() &&
- !hasAttr<ArmBuiltinAliasAttr>())
+ (!hasAttr<ArmBuiltinAliasAttr>() && !hasAttr<BuiltinAliasAttr>()))
return 0;
ASTContext &Context = getASTContext();
@@ -4553,6 +4588,13 @@ RecordDecl::field_iterator RecordDecl::field_begin() const {
void RecordDecl::completeDefinition() {
assert(!isCompleteDefinition() && "Cannot redefine record!");
TagDecl::completeDefinition();
+
+ ASTContext &Ctx = getASTContext();
+
+ // Layouts are dumped when computed, so if we are dumping for all complete
+ // types, we need to force usage to get types that wouldn't be used elsewhere.
+ if (Ctx.getLangOpts().DumpRecordLayoutsComplete)
+ (void)Ctx.getASTRecordLayout(this);
}
/// isMsStruct - Get whether or not this record uses ms_struct layout.
@@ -4594,7 +4636,7 @@ bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
(SanitizerKind::Address | SanitizerKind::KernelAddress);
if (!EnabledAsanMask || !Context.getLangOpts().SanitizeAddressFieldPadding)
return false;
- const auto &Blacklist = Context.getSanitizerBlacklist();
+ const auto &NoSanitizeList = Context.getNoSanitizeList();
const auto *CXXRD = dyn_cast<CXXRecordDecl>(this);
// We may be able to relax some of these requirements.
int ReasonToReject = -1;
@@ -4610,12 +4652,11 @@ bool RecordDecl::mayInsertExtraPadding(bool EmitRemark) const {
ReasonToReject = 4; // has trivial destructor.
else if (CXXRD->isStandardLayout())
ReasonToReject = 5; // is standard layout.
- else if (Blacklist.isBlacklistedLocation(EnabledAsanMask, getLocation(),
+ else if (NoSanitizeList.containsLocation(EnabledAsanMask, getLocation(),
"field-padding"))
ReasonToReject = 6; // is in an excluded file.
- else if (Blacklist.isBlacklistedType(EnabledAsanMask,
- getQualifiedNameAsString(),
- "field-padding"))
+ else if (NoSanitizeList.containsType(
+ EnabledAsanMask, getQualifiedNameAsString(), "field-padding"))
ReasonToReject = 7; // The type is excluded.
if (EmitRemark) {
diff --git a/clang/lib/AST/DeclBase.cpp b/clang/lib/AST/DeclBase.cpp
index c26d6d1a42ea..3467da2b549e 100644
--- a/clang/lib/AST/DeclBase.cpp
+++ b/clang/lib/AST/DeclBase.cpp
@@ -784,6 +784,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case Using:
case UsingPack:
+ case UsingEnum:
return IDNS_Using;
case ObjCProtocol:
@@ -811,6 +812,9 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case TypeAliasTemplate:
return IDNS_Ordinary | IDNS_Tag | IDNS_Type;
+ case UnresolvedUsingIfExists:
+ return IDNS_Type | IDNS_Ordinary;
+
case OMPDeclareReduction:
return IDNS_OMPReduction;
@@ -1215,7 +1219,6 @@ bool DeclContext::Encloses(const DeclContext *DC) const {
DeclContext *DeclContext::getPrimaryContext() {
switch (getDeclKind()) {
- case Decl::TranslationUnit:
case Decl::ExternCContext:
case Decl::LinkageSpec:
case Decl::Export:
@@ -1227,6 +1230,8 @@ DeclContext *DeclContext::getPrimaryContext() {
// There is only one DeclContext for these entities.
return this;
+ case Decl::TranslationUnit:
+ return static_cast<TranslationUnitDecl *>(this)->getFirstDecl();
case Decl::Namespace:
// The original namespace is our primary context.
return static_cast<NamespaceDecl *>(this)->getOriginalNamespace();
@@ -1281,21 +1286,25 @@ DeclContext *DeclContext::getPrimaryContext() {
}
}
-void
-DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts){
- Contexts.clear();
+template <typename T>
+void collectAllContextsImpl(T *Self, SmallVectorImpl<DeclContext *> &Contexts) {
+ for (T *D = Self->getMostRecentDecl(); D; D = D->getPreviousDecl())
+ Contexts.push_back(D);
- if (getDeclKind() != Decl::Namespace) {
- Contexts.push_back(this);
- return;
- }
+ std::reverse(Contexts.begin(), Contexts.end());
+}
- auto *Self = static_cast<NamespaceDecl *>(this);
- for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
- N = N->getPreviousDecl())
- Contexts.push_back(N);
+void DeclContext::collectAllContexts(SmallVectorImpl<DeclContext *> &Contexts) {
+ Contexts.clear();
- std::reverse(Contexts.begin(), Contexts.end());
+ Decl::Kind Kind = getDeclKind();
+
+ if (Kind == Decl::TranslationUnit)
+ collectAllContextsImpl(static_cast<TranslationUnitDecl *>(this), Contexts);
+ else if (Kind == Decl::Namespace)
+ collectAllContextsImpl(static_cast<NamespaceDecl *>(this), Contexts);
+ else
+ Contexts.push_back(this);
}
std::pair<Decl *, Decl *>
@@ -1394,39 +1403,7 @@ ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
DC->reconcileExternalVisibleStorage();
StoredDeclsList &List = (*Map)[Name];
-
- // Clear out any old external visible declarations, to avoid quadratic
- // performance in the redeclaration checks below.
- List.removeExternalDecls();
-
- if (!List.isNull()) {
- // We have both existing declarations and new declarations for this name.
- // Some of the declarations may simply replace existing ones. Handle those
- // first.
- llvm::SmallVector<unsigned, 8> Skip;
- for (unsigned I = 0, N = Decls.size(); I != N; ++I)
- if (List.HandleRedeclaration(Decls[I], /*IsKnownNewer*/false))
- Skip.push_back(I);
- Skip.push_back(Decls.size());
-
- // Add in any new declarations.
- unsigned SkipPos = 0;
- for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
- if (I == Skip[SkipPos])
- ++SkipPos;
- else
- List.AddSubsequentDecl(Decls[I]);
- }
- } else {
- // Convert the array to a StoredDeclsList.
- for (auto *D : Decls) {
- if (List.isNull())
- List.setOnlyValue(D);
- else
- List.AddSubsequentDecl(D);
- }
- }
-
+ List.replaceExternalDecls(Decls);
return List.getLookupResult();
}
@@ -1538,10 +1515,7 @@ void DeclContext::removeDecl(Decl *D) {
if (Map) {
StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
assert(Pos != Map->end() && "no lookup entry for decl");
- // Remove the decl only if it is contained.
- StoredDeclsList::DeclsTy *Vec = Pos->second.getAsVector();
- if ((Vec && is_contained(*Vec, ND)) || Pos->second.getAsDecl() == ND)
- Pos->second.remove(ND);
+ Pos->second.remove(ND);
}
} while (DC->isTransparentContext() && (DC = DC->getParent()));
}
@@ -1658,8 +1632,6 @@ void DeclContext::buildLookupImpl(DeclContext *DCtx, bool Internal) {
}
}
-NamedDecl *const DeclContextLookupResult::SingleElementDummyList = nullptr;
-
DeclContext::lookup_result
DeclContext::lookup(DeclarationName Name) const {
assert(getDeclKind() != Decl::LinkageSpec &&
@@ -1935,23 +1907,11 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
// In this case, we never try to replace an existing declaration; we'll
// handle that when we finalize the list of declarations for this name.
DeclNameEntries.setHasExternalDecls();
- DeclNameEntries.AddSubsequentDecl(D);
- return;
- }
-
- if (DeclNameEntries.isNull()) {
- DeclNameEntries.setOnlyValue(D);
- return;
- }
-
- if (DeclNameEntries.HandleRedeclaration(D, /*IsKnownNewer*/!Internal)) {
- // This declaration has replaced an existing one for which
- // declarationReplaces returns true.
+ DeclNameEntries.prependDeclNoReplace(D);
return;
}
- // Put this declaration into the appropriate slot.
- DeclNameEntries.AddSubsequentDecl(D);
+ DeclNameEntries.addOrReplaceDecl(D);
}
UsingDirectiveDecl *DeclContext::udir_iterator::operator*() const {
diff --git a/clang/lib/AST/DeclCXX.cpp b/clang/lib/AST/DeclCXX.cpp
index 0368ada0b81c..aeee35d9c74f 100644
--- a/clang/lib/AST/DeclCXX.cpp
+++ b/clang/lib/AST/DeclCXX.cpp
@@ -108,7 +108,8 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
ImplicitCopyConstructorCanHaveConstParamForNonVBase(true),
ImplicitCopyAssignmentHasConstParam(true),
HasDeclaredCopyConstructorWithConstParam(false),
- HasDeclaredCopyAssignmentWithConstParam(false), IsLambda(false),
+ HasDeclaredCopyAssignmentWithConstParam(false),
+ IsAnyDestructorNoReturn(false), IsLambda(false),
IsParsingBaseSpecifiers(false), ComputedVisibleConversions(false),
HasODRHash(false), Definition(D) {}
@@ -424,6 +425,9 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (!BaseClassDecl->hasIrrelevantDestructor())
data().HasIrrelevantDestructor = false;
+ if (BaseClassDecl->isAnyDestructorNoReturn())
+ data().IsAnyDestructorNoReturn = true;
+
// C++11 [class.copy]p18:
// The implicitly-declared copy assignment operator for a class X will
// have the form 'X& X::operator=(const X&)' if each direct base class B
@@ -836,6 +840,9 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().HasTrivialSpecialMembers &= ~SMF_Destructor;
data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
}
+
+ if (DD->isNoReturn())
+ data().IsAnyDestructorNoReturn = true;
}
// Handle member functions.
@@ -1233,6 +1240,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
if (!FieldRec->hasIrrelevantDestructor())
data().HasIrrelevantDestructor = false;
+ if (FieldRec->isAnyDestructorNoReturn())
+ data().IsAnyDestructorNoReturn = true;
if (FieldRec->hasObjectMember())
setHasObjectMember(true);
if (FieldRec->hasVolatileMember())
@@ -1593,6 +1602,20 @@ Decl *CXXRecordDecl::getLambdaContextDecl() const {
return getLambdaData().ContextDecl.get(Source);
}
+void CXXRecordDecl::setDeviceLambdaManglingNumber(unsigned Num) const {
+ assert(isLambda() && "Not a lambda closure type!");
+ if (Num)
+ getASTContext().DeviceLambdaManglingNumbers[this] = Num;
+}
+
+unsigned CXXRecordDecl::getDeviceLambdaManglingNumber() const {
+ assert(isLambda() && "Not a lambda closure type!");
+ auto I = getASTContext().DeviceLambdaManglingNumbers.find(this);
+ if (I != getASTContext().DeviceLambdaManglingNumbers.end())
+ return I->second;
+ return 0;
+}
+
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
QualType T =
cast<CXXConversionDecl>(Conv->getUnderlyingDecl()->getAsFunction())
@@ -1874,29 +1897,6 @@ CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
return R.empty() ? nullptr : dyn_cast<CXXDestructorDecl>(R.front());
}
-bool CXXRecordDecl::isAnyDestructorNoReturn() const {
- // Destructor is noreturn.
- if (const CXXDestructorDecl *Destructor = getDestructor())
- if (Destructor->isNoReturn())
- return true;
-
- // Check base classes destructor for noreturn.
- for (const auto &Base : bases())
- if (const CXXRecordDecl *RD = Base.getType()->getAsCXXRecordDecl())
- if (RD->isAnyDestructorNoReturn())
- return true;
-
- // Check fields for noreturn.
- for (const auto *Field : fields())
- if (const CXXRecordDecl *RD =
- Field->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl())
- if (RD->isAnyDestructorNoReturn())
- return true;
-
- // All destructors are not noreturn.
- return false;
-}
-
static bool isDeclContextInNamespace(const DeclContext *DC) {
while (!DC->isTranslationUnit()) {
if (DC->isNamespace())
@@ -2059,19 +2059,21 @@ ExplicitSpecifier ExplicitSpecifier::getFromDecl(FunctionDecl *Function) {
}
}
-CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create(
- ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
- ExplicitSpecifier ES, const DeclarationNameInfo &NameInfo, QualType T,
- TypeSourceInfo *TInfo, SourceLocation EndLocation) {
+CXXDeductionGuideDecl *
+CXXDeductionGuideDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, ExplicitSpecifier ES,
+ const DeclarationNameInfo &NameInfo, QualType T,
+ TypeSourceInfo *TInfo, SourceLocation EndLocation,
+ CXXConstructorDecl *Ctor) {
return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, ES, NameInfo, T,
- TInfo, EndLocation);
+ TInfo, EndLocation, Ctor);
}
CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
unsigned ID) {
return new (C, ID) CXXDeductionGuideDecl(
C, nullptr, SourceLocation(), ExplicitSpecifier(), DeclarationNameInfo(),
- QualType(), nullptr, SourceLocation());
+ QualType(), nullptr, SourceLocation(), nullptr);
}
RequiresExprBodyDecl *RequiresExprBodyDecl::Create(
@@ -2218,7 +2220,7 @@ CXXMethodDecl *CXXMethodDecl::getDevirtualizedMethod(const Expr *Base,
// If the base expression (after skipping derived-to-base conversions) is a
// class prvalue, then we can devirtualize.
Base = Base->getBestDynamicClassTypeExpr();
- if (Base->isRValue() && Base->getType()->isRecordType())
+ if (Base->isPRValue() && Base->getType()->isRecordType())
return this;
// If we don't even know what we would call, we can't devirtualize.
@@ -2492,16 +2494,15 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation L, Expr *Init,
SourceLocation R,
SourceLocation EllipsisLoc)
- : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
+ : Initializee(TInfo), Init(Init), MemberOrEllipsisLocation(EllipsisLoc),
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
IsWritten(false), SourceOrder(0) {}
-CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
- FieldDecl *Member,
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context, FieldDecl *Member,
SourceLocation MemberLoc,
SourceLocation L, Expr *Init,
SourceLocation R)
- : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ : Initializee(Member), Init(Init), MemberOrEllipsisLocation(MemberLoc),
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
IsWritten(false), SourceOrder(0) {}
@@ -2510,7 +2511,7 @@ CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
SourceLocation MemberLoc,
SourceLocation L, Expr *Init,
SourceLocation R)
- : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ : Initializee(Member), Init(Init), MemberOrEllipsisLocation(MemberLoc),
LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
IsWritten(false), SourceOrder(0) {}
@@ -2587,19 +2588,19 @@ void CXXConstructorDecl::anchor() {}
CXXConstructorDecl *CXXConstructorDecl::CreateDeserialized(ASTContext &C,
unsigned ID,
uint64_t AllocKind) {
- bool hasTraillingExplicit = static_cast<bool>(AllocKind & TAKHasTailExplicit);
+ bool hasTrailingExplicit = static_cast<bool>(AllocKind & TAKHasTailExplicit);
bool isInheritingConstructor =
static_cast<bool>(AllocKind & TAKInheritsConstructor);
unsigned Extra =
additionalSizeToAlloc<InheritedConstructor, ExplicitSpecifier>(
- isInheritingConstructor, hasTraillingExplicit);
+ isInheritingConstructor, hasTrailingExplicit);
auto *Result = new (C, ID, Extra) CXXConstructorDecl(
C, nullptr, SourceLocation(), DeclarationNameInfo(), QualType(), nullptr,
ExplicitSpecifier(), false, false, ConstexprSpecKind::Unspecified,
InheritedConstructor(), nullptr);
Result->setInheritingConstructor(isInheritingConstructor);
Result->CXXConstructorDeclBits.HasTrailingExplicitSpecifier =
- hasTraillingExplicit;
+ hasTrailingExplicit;
Result->setExplicitSpecifier(ExplicitSpecifier());
return Result;
}
@@ -2968,10 +2969,10 @@ APValue *LifetimeExtendedTemporaryDecl::getOrCreateValue(bool MayCreate) const {
void UsingShadowDecl::anchor() {}
UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, DeclContext *DC,
- SourceLocation Loc, UsingDecl *Using,
- NamedDecl *Target)
- : NamedDecl(K, DC, Loc, Using ? Using->getDeclName() : DeclarationName()),
- redeclarable_base(C), UsingOrNextShadow(cast<NamedDecl>(Using)) {
+ SourceLocation Loc, DeclarationName Name,
+ BaseUsingDecl *Introducer, NamedDecl *Target)
+ : NamedDecl(K, DC, Loc, Name), redeclarable_base(C),
+ UsingOrNextShadow(Introducer) {
if (Target)
setTargetDecl(Target);
setImplicit();
@@ -2986,12 +2987,12 @@ UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) UsingShadowDecl(UsingShadow, C, EmptyShell());
}
-UsingDecl *UsingShadowDecl::getUsingDecl() const {
+BaseUsingDecl *UsingShadowDecl::getIntroducer() const {
const UsingShadowDecl *Shadow = this;
while (const auto *NextShadow =
dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
Shadow = NextShadow;
- return cast<UsingDecl>(Shadow->UsingOrNextShadow);
+ return cast<BaseUsingDecl>(Shadow->UsingOrNextShadow);
}
void ConstructorUsingShadowDecl::anchor() {}
@@ -3010,25 +3011,25 @@ ConstructorUsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
}
CXXRecordDecl *ConstructorUsingShadowDecl::getNominatedBaseClass() const {
- return getUsingDecl()->getQualifier()->getAsRecordDecl();
+ return getIntroducer()->getQualifier()->getAsRecordDecl();
}
-void UsingDecl::anchor() {}
+void BaseUsingDecl::anchor() {}
-void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
+void BaseUsingDecl::addShadowDecl(UsingShadowDecl *S) {
assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
"declaration already in set");
- assert(S->getUsingDecl() == this);
+ assert(S->getIntroducer() == this);
if (FirstUsingShadow.getPointer())
S->UsingOrNextShadow = FirstUsingShadow.getPointer();
FirstUsingShadow.setPointer(S);
}
-void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
+void BaseUsingDecl::removeShadowDecl(UsingShadowDecl *S) {
assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
"declaration not in set");
- assert(S->getUsingDecl() == this);
+ assert(S->getIntroducer() == this);
// Remove S from the shadow decl chain. This is O(n) but hopefully rare.
@@ -3046,6 +3047,8 @@ void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
S->UsingOrNextShadow = this;
}
+void UsingDecl::anchor() {}
+
UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
NestedNameSpecifierLoc QualifierLoc,
const DeclarationNameInfo &NameInfo,
@@ -3065,6 +3068,23 @@ SourceRange UsingDecl::getSourceRange() const {
return SourceRange(Begin, getNameInfo().getEndLoc());
}
+void UsingEnumDecl::anchor() {}
+
+UsingEnumDecl *UsingEnumDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UL, SourceLocation EL,
+ SourceLocation NL, EnumDecl *Enum) {
+ return new (C, DC) UsingEnumDecl(DC, Enum->getDeclName(), UL, EL, NL, Enum);
+}
+
+UsingEnumDecl *UsingEnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID) UsingEnumDecl(nullptr, DeclarationName(), SourceLocation(),
+ SourceLocation(), SourceLocation(), nullptr);
+}
+
+SourceRange UsingEnumDecl::getSourceRange() const {
+ return SourceRange(EnumLocation, getLocation());
+}
+
void UsingPackDecl::anchor() {}
UsingPackDecl *UsingPackDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3135,6 +3155,25 @@ UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
SourceLocation(), nullptr, SourceLocation());
}
+UnresolvedUsingIfExistsDecl *
+UnresolvedUsingIfExistsDecl::Create(ASTContext &Ctx, DeclContext *DC,
+ SourceLocation Loc, DeclarationName Name) {
+ return new (Ctx, DC) UnresolvedUsingIfExistsDecl(DC, Loc, Name);
+}
+
+UnresolvedUsingIfExistsDecl *
+UnresolvedUsingIfExistsDecl::CreateDeserialized(ASTContext &Ctx, unsigned ID) {
+ return new (Ctx, ID)
+ UnresolvedUsingIfExistsDecl(nullptr, SourceLocation(), DeclarationName());
+}
+
+UnresolvedUsingIfExistsDecl::UnresolvedUsingIfExistsDecl(DeclContext *DC,
+ SourceLocation Loc,
+ DeclarationName Name)
+ : NamedDecl(Decl::UnresolvedUsingIfExists, DC, Loc, Name) {}
+
+void UnresolvedUsingIfExistsDecl::anchor() {}
+
void StaticAssertDecl::anchor() {}
StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
@@ -3164,12 +3203,6 @@ BindingDecl *BindingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) BindingDecl(nullptr, SourceLocation(), nullptr);
}
-ValueDecl *BindingDecl::getDecomposedDecl() const {
- ExternalASTSource *Source =
- Decomp.isOffset() ? getASTContext().getExternalSource() : nullptr;
- return cast_or_null<ValueDecl>(Decomp.get(Source));
-}
-
VarDecl *BindingDecl::getHoldingVar() const {
Expr *B = getBinding();
if (!B)
diff --git a/clang/lib/AST/DeclObjC.cpp b/clang/lib/AST/DeclObjC.cpp
index 5f82fcec90e3..6e790f03b027 100644
--- a/clang/lib/AST/DeclObjC.cpp
+++ b/clang/lib/AST/DeclObjC.cpp
@@ -826,7 +826,8 @@ ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
}
bool ObjCMethodDecl::isDirectMethod() const {
- return hasAttr<ObjCDirectAttr>();
+ return hasAttr<ObjCDirectAttr>() &&
+ !getASTContext().getLangOpts().ObjCDisableDirectMethodsForTesting;
}
bool ObjCMethodDecl::isThisDeclarationADesignatedInitializer() const {
@@ -2295,6 +2296,11 @@ QualType ObjCPropertyDecl::getUsageType(QualType objectType) const {
ObjCSubstitutionContext::Property);
}
+bool ObjCPropertyDecl::isDirectProperty() const {
+ return (PropertyAttributes & ObjCPropertyAttribute::kind_direct) &&
+ !getASTContext().getLangOpts().ObjCDisableDirectMethodsForTesting;
+}
+
//===----------------------------------------------------------------------===//
// ObjCPropertyImplDecl
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index ca64f8f6cfbe..4dcf3d0e6ab1 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -98,6 +98,7 @@ namespace {
void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingEnumDecl(UsingEnumDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitOMPThreadPrivateDecl(OMPThreadPrivateDecl *D);
void VisitOMPAllocateDecl(OMPAllocateDecl *D);
@@ -110,8 +111,12 @@ namespace {
void printTemplateParameters(const TemplateParameterList *Params,
bool OmitTemplateKW = false);
- void printTemplateArguments(llvm::ArrayRef<TemplateArgument> Args);
- void printTemplateArguments(llvm::ArrayRef<TemplateArgumentLoc> Args);
+ void printTemplateArguments(llvm::ArrayRef<TemplateArgument> Args,
+ const TemplateParameterList *Params,
+ bool TemplOverloaded);
+ void printTemplateArguments(llvm::ArrayRef<TemplateArgumentLoc> Args,
+ const TemplateParameterList *Params,
+ bool TemplOverloaded);
void prettyPrintAttributes(Decl *D);
void prettyPrintPragmas(Decl *D);
void printDeclType(QualType T, StringRef DeclName, bool Pack = false);
@@ -341,7 +346,8 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
SimpleInit = Init;
if (SimpleInit)
- SimpleInit->printPretty(Out, nullptr, Policy, Indentation);
+ SimpleInit->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
else {
for (unsigned I = 0; I != NumArgs; ++I) {
assert(Args[I] != nullptr && "Expected non-null Expr");
@@ -350,7 +356,8 @@ void DeclPrinter::PrintConstructorInitializers(CXXConstructorDecl *CDecl,
if (I)
Out << ", ";
- Args[I]->printPretty(Out, nullptr, Policy, Indentation);
+ Args[I]->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
}
}
}
@@ -568,13 +575,14 @@ void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
}
static void printExplicitSpecifier(ExplicitSpecifier ES, llvm::raw_ostream &Out,
- PrintingPolicy &Policy,
- unsigned Indentation) {
+ PrintingPolicy &Policy, unsigned Indentation,
+ const ASTContext &Context) {
std::string Proto = "explicit";
llvm::raw_string_ostream EOut(Proto);
if (ES.getExpr()) {
EOut << "(";
- ES.getExpr()->printPretty(EOut, nullptr, Policy, Indentation);
+ ES.getExpr()->printPretty(EOut, nullptr, Policy, Indentation, "\n",
+ &Context);
EOut << ")";
}
EOut << " ";
@@ -616,7 +624,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isConsteval()) Out << "consteval ";
ExplicitSpecifier ExplicitSpec = ExplicitSpecifier::getFromDecl(D);
if (ExplicitSpec.isSpecified())
- printExplicitSpecifier(ExplicitSpec, Out, Policy, Indentation);
+ printExplicitSpecifier(ExplicitSpec, Out, Policy, Indentation, Context);
}
PrintingPolicy SubPolicy(Policy);
@@ -641,11 +649,16 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
llvm::raw_string_ostream POut(Proto);
DeclPrinter TArgPrinter(POut, SubPolicy, Context, Indentation);
const auto *TArgAsWritten = D->getTemplateSpecializationArgsAsWritten();
+ const TemplateParameterList *TPL = D->getTemplateSpecializationInfo()
+ ->getTemplate()
+ ->getTemplateParameters();
if (TArgAsWritten && !Policy.PrintCanonicalTypes)
- TArgPrinter.printTemplateArguments(TArgAsWritten->arguments());
+ TArgPrinter.printTemplateArguments(TArgAsWritten->arguments(), TPL,
+ /*TemplOverloaded*/ true);
else if (const TemplateArgumentList *TArgs =
D->getTemplateSpecializationArgs())
- TArgPrinter.printTemplateArguments(TArgs->asArray());
+ TArgPrinter.printTemplateArguments(TArgs->asArray(), TPL,
+ /*TemplOverloaded*/ true);
}
QualType Ty = D->getType();
@@ -720,7 +733,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Proto += "(";
llvm::raw_string_ostream EOut(Proto);
FT->getNoexceptExpr()->printPretty(EOut, nullptr, SubPolicy,
- Indentation);
+ Indentation, "\n", &Context);
EOut.flush();
Proto += EOut.str();
Proto += ")";
@@ -744,7 +757,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (Expr *TrailingRequiresClause = D->getTrailingRequiresClause()) {
Out << " requires ";
- TrailingRequiresClause->printPretty(Out, nullptr, SubPolicy, Indentation);
+ TrailingRequiresClause->printPretty(Out, nullptr, SubPolicy, Indentation,
+ "\n", &Context);
}
} else {
Ty.print(Out, Policy, Proto);
@@ -776,7 +790,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
Out << ' ';
if (D->getBody())
- D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation);
+ D->getBody()->printPretty(Out, nullptr, SubPolicy, Indentation, "\n",
+ &Context);
} else {
if (!Policy.TerseOutput && isa<CXXConstructorDecl>(*D))
Out << " {}";
@@ -821,7 +836,8 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
if (D->isBitField()) {
Out << " : ";
- D->getBitWidth()->printPretty(Out, nullptr, Policy, Indentation);
+ D->getBitWidth()->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
}
Expr *Init = D->getInClassInitializer();
@@ -830,7 +846,7 @@ void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
Out << " ";
else
Out << " = ";
- Init->printPretty(Out, nullptr, Policy, Indentation);
+ Init->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
prettyPrintAttributes(D);
}
@@ -895,7 +911,7 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressSpecifiers = false;
SubPolicy.IncludeTagDefinition = false;
- Init->printPretty(Out, nullptr, SubPolicy, Indentation);
+ Init->printPretty(Out, nullptr, SubPolicy, Indentation, "\n", &Context);
if ((D->getInitStyle() == VarDecl::CallInit) && !isa<ParenListExpr>(Init))
Out << ")";
}
@@ -909,7 +925,8 @@ void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
Out << "__asm (";
- D->getAsmString()->printPretty(Out, nullptr, Policy, Indentation);
+ D->getAsmString()->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
Out << ")";
}
@@ -920,10 +937,11 @@ void DeclPrinter::VisitImportDecl(ImportDecl *D) {
void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
Out << "static_assert(";
- D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation);
+ D->getAssertExpr()->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
if (StringLiteral *SL = D->getMessage()) {
Out << ", ";
- SL->printPretty(Out, nullptr, Policy, Indentation);
+ SL->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
Out << ")";
}
@@ -980,7 +998,9 @@ void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
if (const auto *TST =
dyn_cast<TemplateSpecializationType>(TSI->getType()))
Args = TST->template_arguments();
- printTemplateArguments(Args);
+ printTemplateArguments(
+ Args, S->getSpecializedTemplate()->getTemplateParameters(),
+ /*TemplOverloaded*/ false);
}
}
@@ -1072,22 +1092,36 @@ void DeclPrinter::printTemplateParameters(const TemplateParameterList *Params,
Out << ' ';
}
-void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgument> Args) {
+void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgument> Args,
+ const TemplateParameterList *Params,
+ bool TemplOverloaded) {
Out << "<";
for (size_t I = 0, E = Args.size(); I < E; ++I) {
if (I)
Out << ", ";
- Args[I].print(Policy, Out);
+ if (TemplOverloaded || !Params)
+ Args[I].print(Policy, Out, /*IncludeType*/ true);
+ else
+ Args[I].print(
+ Policy, Out,
+ TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
}
Out << ">";
}
-void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgumentLoc> Args) {
+void DeclPrinter::printTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
+ const TemplateParameterList *Params,
+ bool TemplOverloaded) {
Out << "<";
for (size_t I = 0, E = Args.size(); I < E; ++I) {
if (I)
Out << ", ";
- Args[I].getArgument().print(Policy, Out);
+ if (TemplOverloaded)
+ Args[I].getArgument().print(Policy, Out, /*IncludeType*/ true);
+ else
+ Args[I].getArgument().print(
+ Policy, Out,
+ TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
}
Out << ">";
}
@@ -1110,9 +1144,8 @@ void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
Visit(TD);
else if (const auto *Concept = dyn_cast<ConceptDecl>(D)) {
Out << "concept " << Concept->getName() << " = " ;
- Concept->getConstraintExpr()->printPretty(Out, nullptr, Policy,
- Indentation);
- Out << ";";
+ Concept->getConstraintExpr()->printPretty(Out, nullptr, Policy, Indentation,
+ "\n", &Context);
}
}
@@ -1271,7 +1304,8 @@ void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
if (OMD->getBody() && !Policy.TerseOutput) {
Out << ' ';
- OMD->getBody()->printPretty(Out, nullptr, Policy);
+ OMD->getBody()->printPretty(Out, nullptr, Policy, Indentation, "\n",
+ &Context);
}
else if (Policy.PolishForDeclaration)
Out << ';';
@@ -1575,6 +1609,10 @@ void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
Out << *D;
}
+void DeclPrinter::VisitUsingEnumDecl(UsingEnumDecl *D) {
+ Out << "using enum " << D->getEnumDecl();
+}
+
void
DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
Out << "using typename ";
@@ -1651,7 +1689,7 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
Out << " : ";
D->getType().print(Out, Policy);
Out << " : ";
- D->getCombiner()->printPretty(Out, nullptr, Policy, 0);
+ D->getCombiner()->printPretty(Out, nullptr, Policy, 0, "\n", &Context);
Out << ")";
if (auto *Init = D->getInitializer()) {
Out << " initializer(";
@@ -1665,7 +1703,7 @@ void DeclPrinter::VisitOMPDeclareReductionDecl(OMPDeclareReductionDecl *D) {
case OMPDeclareReductionDecl::CallInit:
break;
}
- Init->printPretty(Out, nullptr, Policy, 0);
+ Init->printPretty(Out, nullptr, Policy, 0, "\n", &Context);
if (D->getInitializerKind() == OMPDeclareReductionDecl::DirectInit)
Out << ")";
Out << ")";
@@ -1693,7 +1731,7 @@ void DeclPrinter::VisitOMPDeclareMapperDecl(OMPDeclareMapperDecl *D) {
}
void DeclPrinter::VisitOMPCapturedExprDecl(OMPCapturedExprDecl *D) {
- D->getInit()->printPretty(Out, nullptr, Policy, Indentation);
+ D->getInit()->printPretty(Out, nullptr, Policy, Indentation, "\n", &Context);
}
void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
@@ -1727,6 +1765,7 @@ void DeclPrinter::VisitNonTypeTemplateParmDecl(
if (NTTP->hasDefaultArgument()) {
Out << " = ";
- NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation);
+ NTTP->getDefaultArgument()->printPretty(Out, nullptr, Policy, Indentation,
+ "\n", &Context);
}
}
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index 25235c56ec46..ec8b00a9eb7d 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -68,12 +68,16 @@ TemplateParameterList::TemplateParameterList(const ASTContext& C,
if (!IsPack &&
TTP->getTemplateParameters()->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
- } else if (const TypeConstraint *TC =
- cast<TemplateTypeParmDecl>(P)->getTypeConstraint()) {
- if (TC->getImmediatelyDeclaredConstraint()
- ->containsUnexpandedParameterPack())
- ContainsUnexpandedParameterPack = true;
- HasConstrainedParameters = true;
+ } else if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(P)) {
+ if (const TypeConstraint *TC = TTP->getTypeConstraint()) {
+ if (TC->getImmediatelyDeclaredConstraint()
+ ->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+ }
+ if (TTP->hasTypeConstraint())
+ HasConstrainedParameters = true;
+ } else {
+ llvm_unreachable("unexpcted template parameter type");
}
// FIXME: If a default argument contains an unexpanded parameter pack, the
// template parameter list does too.
@@ -86,6 +90,30 @@ TemplateParameterList::TemplateParameterList(const ASTContext& C,
}
}
+bool TemplateParameterList::containsUnexpandedParameterPack() const {
+ if (ContainsUnexpandedParameterPack)
+ return true;
+ if (!HasConstrainedParameters)
+ return false;
+
+ // An implicit constrained parameter might have had a use of an unexpanded
+ // pack added to it after the template parameter list was created. All
+ // implicit parameters are at the end of the parameter list.
+ for (const NamedDecl *Param : llvm::reverse(asArray())) {
+ if (!Param->isImplicit())
+ break;
+
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ const auto *TC = TTP->getTypeConstraint();
+ if (TC && TC->getImmediatelyDeclaredConstraint()
+ ->containsUnexpandedParameterPack())
+ return true;
+ }
+ }
+
+ return false;
+}
+
TemplateParameterList *
TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
@@ -167,6 +195,18 @@ bool TemplateParameterList::hasAssociatedConstraints() const {
return HasRequiresClause || HasConstrainedParameters;
}
+bool TemplateParameterList::shouldIncludeTypeForArgument(
+ const TemplateParameterList *TPL, unsigned Idx) {
+ if (!TPL || Idx >= TPL->size())
+ return true;
+ const NamedDecl *TemplParam = TPL->getParam(Idx);
+ if (const auto *ParamValueDecl =
+ dyn_cast<NonTypeTemplateParmDecl>(TemplParam))
+ if (ParamValueDecl->getType()->getContainedDeducedType())
+ return true;
+ return false;
+}
+
namespace clang {
void *allocateDefaultArgStorageChain(const ASTContext &C) {
@@ -1420,8 +1460,9 @@ void TypeConstraint::print(llvm::raw_ostream &OS, PrintingPolicy Policy) const {
ConceptName.printName(OS, Policy);
if (hasExplicitTemplateArgs()) {
OS << "<";
+ // FIXME: Find corresponding parameter for argument
for (auto &ArgLoc : ArgsAsWritten->arguments())
- ArgLoc.getArgument().print(Policy, OS);
+ ArgLoc.getArgument().print(Policy, OS, /*IncludeType*/ false);
OS << ">";
}
}
diff --git a/clang/lib/AST/DeclarationName.cpp b/clang/lib/AST/DeclarationName.cpp
index ecf676c9936d..56cf4b457a48 100644
--- a/clang/lib/AST/DeclarationName.cpp
+++ b/clang/lib/AST/DeclarationName.cpp
@@ -392,14 +392,13 @@ DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- NamedType.TInfo = nullptr;
+ setNamedTypeLoc(nullptr);
break;
case DeclarationName::CXXOperatorName:
- CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
- CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ setCXXOperatorNameRange(SourceRange());
break;
case DeclarationName::CXXLiteralOperatorName:
- CXXLiteralOperatorName.OpNameLoc = SourceLocation().getRawEncoding();
+ setCXXLiteralOperatorNameLoc(SourceLocation());
break;
case DeclarationName::ObjCZeroArgSelector:
case DeclarationName::ObjCOneArgSelector:
@@ -426,7 +425,7 @@ bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ if (TypeSourceInfo *TInfo = LocInfo.getNamedTypeInfo())
return TInfo->getType()->containsUnexpandedParameterPack();
return Name.getCXXNameType()->containsUnexpandedParameterPack();
@@ -449,7 +448,7 @@ bool DeclarationNameInfo::isInstantiationDependent() const {
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ if (TypeSourceInfo *TInfo = LocInfo.getNamedTypeInfo())
return TInfo->getType()->isInstantiationDependentType();
return Name.getCXXNameType()->isInstantiationDependentType();
@@ -486,7 +485,7 @@ void DeclarationNameInfo::printName(raw_ostream &OS, PrintingPolicy Policy) cons
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) {
+ if (TypeSourceInfo *TInfo = LocInfo.getNamedTypeInfo()) {
if (Name.getNameKind() == DeclarationName::CXXDestructorName)
OS << '~';
else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
@@ -508,20 +507,16 @@ SourceLocation DeclarationNameInfo::getEndLocPrivate() const {
case DeclarationName::CXXDeductionGuideName:
return NameLoc;
- case DeclarationName::CXXOperatorName: {
- unsigned raw = LocInfo.CXXOperatorName.EndOpNameLoc;
- return SourceLocation::getFromRawEncoding(raw);
- }
+ case DeclarationName::CXXOperatorName:
+ return LocInfo.getCXXOperatorNameEndLoc();
- case DeclarationName::CXXLiteralOperatorName: {
- unsigned raw = LocInfo.CXXLiteralOperatorName.OpNameLoc;
- return SourceLocation::getFromRawEncoding(raw);
- }
+ case DeclarationName::CXXLiteralOperatorName:
+ return LocInfo.getCXXLiteralOperatorNameLoc();
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ if (TypeSourceInfo *TInfo = LocInfo.getNamedTypeInfo())
return TInfo->getTypeLoc().getEndLoc();
else
return NameLoc;
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index adb33036a168..e8b4aaa2b81e 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -32,6 +32,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cstring>
@@ -111,7 +112,7 @@ const Expr *Expr::skipRValueSubobjectAdjustments(
}
} else if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getOpcode() == BO_PtrMemD) {
- assert(BO->getRHS()->isRValue());
+ assert(BO->getRHS()->isPRValue());
E = BO->getLHS();
const MemberPointerType *MPT =
BO->getRHS()->getType()->getAs<MemberPointerType>();
@@ -489,6 +490,8 @@ DeclRefExpr *DeclRefExpr::CreateEmpty(const ASTContext &Context,
void DeclRefExpr::setDecl(ValueDecl *NewD) {
D = NewD;
+ if (getType()->isUndeducedType())
+ setType(NewD->getType());
setDependence(computeDependence(this, NewD->getASTContext()));
}
@@ -503,6 +506,70 @@ SourceLocation DeclRefExpr::getEndLoc() const {
return getNameInfo().getEndLoc();
}
+SYCLUniqueStableNameExpr::SYCLUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ QualType ResultTy,
+ TypeSourceInfo *TSI)
+ : Expr(SYCLUniqueStableNameExprClass, ResultTy, VK_PRValue, OK_Ordinary),
+ OpLoc(OpLoc), LParen(LParen), RParen(RParen) {
+ setTypeSourceInfo(TSI);
+ setDependence(computeDependence(this));
+}
+
+SYCLUniqueStableNameExpr::SYCLUniqueStableNameExpr(EmptyShell Empty,
+ QualType ResultTy)
+ : Expr(SYCLUniqueStableNameExprClass, ResultTy, VK_PRValue, OK_Ordinary) {}
+
+SYCLUniqueStableNameExpr *
+SYCLUniqueStableNameExpr::Create(const ASTContext &Ctx, SourceLocation OpLoc,
+ SourceLocation LParen, SourceLocation RParen,
+ TypeSourceInfo *TSI) {
+ QualType ResultTy = Ctx.getPointerType(Ctx.CharTy.withConst());
+ return new (Ctx)
+ SYCLUniqueStableNameExpr(OpLoc, LParen, RParen, ResultTy, TSI);
+}
+
+SYCLUniqueStableNameExpr *
+SYCLUniqueStableNameExpr::CreateEmpty(const ASTContext &Ctx) {
+ QualType ResultTy = Ctx.getPointerType(Ctx.CharTy.withConst());
+ return new (Ctx) SYCLUniqueStableNameExpr(EmptyShell(), ResultTy);
+}
+
+std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context) const {
+ return SYCLUniqueStableNameExpr::ComputeName(Context,
+ getTypeSourceInfo()->getType());
+}
+
+std::string SYCLUniqueStableNameExpr::ComputeName(ASTContext &Context,
+ QualType Ty) {
+ auto MangleCallback = [](ASTContext &Ctx,
+ const NamedDecl *ND) -> llvm::Optional<unsigned> {
+ // This replaces the 'lambda number' in the mangling with a unique number
+ // based on its order in the declaration. To provide some level of visual
+ // notability (actual uniqueness from normal lambdas isn't necessary, as
+ // these are used differently), we add 10,000 to the number.
+ // For example:
+ // _ZTSZ3foovEUlvE10005_
+ // Demangles to: typeinfo name for foo()::'lambda10005'()
+ // Note that the mangler subtracts 2, since with normal lambdas the lambda
+ // mangling number '0' is an anonymous struct mangle, and '1' is omitted.
+ // So 10,002 results in the first number being 10,000.
+ if (Ctx.IsSYCLKernelNamingDecl(ND))
+ return 10'002 + Ctx.GetSYCLKernelNamingIndex(ND);
+ return llvm::None;
+ };
+ std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
+ Context, Context.getDiagnostics(), MangleCallback)};
+
+ std::string Buffer;
+ Buffer.reserve(128);
+ llvm::raw_string_ostream Out(Buffer);
+ Ctx->mangleTypeName(Ty, Out);
+
+ return Out.str();
+}
+
PredefinedExpr::PredefinedExpr(SourceLocation L, QualType FNTy, IdentKind IK,
StringLiteral *SL)
: Expr(PredefinedExprClass, FNTy, VK_LValue, OK_Ordinary) {
@@ -705,7 +772,9 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
StringRef Param = Params->getParam(i)->getName();
if (Param.empty()) continue;
TOut << Param << " = ";
- Args.get(i).print(Policy, TOut);
+ Args.get(i).print(
+ Policy, TOut,
+ TemplateParameterList::shouldIncludeTypeForArgument(Params, i));
TOut << ", ";
}
}
@@ -721,7 +790,7 @@ std::string PredefinedExpr::ComputeName(IdentKind IK, const Decl *CurrentDecl) {
StringRef Param = Params->getParam(i)->getName();
if (Param.empty()) continue;
TOut << Param << " = ";
- Args->get(i).print(Policy, TOut);
+ Args->get(i).print(Policy, TOut, /*IncludeType*/ true);
TOut << ", ";
}
}
@@ -811,7 +880,7 @@ void APNumericStorage::setIntValue(const ASTContext &C,
IntegerLiteral::IntegerLiteral(const ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l)
- : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary), Loc(l) {
+ : Expr(IntegerLiteralClass, type, VK_PRValue, OK_Ordinary), Loc(l) {
assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
assert(V.getBitWidth() == C.getIntWidth(type) &&
"Integer type is not the correct size for constant.");
@@ -833,7 +902,7 @@ IntegerLiteral::Create(const ASTContext &C, EmptyShell Empty) {
FixedPointLiteral::FixedPointLiteral(const ASTContext &C, const llvm::APInt &V,
QualType type, SourceLocation l,
unsigned Scale)
- : Expr(FixedPointLiteralClass, type, VK_RValue, OK_Ordinary), Loc(l),
+ : Expr(FixedPointLiteralClass, type, VK_PRValue, OK_Ordinary), Loc(l),
Scale(Scale) {
assert(type->isFixedPointType() && "Illegal type in FixedPointLiteral");
assert(V.getBitWidth() == C.getTypeInfo(type).Width &&
@@ -865,9 +934,79 @@ std::string FixedPointLiteral::getValueAsString(unsigned Radix) const {
return std::string(S.str());
}
+void CharacterLiteral::print(unsigned Val, CharacterKind Kind,
+ raw_ostream &OS) {
+ switch (Kind) {
+ case CharacterLiteral::Ascii:
+ break; // no prefix.
+ case CharacterLiteral::Wide:
+ OS << 'L';
+ break;
+ case CharacterLiteral::UTF8:
+ OS << "u8";
+ break;
+ case CharacterLiteral::UTF16:
+ OS << 'u';
+ break;
+ case CharacterLiteral::UTF32:
+ OS << 'U';
+ break;
+ }
+
+ switch (Val) {
+ case '\\':
+ OS << "'\\\\'";
+ break;
+ case '\'':
+ OS << "'\\''";
+ break;
+ case '\a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ OS << "'\\a'";
+ break;
+ case '\b':
+ OS << "'\\b'";
+ break;
+ // Nonstandard escape sequence.
+ /*case '\e':
+ OS << "'\\e'";
+ break;*/
+ case '\f':
+ OS << "'\\f'";
+ break;
+ case '\n':
+ OS << "'\\n'";
+ break;
+ case '\r':
+ OS << "'\\r'";
+ break;
+ case '\t':
+ OS << "'\\t'";
+ break;
+ case '\v':
+ OS << "'\\v'";
+ break;
+ default:
+ // A character literal might be sign-extended, which
+ // would result in an invalid \U escape sequence.
+ // FIXME: multicharacter literals such as '\xFF\xFF\xFF\xFF'
+ // are not correctly handled.
+ if ((Val & ~0xFFu) == ~0xFFu && Kind == CharacterLiteral::Ascii)
+ Val &= 0xFFu;
+ if (Val < 256 && isPrintable((unsigned char)Val))
+ OS << "'" << (char)Val << "'";
+ else if (Val < 256)
+ OS << "'\\x" << llvm::format("%02x", Val) << "'";
+ else if (Val <= 0xFFFF)
+ OS << "'\\u" << llvm::format("%04x", Val) << "'";
+ else
+ OS << "'\\U" << llvm::format("%08x", Val) << "'";
+ }
+}
+
FloatingLiteral::FloatingLiteral(const ASTContext &C, const llvm::APFloat &V,
bool isexact, QualType Type, SourceLocation L)
- : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary), Loc(L) {
+ : Expr(FloatingLiteralClass, Type, VK_PRValue, OK_Ordinary), Loc(L) {
setSemantics(V.getSemantics());
FloatingLiteralBits.IsExact = isexact;
setValue(C, V);
@@ -1261,7 +1400,7 @@ CallExpr::CallExpr(StmtClass SC, Expr *Fn, ArrayRef<Expr *> PreArgs,
for (unsigned I = Args.size(); I != NumArgs; ++I)
setArg(I, nullptr);
- setDependence(computeDependence(this, PreArgs));
+ this->computeDependence();
CallExprBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
if (hasStoredFPFeatures())
@@ -1391,8 +1530,15 @@ QualType CallExpr::getCallReturnType(const ASTContext &Ctx) const {
if (isa<CXXPseudoDestructorExpr>(Callee->IgnoreParens()))
return Ctx.VoidTy;
+ if (isa<UnresolvedMemberExpr>(Callee->IgnoreParens()))
+ return Ctx.DependentTy;
+
// This should never be overloaded and so should never return null.
CalleeType = Expr::findBoundMemberType(Callee);
+ assert(!CalleeType.isNull());
+ } else if (CalleeType->isDependentType() ||
+ CalleeType->isSpecificPlaceholderType(BuiltinType::Overload)) {
+ return Ctx.DependentTy;
}
const FunctionType *FnType = CalleeType->castAs<FunctionType>();
@@ -1455,7 +1601,7 @@ OffsetOfExpr::OffsetOfExpr(const ASTContext &C, QualType type,
SourceLocation OperatorLoc, TypeSourceInfo *tsi,
ArrayRef<OffsetOfNode> comps, ArrayRef<Expr *> exprs,
SourceLocation RParenLoc)
- : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary),
+ : Expr(OffsetOfExprClass, type, VK_PRValue, OK_Ordinary),
OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
NumComps(comps.size()), NumExprs(exprs.size()) {
for (unsigned i = 0; i != comps.size(); ++i)
@@ -1477,7 +1623,7 @@ IdentifierInfo *OffsetOfNode::getFieldName() const {
UnaryExprOrTypeTraitExpr::UnaryExprOrTypeTraitExpr(
UnaryExprOrTypeTrait ExprKind, Expr *E, QualType resultType,
SourceLocation op, SourceLocation rp)
- : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary),
+ : Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_PRValue, OK_Ordinary),
OpLoc(op), RParenLoc(rp) {
assert(ExprKind <= UETT_Last && "invalid enum value!");
UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
@@ -1578,8 +1724,10 @@ MemberExpr *MemberExpr::CreateEmpty(const ASTContext &Context,
return new (Mem) MemberExpr(EmptyShell());
}
-void MemberExpr::setMemberDecl(ValueDecl *D) {
- MemberDecl = D;
+void MemberExpr::setMemberDecl(ValueDecl *NewD) {
+ MemberDecl = NewD;
+ if (getType()->isUndeducedType())
+ setType(NewD->getType());
setDependence(computeDependence(this));
}
@@ -1663,7 +1811,7 @@ bool CastExpr::CastConsistency() const {
auto Ty = getType();
auto SETy = getSubExpr()->getType();
assert(getValueKindForType(Ty) == Expr::getValueKindForType(SETy));
- if (isRValue() && !Ty->isDependentType() && !SETy->isDependentType()) {
+ if (isPRValue() && !Ty->isDependentType() && !SETy->isDependentType()) {
Ty = Ty->getPointeeType();
SETy = SETy->getPointeeType();
}
@@ -1708,6 +1856,7 @@ bool CastExpr::CastConsistency() const {
case CK_FixedPointCast:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
assert(!getType()->isBooleanType() && "unheralded conversion to bool");
goto CheckNoBasePath;
@@ -1768,6 +1917,7 @@ Expr *CastExpr::getSubExprAsWritten() {
SubExpr =
skipImplicitTemporary(cast<CXXConstructExpr>(SubExpr->IgnoreImplicit())->getArg(0));
else if (E->getCastKind() == CK_UserDefinedConversion) {
+ SubExpr = SubExpr->IgnoreImplicit();
assert((isa<CXXMemberCallExpr>(SubExpr) ||
isa<BlockExpr>(SubExpr)) &&
"Unexpected SubExpr for CK_UserDefinedConversion.");
@@ -2037,7 +2187,7 @@ SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind,
SourceLocation BLoc, SourceLocation RParenLoc,
DeclContext *ParentContext)
: Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind),
- VK_RValue, OK_Ordinary),
+ VK_PRValue, OK_Ordinary),
BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) {
SourceLocExprBits.Kind = Kind;
setDependence(ExprDependence::None);
@@ -2105,7 +2255,7 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
InitListExpr::InitListExpr(const ASTContext &C, SourceLocation lbraceloc,
ArrayRef<Expr *> initExprs, SourceLocation rbraceloc)
- : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary),
+ : Expr(InitListExprClass, QualType(), VK_PRValue, OK_Ordinary),
InitExprs(C, initExprs.size()), LBraceLoc(lbraceloc),
RBraceLoc(rbraceloc), AltForm(nullptr, true) {
sawArrayRangeDesignator(false);
@@ -2175,7 +2325,7 @@ bool InitListExpr::isTransparent() const {
// Don't confuse aggregate initialization of a struct X { X &x; }; with a
// transparent struct copy.
- if (!getInit(0)->isRValue() && getType()->isRecordType())
+ if (!getInit(0)->isPRValue() && getType()->isRecordType())
return false;
return getType().getCanonicalType() ==
@@ -2256,7 +2406,7 @@ bool Expr::isReadIfDiscardedInCPlusPlus11() const {
// In C++11, discarded-value expressions of a certain form are special,
// according to [expr]p10:
// The lvalue-to-rvalue conversion (4.1) is applied only if the
- // expression is an lvalue of volatile-qualified type and it has
+ // expression is a glvalue of volatile-qualified type and it has
// one of the following forms:
if (!isGLValue() || !getType().isVolatileQualified())
return false;
@@ -3300,6 +3450,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case SourceLocExprClass:
case ConceptSpecializationExprClass:
case RequiresExprClass:
+ case SYCLUniqueStableNameExprClass:
// These never have a side-effect.
return false;
@@ -3723,8 +3874,7 @@ Expr::isNullPointerConstant(ASTContext &Ctx,
const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
const Expr *E = this;
while (true) {
- assert((E->getValueKind() == VK_LValue &&
- E->getObjectKind() == OK_ObjCProperty) &&
+ assert((E->isLValue() && E->getObjectKind() == OK_ObjCProperty) &&
"expression is not a property reference");
E = E->IgnoreParenCasts();
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
@@ -3763,7 +3913,7 @@ FieldDecl *Expr::getSourceBitField() {
while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
if (ICE->getCastKind() == CK_LValueToRValue ||
- (ICE->getValueKind() != VK_RValue && ICE->getCastKind() == CK_NoOp))
+ (ICE->isGLValue() && ICE->getCastKind() == CK_NoOp))
E = ICE->getSubExpr()->IgnoreParens();
else
break;
@@ -3810,8 +3960,7 @@ bool Expr::refersToVectorElement() const {
const Expr *E = this->IgnoreParens();
while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
- if (ICE->getValueKind() != VK_RValue &&
- ICE->getCastKind() == CK_NoOp)
+ if (ICE->isGLValue() && ICE->getCastKind() == CK_NoOp)
E = ICE->getSubExpr()->IgnoreParens();
else
break;
@@ -3860,7 +4009,7 @@ bool Expr::isSameComparisonOperand(const Expr* E1, const Expr* E2) {
// template parameters.
const auto *DRE1 = cast<DeclRefExpr>(E1);
const auto *DRE2 = cast<DeclRefExpr>(E2);
- return DRE1->isRValue() && DRE2->isRValue() &&
+ return DRE1->isPRValue() && DRE2->isPRValue() &&
DRE1->getDecl() == DRE2->getDecl();
}
case ImplicitCastExprClass: {
@@ -4018,7 +4167,7 @@ void ExtVectorElementExpr::getEncodedElementAccess(
ShuffleVectorExpr::ShuffleVectorExpr(const ASTContext &C, ArrayRef<Expr *> args,
QualType Type, SourceLocation BLoc,
SourceLocation RP)
- : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary),
+ : Expr(ShuffleVectorExprClass, Type, VK_PRValue, OK_Ordinary),
BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(args.size()) {
SubExprs = new (C) Stmt*[args.size()];
for (unsigned i = 0; i != args.size(); i++)
@@ -4065,7 +4214,7 @@ GenericSelectionExpr::GenericSelectionExpr(
ArrayRef<TypeSourceInfo *> AssocTypes, ArrayRef<Expr *> AssocExprs,
SourceLocation DefaultLoc, SourceLocation RParenLoc,
bool ContainsUnexpandedParameterPack)
- : Expr(GenericSelectionExprClass, Context.DependentTy, VK_RValue,
+ : Expr(GenericSelectionExprClass, Context.DependentTy, VK_PRValue,
OK_Ordinary),
NumAssocs(AssocExprs.size()), ResultIndex(ResultDependentIndex),
DefaultLoc(DefaultLoc), RParenLoc(RParenLoc) {
@@ -4269,7 +4418,7 @@ DesignatedInitUpdateExpr::DesignatedInitUpdateExpr(const ASTContext &C,
SourceLocation lBraceLoc,
Expr *baseExpr,
SourceLocation rBraceLoc)
- : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_RValue,
+ : Expr(DesignatedInitUpdateExprClass, baseExpr->getType(), VK_PRValue,
OK_Ordinary) {
BaseAndUpdaterExprs[0] = baseExpr;
@@ -4291,7 +4440,7 @@ SourceLocation DesignatedInitUpdateExpr::getEndLoc() const {
ParenListExpr::ParenListExpr(SourceLocation LParenLoc, ArrayRef<Expr *> Exprs,
SourceLocation RParenLoc)
- : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary),
+ : Expr(ParenListExprClass, QualType(), VK_PRValue, OK_Ordinary),
LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
ParenListExprBits.NumExprs = Exprs.size();
@@ -4467,7 +4616,7 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
ExprValueKind VK;
if (resultIndex == NoResult) {
type = C.VoidTy;
- VK = VK_RValue;
+ VK = VK_PRValue;
} else {
assert(resultIndex < semantics.size());
type = semantics[resultIndex]->getType();
@@ -4527,7 +4676,7 @@ Stmt::const_child_range UnaryExprOrTypeTraitExpr::children() const {
AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr *> args, QualType t,
AtomicOp op, SourceLocation RP)
- : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary),
+ : Expr(AtomicExprClass, t, VK_PRValue, OK_Ordinary),
NumSubExprs(args.size()), BuiltinLoc(BLoc), RParenLoc(RP), Op(op) {
assert(args.size() == getNumSubExprs(op) && "wrong number of subexpressions");
for (unsigned i = 0; i != args.size(); i++)
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 8dc9d4296e14..c98cfd74dab0 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -187,7 +187,7 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
Expr *Initializer, QualType Ty,
TypeSourceInfo *AllocatedTypeInfo, SourceRange Range,
SourceRange DirectInitRange)
- : Expr(CXXNewExprClass, Ty, VK_RValue, OK_Ordinary),
+ : Expr(CXXNewExprClass, Ty, VK_PRValue, OK_Ordinary),
OperatorNew(OperatorNew), OperatorDelete(OperatorDelete),
AllocatedTypeInfo(AllocatedTypeInfo), Range(Range),
DirectInitRange(DirectInitRange) {
@@ -275,7 +275,8 @@ CXXNewExpr *CXXNewExpr::CreateEmpty(const ASTContext &Ctx, bool IsArray,
}
bool CXXNewExpr::shouldNullCheckAllocation() const {
- return getOperatorNew()
+ return !getOperatorNew()->hasAttr<ReturnsNonNullAttr>() &&
+ getOperatorNew()
->getType()
->castAs<FunctionProtoType>()
->isNothrow() &&
@@ -321,7 +322,7 @@ CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(
SourceLocation OperatorLoc, NestedNameSpecifierLoc QualifierLoc,
TypeSourceInfo *ScopeType, SourceLocation ColonColonLoc,
SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType)
- : Expr(CXXPseudoDestructorExprClass, Context.BoundMemberTy, VK_RValue,
+ : Expr(CXXPseudoDestructorExprClass, Context.BoundMemberTy, VK_PRValue,
OK_Ordinary),
Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
@@ -670,6 +671,7 @@ CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
return cast<CXXMethodDecl>(MemExpr->getMemberDecl());
// FIXME: Will eventually need to cope with member pointers.
+ // NOTE: Update makeTailCallIfSwiftAsync on fixing this.
return nullptr;
}
@@ -951,9 +953,9 @@ CXXDefaultInitExpr::CXXDefaultInitExpr(const ASTContext &Ctx,
SourceLocation Loc, FieldDecl *Field,
QualType Ty, DeclContext *UsedContext)
: Expr(CXXDefaultInitExprClass, Ty.getNonLValueExprType(Ctx),
- Ty->isLValueReferenceType()
- ? VK_LValue
- : Ty->isRValueReferenceType() ? VK_XValue : VK_RValue,
+ Ty->isLValueReferenceType() ? VK_LValue
+ : Ty->isRValueReferenceType() ? VK_XValue
+ : VK_PRValue,
/*FIXME*/ OK_Ordinary),
Field(Field), UsedContext(UsedContext) {
CXXDefaultInitExprBits.Loc = Loc;
@@ -1057,7 +1059,7 @@ CXXConstructExpr::CXXConstructExpr(
bool ListInitialization, bool StdInitListInitialization,
bool ZeroInitialization, ConstructionKind ConstructKind,
SourceRange ParenOrBraceRange)
- : Expr(SC, Ty, VK_RValue, OK_Ordinary), Constructor(Ctor),
+ : Expr(SC, Ty, VK_PRValue, OK_Ordinary), Constructor(Ctor),
ParenOrBraceRange(ParenOrBraceRange), NumArgs(Args.size()) {
CXXConstructExprBits.Elidable = Elidable;
CXXConstructExprBits.HadMultipleCandidates = HadMultipleCandidates;
@@ -1125,7 +1127,7 @@ LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack)
- : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary),
+ : Expr(LambdaExprClass, T, VK_PRValue, OK_Ordinary),
IntroducerRange(IntroducerRange), CaptureDefaultLoc(CaptureDefaultLoc),
ClosingBrace(ClosingBrace) {
LambdaExprBits.NumCaptures = CaptureInits.size();
@@ -1326,10 +1328,9 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(QualType T,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc)
: Expr(CXXUnresolvedConstructExprClass, T,
- (TSI->getType()->isLValueReferenceType()
- ? VK_LValue
- : TSI->getType()->isRValueReferenceType() ? VK_XValue
- : VK_RValue),
+ (TSI->getType()->isLValueReferenceType() ? VK_LValue
+ : TSI->getType()->isRValueReferenceType() ? VK_XValue
+ : VK_PRValue),
OK_Ordinary),
TSI(TSI), LParenLoc(LParenLoc), RParenLoc(RParenLoc) {
CXXUnresolvedConstructExprBits.NumArgs = Args.size();
@@ -1668,7 +1669,7 @@ bool MaterializeTemporaryExpr::isUsableInConstantExpressions(
TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc, bool Value)
- : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary), Loc(Loc),
+ : Expr(TypeTraitExprClass, T, VK_PRValue, OK_Ordinary), Loc(Loc),
RParenLoc(RParenLoc) {
assert(Kind <= TT_Last && "invalid enum value!");
TypeTraitExprBits.Kind = Kind;
diff --git a/clang/lib/AST/ExprClassification.cpp b/clang/lib/AST/ExprClassification.cpp
index 0286c730ce4e..6998e28fd2ea 100644
--- a/clang/lib/AST/ExprClassification.cpp
+++ b/clang/lib/AST/ExprClassification.cpp
@@ -53,8 +53,12 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
// Enable this assertion for testing.
switch (kind) {
- case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break;
- case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break;
+ case Cl::CL_LValue:
+ assert(isLValue());
+ break;
+ case Cl::CL_XValue:
+ assert(isXValue());
+ break;
case Cl::CL_Function:
case Cl::CL_Void:
case Cl::CL_AddressableVoid:
@@ -64,7 +68,9 @@ Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
case Cl::CL_ClassTemporary:
case Cl::CL_ArrayTemporary:
case Cl::CL_ObjCMessageRValue:
- case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
+ case Cl::CL_PRValue:
+ assert(isPRValue());
+ break;
}
Cl::ModifiableType modifiable = Cl::CM_Untested;
@@ -89,7 +95,7 @@ static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
const Expr *E,
ExprValueKind Kind) {
switch (Kind) {
- case VK_RValue:
+ case VK_PRValue:
return Lang.CPlusPlus ? ClassifyTemporary(E->getType()) : Cl::CL_PRValue;
case VK_LValue:
return Cl::CL_LValue;
@@ -424,7 +430,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
// contains only one element. In that case, we look at that element
// for an exact classification. Init list creation takes care of the
// value kind for us, so we only need to fine-tune.
- if (E->isRValue())
+ if (E->isPRValue())
return ClassifyExprValueKind(Lang, E, E->getValueKind());
assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
"Only 1-element init lists can be glvalues.");
@@ -433,6 +439,9 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::CoawaitExprClass:
case Expr::CoyieldExprClass:
return ClassifyInternal(Ctx, cast<CoroutineSuspendExpr>(E)->getResumeExpr());
+ case Expr::SYCLUniqueStableNameExprClass:
+ return Cl::CL_PRValue;
+ break;
}
llvm_unreachable("unhandled expression kind in classification");
diff --git a/clang/lib/AST/ExprConcepts.cpp b/clang/lib/AST/ExprConcepts.cpp
index d00d8329095c..8cb8625e2a1a 100644
--- a/clang/lib/AST/ExprConcepts.cpp
+++ b/clang/lib/AST/ExprConcepts.cpp
@@ -37,7 +37,7 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
const ASTTemplateArgumentListInfo *ArgsAsWritten,
ArrayRef<TemplateArgument> ConvertedArgs,
const ConstraintSatisfaction *Satisfaction)
- : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_PRValue, OK_Ordinary),
ConceptReference(NNS, TemplateKWLoc, ConceptNameInfo, FoundDecl,
NamedConcept, ArgsAsWritten),
NumTemplateArgs(ConvertedArgs.size()),
@@ -91,7 +91,7 @@ ConceptSpecializationExpr::ConceptSpecializationExpr(
ArrayRef<TemplateArgument> ConvertedArgs,
const ConstraintSatisfaction *Satisfaction, bool Dependent,
bool ContainsUnexpandedParameterPack)
- : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ : Expr(ConceptSpecializationExprClass, C.BoolTy, VK_PRValue, OK_Ordinary),
ConceptReference(NestedNameSpecifierLoc(), SourceLocation(),
DeclarationNameInfo(), NamedConcept, NamedConcept,
nullptr),
@@ -146,7 +146,7 @@ RequiresExpr::RequiresExpr(ASTContext &C, SourceLocation RequiresKWLoc,
ArrayRef<ParmVarDecl *> LocalParameters,
ArrayRef<concepts::Requirement *> Requirements,
SourceLocation RBraceLoc)
- : Expr(RequiresExprClass, C.BoolTy, VK_RValue, OK_Ordinary),
+ : Expr(RequiresExprClass, C.BoolTy, VK_PRValue, OK_Ordinary),
NumLocalParameters(LocalParameters.size()),
NumRequirements(Requirements.size()), Body(Body), RBraceLoc(RBraceLoc) {
RequiresExprBits.IsSatisfied = false;
diff --git a/clang/lib/AST/ExprConstant.cpp b/clang/lib/AST/ExprConstant.cpp
index 56181bbe1166..01c0168d61a4 100644
--- a/clang/lib/AST/ExprConstant.cpp
+++ b/clang/lib/AST/ExprConstant.cpp
@@ -101,15 +101,18 @@ namespace {
/// Given an expression, determine the type used to store the result of
/// evaluating that expression.
static QualType getStorageType(const ASTContext &Ctx, const Expr *E) {
- if (E->isRValue())
+ if (E->isPRValue())
return E->getType();
return Ctx.getLValueReferenceType(E->getType());
}
/// Given a CallExpr, try to get the alloc_size attribute. May return null.
static const AllocSizeAttr *getAllocSizeAttr(const CallExpr *CE) {
- const FunctionDecl *Callee = CE->getDirectCallee();
- return Callee ? Callee->getAttr<AllocSizeAttr>() : nullptr;
+ if (const FunctionDecl *DirectCallee = CE->getDirectCallee())
+ return DirectCallee->getAttr<AllocSizeAttr>();
+ if (const Decl *IndirectCallee = CE->getCalleeDecl())
+ return IndirectCallee->getAttr<AllocSizeAttr>();
+ return nullptr;
}
/// Attempts to unwrap a CallExpr (with an alloc_size attribute) from an Expr.
@@ -926,6 +929,9 @@ namespace {
/// Whether we're checking for an expression that has undefined behavior.
/// If so, we will produce warnings if we encounter an operation that is
/// always undefined.
+ ///
+ /// Note that we still need to evaluate the expression normally when this
+ /// is set; this is used when evaluating ICEs in C.
bool CheckingForUndefinedBehavior = false;
enum EvaluationMode {
@@ -2294,7 +2300,7 @@ static bool CheckMemberPointerConstantExpression(EvalInfo &Info,
/// produce an appropriate diagnostic.
static bool CheckLiteralType(EvalInfo &Info, const Expr *E,
const LValue *This = nullptr) {
- if (!E->isRValue() || E->getType()->isLiteralType(Info.Ctx))
+ if (!E->isPRValue() || E->getType()->isLiteralType(Info.Ctx))
return true;
// C++1y: A constant initializer for an object o [...] may also invoke
@@ -2499,7 +2505,7 @@ static bool HandleConversionToBool(const APValue &Val, bool &Result) {
static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
+ assert(E->isPRValue() && "missing lvalue-to-rvalue conv in bool condition");
APValue Val;
if (!Evaluate(Val, Info, E))
return false;
@@ -2714,9 +2720,8 @@ static bool CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
if (Info.checkingForUndefinedBehavior())
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_integer_constant_overflow)
- << Result.toString(10) << E->getType();
- else
- return HandleOverflow(Info, E, Value, E->getType());
+ << toString(Result, 10) << E->getType();
+ return HandleOverflow(Info, E, Value, E->getType());
}
return true;
}
@@ -3497,8 +3502,8 @@ static bool diagnoseMutableFields(EvalInfo &Info, const Expr *E, AccessKinds AK,
static bool lifetimeStartedInEvaluation(EvalInfo &Info,
APValue::LValueBase Base,
bool MutableSubobject = false) {
- // A temporary we created.
- if (Base.getCallIndex())
+ // A temporary or transient heap allocation we created.
+ if (Base.getCallIndex() || Base.is<DynamicAllocLValue>())
return true;
switch (Info.IsEvaluatingDecl) {
@@ -4563,7 +4568,7 @@ static bool handleIncDec(EvalInfo &Info, const Expr *E, const LValue &LVal,
/// Build an lvalue for the object argument of a member function call.
static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
LValue &This) {
- if (Object->getType()->isPointerType() && Object->isRValue())
+ if (Object->getType()->isPointerType() && Object->isPRValue())
return EvaluatePointer(Object, This, Info);
if (Object->isGLValue())
@@ -6363,7 +6368,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
// Invent an expression for location purposes.
// FIXME: We shouldn't need to do this.
- OpaqueValueExpr LocE(CallLoc, Info.Ctx.IntTy, VK_RValue);
+ OpaqueValueExpr LocE(CallLoc, Info.Ctx.IntTy, VK_PRValue);
// For arrays, destroy elements right-to-left.
if (const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(T)) {
@@ -6710,9 +6715,12 @@ bool HandleOperatorDeleteCall(EvalInfo &Info, const CallExpr *E) {
if (Pointer.Designator.Invalid)
return false;
- // Deleting a null pointer has no effect.
- if (Pointer.isNullPointer())
+ // Deleting a null pointer would have no effect, but it's not permitted by
+ // std::allocator<T>::deallocate's contract.
+ if (Pointer.isNullPointer()) {
+ Info.CCEDiag(E->getExprLoc(), diag::note_constexpr_deallocate_null);
return true;
+ }
if (!CheckDeleteKind(Info, E, Pointer, DynAlloc::StdAllocator))
return false;
@@ -6956,7 +6964,7 @@ class BufferToAPValueConverter {
llvm::NoneType unrepresentableValue(QualType Ty, const APSInt &Val) {
Info.FFDiag(BCE->getBeginLoc(),
diag::note_constexpr_bit_cast_unrepresentable_value)
- << Ty << Val.toString(/*Radix=*/10);
+ << Ty << toString(Val, /*Radix=*/10);
return None;
}
@@ -7846,8 +7854,8 @@ public:
bool VisitStmtExpr(const StmtExpr *E) {
// We will have checked the full-expressions inside the statement expression
// when they were completed, and don't need to check them again now.
- if (Info.checkingForUndefinedBehavior())
- return Error(E);
+ llvm::SaveAndRestore<bool> NotCheckingForUB(
+ Info.CheckingForUndefinedBehavior, false);
const CompoundStmt *CS = E->getSubStmt();
if (CS->body_empty())
@@ -7940,7 +7948,7 @@ public:
if (E->isArrow()) {
EvalOK = evaluatePointer(E->getBase(), Result);
BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
- } else if (E->getBase()->isRValue()) {
+ } else if (E->getBase()->isPRValue()) {
assert(E->getBase()->getType()->isRecordType());
EvalOK = EvaluateTemporary(E->getBase(), Result, this->Info);
BaseTy = E->getBase()->getType();
@@ -8661,6 +8669,26 @@ public:
return true;
}
+ bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E) {
+ std::string ResultStr = E->ComputeName(Info.Ctx);
+
+ Info.Ctx.SYCLUniqueStableNameEvaluatedValues[E] = ResultStr;
+
+ QualType CharTy = Info.Ctx.CharTy.withConst();
+ APInt Size(Info.Ctx.getTypeSize(Info.Ctx.getSizeType()),
+ ResultStr.size() + 1);
+ QualType ArrayTy = Info.Ctx.getConstantArrayType(CharTy, Size, nullptr,
+ ArrayType::Normal, 0);
+
+ StringLiteral *SL =
+ StringLiteral::Create(Info.Ctx, ResultStr, StringLiteral::Ascii,
+ /*Pascal*/ false, ArrayTy, E->getLocation());
+
+ evaluateLValue(SL, Result);
+ Result.addArray(Info, E, cast<ConstantArrayType>(ArrayTy));
+ return true;
+ }
+
// FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
@@ -8668,7 +8696,7 @@ public:
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
bool InvalidBaseOK) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+ assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
@@ -9237,7 +9265,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
llvm::APInt::udivrem(OrigN, TSize, N, Remainder);
if (Remainder) {
Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported)
- << Move << WChar << 0 << T << OrigN.toString(10, /*Signed*/false)
+ << Move << WChar << 0 << T << toString(OrigN, 10, /*Signed*/false)
<< (unsigned)TSize;
return false;
}
@@ -9251,7 +9279,7 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (N.ugt(RemainingSrcSize) || N.ugt(RemainingDestSize)) {
Info.FFDiag(E, diag::note_constexpr_memcpy_unsupported)
<< Move << WChar << (N.ugt(RemainingSrcSize) ? 1 : 2) << T
- << N.toString(10, /*Signed*/false);
+ << toString(N, 10, /*Signed*/false);
return false;
}
uint64_t NElems = N.getZExtValue();
@@ -9426,8 +9454,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
return ZeroInitialization(E);
Info.FFDiag(*ArraySize, diag::note_constexpr_new_too_small)
- << AllocBound.toString(10, /*Signed=*/false)
- << InitBound.toString(10, /*Signed=*/false)
+ << toString(AllocBound, 10, /*Signed=*/false)
+ << toString(InitBound, 10, /*Signed=*/false)
<< (*ArraySize)->getSourceRange();
return false;
}
@@ -9556,7 +9584,7 @@ public:
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isMemberPointerType());
+ assert(E->isPRValue() && E->getType()->isMemberPointerType());
return MemberPointerExprEvaluator(Info, Result).Visit(E);
}
@@ -9798,7 +9826,14 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
isa<CXXDefaultInitExpr>(InitExpr));
- return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
+ if (EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr)) {
+ if (Field->isBitField())
+ return truncateBitfieldValue(Info, InitExpr, Result.getUnionValue(),
+ Field);
+ return true;
+ }
+
+ return false;
}
if (!Result.hasValue())
@@ -10009,6 +10044,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
auto *CaptureInitIt = E->capture_init_begin();
const LambdaCapture *CaptureIt = ClosureClass->captures_begin();
bool Success = true;
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(ClosureClass);
for (const auto *Field : ClosureClass->fields()) {
assert(CaptureInitIt != E->capture_init_end());
// Get the initializer for this field
@@ -10019,8 +10055,13 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
if (!CurFieldInit)
return Error(E);
+ LValue Subobject = This;
+
+ if (!HandleLValueMember(Info, E, Subobject, Field, &Layout))
+ return false;
+
APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
- if (!EvaluateInPlace(FieldVal, Info, This, CurFieldInit)) {
+ if (!EvaluateInPlace(FieldVal, Info, Subobject, CurFieldInit)) {
if (!Info.keepEvaluatingAfterFailure())
return false;
Success = false;
@@ -10033,7 +10074,7 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
static bool EvaluateRecord(const Expr *E, const LValue &This,
APValue &Result, EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isRecordType() &&
+ assert(E->isPRValue() && E->getType()->isRecordType() &&
"can't evaluate expression as a record rvalue");
return RecordExprEvaluator(Info, This, Result).Visit(E);
}
@@ -10089,7 +10130,7 @@ public:
/// Evaluate an expression of record type as a temporary.
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isRecordType());
+ assert(E->isPRValue() && E->getType()->isRecordType());
return TemporaryExprEvaluator(Info, Result).Visit(E);
}
@@ -10131,7 +10172,8 @@ namespace {
} // end anonymous namespace
static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
- assert(E->isRValue() && E->getType()->isVectorType() &&"not a vector rvalue");
+ assert(E->isPRValue() && E->getType()->isVectorType() &&
+ "not a vector prvalue");
return VectorExprEvaluator(Info, Result).Visit(E);
}
@@ -10193,7 +10235,7 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
else
Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
- Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
+ Elts.push_back(APValue(APSInt(Elt, !EltTy->isSignedIntegerType())));
}
} else {
return Error(E);
@@ -10289,10 +10331,10 @@ bool VectorExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
"Must both be vector types");
// Checking JUST the types are the same would be fine, except shifts don't
// need to have their types be the same (since you always shift by an int).
- assert(LHS->getType()->getAs<VectorType>()->getNumElements() ==
- E->getType()->getAs<VectorType>()->getNumElements() &&
- RHS->getType()->getAs<VectorType>()->getNumElements() ==
- E->getType()->getAs<VectorType>()->getNumElements() &&
+ assert(LHS->getType()->castAs<VectorType>()->getNumElements() ==
+ E->getType()->castAs<VectorType>()->getNumElements() &&
+ RHS->getType()->castAs<VectorType>()->getNumElements() ==
+ E->getType()->castAs<VectorType>()->getNumElements() &&
"All operands must be the same size.");
APValue LHSValue;
@@ -10346,7 +10388,8 @@ namespace {
Result = APValue(APValue::UninitArray(), 0,
CAT->getSize().getZExtValue());
- if (!Result.hasArrayFiller()) return true;
+ if (!Result.hasArrayFiller())
+ return true;
// Zero-initialize all elements.
LValue Subobject = This;
@@ -10376,7 +10419,8 @@ namespace {
static bool EvaluateArray(const Expr *E, const LValue &This,
APValue &Result, EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
+ assert(E->isPRValue() && E->getType()->isArrayType() &&
+ "not an array prvalue");
return ArrayExprEvaluator(Info, This, Result).Visit(E);
}
@@ -10384,8 +10428,8 @@ static bool EvaluateArrayNewInitList(EvalInfo &Info, LValue &This,
APValue &Result, const InitListExpr *ILE,
QualType AllocType) {
assert(!ILE->isValueDependent());
- assert(ILE->isRValue() && ILE->getType()->isArrayType() &&
- "not an array rvalue");
+ assert(ILE->isPRValue() && ILE->getType()->isArrayType() &&
+ "not an array prvalue");
return ArrayExprEvaluator(Info, This, Result)
.VisitInitListExpr(ILE, AllocType);
}
@@ -10395,8 +10439,8 @@ static bool EvaluateArrayNewConstructExpr(EvalInfo &Info, LValue &This,
const CXXConstructExpr *CCE,
QualType AllocType) {
assert(!CCE->isValueDependent());
- assert(CCE->isRValue() && CCE->getType()->isArrayType() &&
- "not an array rvalue");
+ assert(CCE->isPRValue() && CCE->getType()->isArrayType() &&
+ "not an array prvalue");
return ArrayExprEvaluator(Info, This, Result)
.VisitCXXConstructExpr(CCE, This, &Result, AllocType);
}
@@ -10773,7 +10817,7 @@ class FixedPointExprEvaluator
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
+ assert(E->isPRValue() && E->getType()->isIntegralOrEnumerationType());
return IntExprEvaluator(Info, Result).Visit(E);
}
@@ -10971,6 +11015,8 @@ EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts) {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
return GCCTypeClass::None;
case BuiltinType::Dependent:
@@ -11140,7 +11186,7 @@ static QualType getObjectType(APValue::LValueBase B) {
///
/// Always returns an RValue with a pointer representation.
static const Expr *ignorePointerCastsAndParens(const Expr *E) {
- assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+ assert(E->isPRValue() && E->getType()->hasPointerRepresentation());
auto *NoParens = E->IgnoreParens();
auto *Cast = dyn_cast<CastExpr>(NoParens);
@@ -11155,7 +11201,7 @@ static const Expr *ignorePointerCastsAndParens(const Expr *E) {
return NoParens;
auto *SubExpr = Cast->getSubExpr();
- if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isRValue())
+ if (!SubExpr->getType()->hasPointerRepresentation() || !SubExpr->isPRValue())
return NoParens;
return ignorePointerCastsAndParens(SubExpr);
}
@@ -11990,9 +12036,6 @@ bool IntExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
Success(0, E) : Error(E);
}
- case Builtin::BIomp_is_initial_device:
- // We can decide statically which value the runtime would return if called.
- return Success(Info.getLangOpts().OpenMPIsDevice ? 0 : 1, E);
case Builtin::BI__builtin_add_overflow:
case Builtin::BI__builtin_sub_overflow:
case Builtin::BI__builtin_mul_overflow:
@@ -12193,7 +12236,7 @@ public:
/// with integral or enumeration type.
static bool shouldEnqueue(const BinaryOperator *E) {
return E->getOpcode() == BO_Comma || E->isLogicalOp() ||
- (E->isRValue() && E->getType()->isIntegralOrEnumerationType() &&
+ (E->isPRValue() && E->getType()->isIntegralOrEnumerationType() &&
E->getLHS()->getType()->isIntegralOrEnumerationType() &&
E->getRHS()->getType()->isIntegralOrEnumerationType());
}
@@ -12452,25 +12495,6 @@ void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
}
namespace {
-/// Used when we determine that we should fail, but can keep evaluating prior to
-/// noting that we had a failure.
-class DelayedNoteFailureRAII {
- EvalInfo &Info;
- bool NoteFailure;
-
-public:
- DelayedNoteFailureRAII(EvalInfo &Info, bool NoteFailure = true)
- : Info(Info), NoteFailure(NoteFailure) {}
- ~DelayedNoteFailureRAII() {
- if (NoteFailure) {
- bool ContinueAfterFailure = Info.noteFailure();
- (void)ContinueAfterFailure;
- assert(ContinueAfterFailure &&
- "Shouldn't have kept evaluating on failure.");
- }
- }
-};
-
enum class CmpResult {
Unequal,
Less,
@@ -12838,12 +12862,14 @@ bool RecordExprEvaluator::VisitBinCmp(const BinaryOperator *E) {
}
bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- // We don't call noteFailure immediately because the assignment happens after
- // we evaluate LHS and RHS.
- if (!Info.keepEvaluatingAfterFailure() && E->isAssignmentOp())
- return Error(E);
+ // We don't support assignment in C. C++ assignments don't get here because
+ // assignment is an lvalue in C++.
+ if (E->isAssignmentOp()) {
+ Error(E);
+ if (!Info.noteFailure())
+ return false;
+ }
- DelayedNoteFailureRAII MaybeNoteFailureLater(Info, E->isAssignmentOp());
if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
@@ -13180,6 +13206,7 @@ bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_FixedPointToFloating:
case CK_FixedPointCast:
case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
llvm_unreachable("invalid cast kind for integral value");
case CK_BitCast:
@@ -13397,7 +13424,7 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_fixedpoint_constant_overflow)
<< Result.toString() << E->getType();
- else if (!HandleOverflow(Info, E, Result, E->getType()))
+ if (!HandleOverflow(Info, E, Result, E->getType()))
return false;
}
return Success(Result, E);
@@ -13416,7 +13443,7 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_fixedpoint_constant_overflow)
<< IntResult.toString() << E->getType();
- else if (!HandleOverflow(Info, E, IntResult, E->getType()))
+ if (!HandleOverflow(Info, E, IntResult, E->getType()))
return false;
}
@@ -13436,7 +13463,7 @@ bool FixedPointExprEvaluator::VisitCastExpr(const CastExpr *E) {
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_fixedpoint_constant_overflow)
<< Result.toString() << E->getType();
- else if (!HandleOverflow(Info, E, Result, E->getType()))
+ if (!HandleOverflow(Info, E, Result, E->getType()))
return false;
}
@@ -13524,7 +13551,7 @@ bool FixedPointExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
Info.Ctx.getDiagnostics().Report(E->getExprLoc(),
diag::warn_fixedpoint_constant_overflow)
<< Result.toString() << E->getType();
- else if (!HandleOverflow(Info, E, Result, E->getType()))
+ if (!HandleOverflow(Info, E, Result, E->getType()))
return false;
}
return Success(Result, E);
@@ -13568,7 +13595,7 @@ public:
static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isRealFloatingType());
+ assert(E->isPRValue() && E->getType()->isRealFloatingType());
return FloatExprEvaluator(Info, Result).Visit(E);
}
@@ -13665,6 +13692,9 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
Result.changeSign();
return true;
+ case Builtin::BI__arithmetic_fence:
+ return EvaluateFloat(E->getArg(0), Result, Info);
+
// FIXME: Builtin::BI__builtin_powi
// FIXME: Builtin::BI__builtin_powif
// FIXME: Builtin::BI__builtin_powil
@@ -13821,7 +13851,7 @@ public:
static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isAnyComplexType());
+ assert(E->isPRValue() && E->getType()->isAnyComplexType());
return ComplexExprEvaluator(Info, Result).Visit(E);
}
@@ -13920,6 +13950,7 @@ bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
llvm_unreachable("invalid cast kind for complex value");
case CK_LValueToRValue:
@@ -14348,7 +14379,7 @@ public:
static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isAtomicType());
+ assert(E->isPRValue() && E->getType()->isAtomicType());
return AtomicExprEvaluator(Info, This, Result).Visit(E);
}
@@ -14473,7 +14504,7 @@ bool VoidExprEvaluator::VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
assert(!E->isValueDependent());
- assert(E->isRValue() && E->getType()->isVoidType());
+ assert(E->isPRValue() && E->getType()->isVoidType());
return VoidExprEvaluator(Info).Visit(E);
}
@@ -14573,7 +14604,7 @@ static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E, &This))
return false;
- if (E->isRValue()) {
+ if (E->isPRValue()) {
// Evaluate arrays and record types in-place, so that later initializers can
// refer to earlier-initialized members of the object.
QualType T = E->getType();
@@ -14643,8 +14674,8 @@ static bool FastEvaluateAsRValue(const Expr *Exp, Expr::EvalResult &Result,
// FIXME: Evaluating values of large array and record types can cause
// performance problems. Only do so in C++11 for now.
- if (Exp->isRValue() && (Exp->getType()->isArrayType() ||
- Exp->getType()->isRecordType()) &&
+ if (Exp->isPRValue() &&
+ (Exp->getType()->isArrayType() || Exp->getType()->isRecordType()) &&
!Ctx.getLangOpts().CPlusPlus11) {
IsConst = false;
return true;
@@ -14786,11 +14817,14 @@ bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx,
static bool EvaluateDestruction(const ASTContext &Ctx, APValue::LValueBase Base,
APValue DestroyedValue, QualType Type,
- SourceLocation Loc, Expr::EvalStatus &EStatus) {
- EvalInfo Info(Ctx, EStatus, EvalInfo::EM_ConstantExpression);
+ SourceLocation Loc, Expr::EvalStatus &EStatus,
+ bool IsConstantDestruction) {
+ EvalInfo Info(Ctx, EStatus,
+ IsConstantDestruction ? EvalInfo::EM_ConstantExpression
+ : EvalInfo::EM_ConstantFold);
Info.setEvaluatingDecl(Base, DestroyedValue,
EvalInfo::EvaluatingDeclKind::Dtor);
- Info.InConstantContext = true;
+ Info.InConstantContext = IsConstantDestruction;
LValue LVal;
LVal.set(Base);
@@ -14844,7 +14878,8 @@ bool Expr::EvaluateAsConstantExpr(EvalResult &Result, const ASTContext &Ctx,
// If this is a class template argument, it's required to have constant
// destruction too.
if (Kind == ConstantExprKind::ClassTemplateArgument &&
- (!EvaluateDestruction(Ctx, Base, Result.Val, T, getBeginLoc(), Result) ||
+ (!EvaluateDestruction(Ctx, Base, Result.Val, T, getBeginLoc(), Result,
+ true) ||
Result.HasSideEffects)) {
// FIXME: Prefix a note to indicate that the problem is lack of constant
// destruction.
@@ -14863,7 +14898,7 @@ bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
// FIXME: Evaluating initializers for large array and record types can cause
// performance problems. Only do so in C++11 for now.
- if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ if (isPRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
!Ctx.getLangOpts().CPlusPlus11)
return false;
@@ -14910,6 +14945,10 @@ bool VarDecl::evaluateDestruction(
Expr::EvalStatus EStatus;
EStatus.Diag = &Notes;
+ // Only treat the destruction as constant destruction if we formally have
+ // constant initialization (or are usable in a constant expression).
+ bool IsConstantDestruction = hasConstantInitialization();
+
// Make a copy of the value for the destructor to mutate, if we know it.
// Otherwise, treat the value as default-initialized; if the destructor works
// anyway, then the destruction is constant (and must be essentially empty).
@@ -14920,7 +14959,8 @@ bool VarDecl::evaluateDestruction(
return false;
if (!EvaluateDestruction(getASTContext(), this, std::move(DestroyedValue),
- getType(), getLocation(), EStatus) ||
+ getType(), getLocation(), EStatus,
+ IsConstantDestruction) ||
EStatus.HasSideEffects)
return false;
@@ -15140,6 +15180,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::CoawaitExprClass:
case Expr::DependentCoawaitExprClass:
case Expr::CoyieldExprClass:
+ case Expr::SYCLUniqueStableNameExprClass:
return ICEDiag(IK_NotICE, E->getBeginLoc());
case Expr::InitListExprClass: {
@@ -15147,7 +15188,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
// form "T x = { a };" is equivalent to "T x = a;".
// Unless we're initializing a reference, T is a scalar as it is known to be
// of integral or enumeration type.
- if (E->isRValue())
+ if (E->isPRValue())
if (cast<InitListExpr>(E)->getNumInits() == 1)
return CheckICE(cast<InitListExpr>(E)->getInit(0), Ctx);
return ICEDiag(IK_NotICE, E->getBeginLoc());
diff --git a/clang/lib/AST/ExprObjC.cpp b/clang/lib/AST/ExprObjC.cpp
index 662bc325f12c..7d932c8b059d 100644
--- a/clang/lib/AST/ExprObjC.cpp
+++ b/clang/lib/AST/ExprObjC.cpp
@@ -27,7 +27,7 @@ using namespace clang;
ObjCArrayLiteral::ObjCArrayLiteral(ArrayRef<Expr *> Elements, QualType T,
ObjCMethodDecl *Method, SourceRange SR)
- : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary),
+ : Expr(ObjCArrayLiteralClass, T, VK_PRValue, OK_Ordinary),
NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method) {
Expr **SaveElements = getElements();
for (unsigned I = 0, N = Elements.size(); I != N; ++I)
@@ -54,7 +54,7 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
bool HasPackExpansions, QualType T,
ObjCMethodDecl *method,
SourceRange SR)
- : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary),
+ : Expr(ObjCDictionaryLiteralClass, T, VK_PRValue, OK_Ordinary),
NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
DictWithObjectsMethod(method) {
KeyValuePair *KeyValues = getTrailingObjects<KeyValuePair>();
@@ -281,7 +281,7 @@ QualType ObjCMessageExpr::getCallReturnType(ASTContext &Ctx) const {
return Ctx.getLValueReferenceType(QT);
case VK_XValue:
return Ctx.getRValueReferenceType(QT);
- case VK_RValue:
+ case VK_PRValue:
return QT;
}
llvm_unreachable("Unsupported ExprValueKind");
diff --git a/clang/lib/AST/ExternalASTMerger.cpp b/clang/lib/AST/ExternalASTMerger.cpp
index 88bbe90a4e90..c7789b707b21 100644
--- a/clang/lib/AST/ExternalASTMerger.cpp
+++ b/clang/lib/AST/ExternalASTMerger.cpp
@@ -64,24 +64,24 @@ LookupSameContext(Source<TranslationUnitDecl *> SourceTU, const DeclContext *DC,
Source<DeclarationName> SourceName = *SourceNameOrErr;
DeclContext::lookup_result SearchResult =
SourceParentDC.get()->lookup(SourceName.get());
- size_t SearchResultSize = SearchResult.size();
- if (SearchResultSize == 0 || SearchResultSize > 1) {
- // There are two cases here. First, we might not find the name.
- // We might also find multiple copies, in which case we have no
- // guarantee that the one we wanted is the one we pick. (E.g.,
- // if we have two specializations of the same template it is
- // very hard to determine which is the one you want.)
- //
- // The Origins map fixes this problem by allowing the origin to be
- // explicitly recorded, so we trigger that recording by returning
- // nothing (rather than a possibly-inaccurate guess) here.
- return nullptr;
- } else {
- NamedDecl *SearchResultDecl = SearchResult[0];
+
+ // There are two cases here. First, we might not find the name.
+ // We might also find multiple copies, in which case we have no
+ // guarantee that the one we wanted is the one we pick. (E.g.,
+ // if we have two specializations of the same template it is
+ // very hard to determine which is the one you want.)
+ //
+ // The Origins map fixes this problem by allowing the origin to be
+ // explicitly recorded, so we trigger that recording by returning
+ // nothing (rather than a possibly-inaccurate guess) here.
+ if (SearchResult.isSingleResult()) {
+ NamedDecl *SearchResultDecl = SearchResult.front();
if (isa<DeclContext>(SearchResultDecl) &&
SearchResultDecl->getKind() == DC->getDeclKind())
return cast<DeclContext>(SearchResultDecl)->getPrimaryContext();
return nullptr; // This type of lookup is unsupported
+ } else {
+ return nullptr;
}
}
diff --git a/clang/lib/AST/Interp/Context.h b/clang/lib/AST/Interp/Context.h
index e4d831cbb991..e8238eea716a 100644
--- a/clang/lib/AST/Interp/Context.h
+++ b/clang/lib/AST/Interp/Context.h
@@ -16,7 +16,6 @@
#ifndef LLVM_CLANG_AST_INTERP_CONTEXT_H
#define LLVM_CLANG_AST_INTERP_CONTEXT_H
-#include "Context.h"
#include "InterpStack.h"
#include "clang/AST/APValue.h"
#include "llvm/ADT/PointerIntPair.h"
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index a63c5a871ba3..e2f7bf0dc26a 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -118,7 +118,8 @@ bool AddSubMulHelper(InterpState &S, CodePtr OpPC, unsigned Bits, const T &LHS,
const Expr *E = S.Current->getExpr(OpPC);
QualType Type = E->getType();
if (S.checkingForUndefinedBehavior()) {
- auto Trunc = Value.trunc(Result.bitWidth()).toString(10);
+ SmallString<32> Trunc;
+ Value.trunc(Result.bitWidth()).toString(Trunc, 10);
auto Loc = E->getExprLoc();
S.report(Loc, diag::warn_integer_constant_overflow) << Trunc << Type;
return true;
diff --git a/clang/lib/AST/ItaniumCXXABI.cpp b/clang/lib/AST/ItaniumCXXABI.cpp
index 069add8464ae..be10258a2d77 100644
--- a/clang/lib/AST/ItaniumCXXABI.cpp
+++ b/clang/lib/AST/ItaniumCXXABI.cpp
@@ -258,3 +258,9 @@ public:
CXXABI *clang::CreateItaniumCXXABI(ASTContext &Ctx) {
return new ItaniumCXXABI(Ctx);
}
+
+std::unique_ptr<MangleNumberingContext>
+clang::createItaniumNumberingContext(MangleContext *Mangler) {
+ return std::make_unique<ItaniumNumberingContext>(
+ cast<ItaniumMangleContext>(Mangler));
+}
diff --git a/clang/lib/AST/ItaniumMangle.cpp b/clang/lib/AST/ItaniumMangle.cpp
index 6c8d5687c64a..8cbac66fcf00 100644
--- a/clang/lib/AST/ItaniumMangle.cpp
+++ b/clang/lib/AST/ItaniumMangle.cpp
@@ -14,7 +14,6 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/Mangle.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
@@ -23,14 +22,16 @@
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
-#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Thunk.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -124,11 +125,16 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
typedef std::pair<const DeclContext*, IdentifierInfo*> DiscriminatorKeyTy;
llvm::DenseMap<DiscriminatorKeyTy, unsigned> Discriminator;
llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+ const DiscriminatorOverrideTy DiscriminatorOverride = nullptr;
+
+ bool NeedsUniqueInternalLinkageNames = false;
public:
- explicit ItaniumMangleContextImpl(ASTContext &Context,
- DiagnosticsEngine &Diags)
- : ItaniumMangleContext(Context, Diags) {}
+ explicit ItaniumMangleContextImpl(
+ ASTContext &Context, DiagnosticsEngine &Diags,
+ DiscriminatorOverrideTy DiscriminatorOverride)
+ : ItaniumMangleContext(Context, Diags),
+ DiscriminatorOverride(DiscriminatorOverride) {}
/// @name Mangler Entry Points
/// @{
@@ -137,6 +143,12 @@ public:
bool shouldMangleStringLiteral(const StringLiteral *) override {
return false;
}
+
+ bool isUniqueInternalLinkageDecl(const NamedDecl *ND) override;
+ void needsUniqueInternalLinkageNames() override {
+ NeedsUniqueInternalLinkageNames = true;
+ }
+
void mangleCXXName(GlobalDecl GD, raw_ostream &) override;
void mangleThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk,
raw_ostream &) override;
@@ -203,6 +215,40 @@ public:
disc = discriminator-2;
return true;
}
+
+ std::string getLambdaString(const CXXRecordDecl *Lambda) override {
+ // This function matches the one in MicrosoftMangle, which returns
+ // the string that is used in lambda mangled names.
+ assert(Lambda->isLambda() && "RD must be a lambda!");
+ std::string Name("<lambda");
+ Decl *LambdaContextDecl = Lambda->getLambdaContextDecl();
+ unsigned LambdaManglingNumber = Lambda->getLambdaManglingNumber();
+ unsigned LambdaId;
+ const ParmVarDecl *Parm = dyn_cast_or_null<ParmVarDecl>(LambdaContextDecl);
+ const FunctionDecl *Func =
+ Parm ? dyn_cast<FunctionDecl>(Parm->getDeclContext()) : nullptr;
+
+ if (Func) {
+ unsigned DefaultArgNo =
+ Func->getNumParams() - Parm->getFunctionScopeIndex();
+ Name += llvm::utostr(DefaultArgNo);
+ Name += "_";
+ }
+
+ if (LambdaManglingNumber)
+ LambdaId = LambdaManglingNumber;
+ else
+ LambdaId = getAnonymousStructIdForDebugInfo(Lambda);
+
+ Name += llvm::utostr(LambdaId);
+ Name += '>';
+ return Name;
+ }
+
+ DiscriminatorOverrideTy getDiscriminatorOverride() const override {
+ return DiscriminatorOverride;
+ }
+
/// @}
};
@@ -496,11 +542,16 @@ private:
void mangleNestedName(const TemplateDecl *TD,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
+ void mangleNestedNameWithClosurePrefix(GlobalDecl GD,
+ const NamedDecl *PrefixND,
+ const AbiTagList *AdditionalAbiTags);
void manglePrefix(NestedNameSpecifier *qualifier);
void manglePrefix(const DeclContext *DC, bool NoFunction=false);
void manglePrefix(QualType type);
void mangleTemplatePrefix(GlobalDecl GD, bool NoFunction=false);
void mangleTemplatePrefix(TemplateName Template);
+ const NamedDecl *getClosurePrefix(const Decl *ND);
+ void mangleClosurePrefix(const NamedDecl *ND, bool NoFunction = false);
bool mangleUnresolvedTypeOrSimpleId(QualType DestroyedType,
StringRef Prefix = "");
void mangleOperatorName(DeclarationName Name, unsigned Arity);
@@ -546,8 +597,8 @@ private:
unsigned knownArity);
void mangleCastExpression(const Expr *E, StringRef CastEncoding);
void mangleInitListElements(const InitListExpr *InitList);
- void mangleDeclRefExpr(const NamedDecl *D);
- void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
+ void mangleExpression(const Expr *E, unsigned Arity = UnknownArity,
+ bool AsTemplateArg = false);
void mangleCXXCtorType(CXXCtorType T, const CXXRecordDecl *InheritedFrom);
void mangleCXXDtorType(CXXDtorType T);
@@ -558,6 +609,7 @@ private:
unsigned NumTemplateArgs);
void mangleTemplateArgs(TemplateName TN, const TemplateArgumentList &AL);
void mangleTemplateArg(TemplateArgument A, bool NeedExactType);
+ void mangleTemplateArgExpr(const Expr *E);
void mangleValueInTemplateArg(QualType T, const APValue &V, bool TopLevel,
bool NeedExactType = false);
@@ -576,6 +628,36 @@ private:
}
+static bool isInternalLinkageDecl(const NamedDecl *ND) {
+ if (ND && ND->getFormalLinkage() == InternalLinkage &&
+ !ND->isExternallyVisible() &&
+ getEffectiveDeclContext(ND)->isFileContext() &&
+ !ND->isInAnonymousNamespace())
+ return true;
+ return false;
+}
+
+// Check if this Function Decl needs a unique internal linkage name.
+bool ItaniumMangleContextImpl::isUniqueInternalLinkageDecl(
+ const NamedDecl *ND) {
+ if (!NeedsUniqueInternalLinkageNames || !ND)
+ return false;
+
+ const auto *FD = dyn_cast<FunctionDecl>(ND);
+ if (!FD)
+ return false;
+
+ // For C functions without prototypes, return false as their
+ // names should not be mangled.
+ if (!FD->getType()->getAs<FunctionProtoType>())
+ return false;
+
+ if (isInternalLinkageDecl(ND))
+ return true;
+
+ return false;
+}
+
bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (FD) {
@@ -726,9 +808,17 @@ void CXXNameMangler::mangleFunctionEncodingBareType(const FunctionDecl *FD) {
EnableIfAttr *EIA = dyn_cast<EnableIfAttr>(*I);
if (!EIA)
continue;
- Out << 'X';
- mangleExpression(EIA->getCond());
- Out << 'E';
+ if (Context.getASTContext().getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver11) {
+ mangleTemplateArgExpr(EIA->getCond());
+ } else {
+ // Prior to Clang 12, we hardcoded the X/E around enable-if's argument,
+ // even though <template-arg> should not include an X/E around
+ // <expr-primary>.
+ Out << 'X';
+ mangleExpression(EIA->getCond());
+ Out << 'E';
+ }
}
Out << 'E';
FunctionTypeDepth.pop(Saved);
@@ -901,6 +991,13 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
if (Module *M = ND->getOwningModuleForLinkage())
mangleModuleName(M);
+ // Closures can require a nested-name mangling even if they're semantically
+ // in the global namespace.
+ if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
+ mangleNestedNameWithClosurePrefix(GD, PrefixND, AdditionalAbiTags);
+ return;
+ }
+
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
@@ -1331,10 +1428,7 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// 12_GLOBAL__N_1 mangling is quite sufficient there, and this better
// matches GCC anyway, because GCC does not treat anonymous namespaces as
// implying internal linkage.
- if (ND && ND->getFormalLinkage() == InternalLinkage &&
- !ND->isExternallyVisible() &&
- getEffectiveDeclContext(ND)->isFileContext() &&
- !ND->isInAnonymousNamespace())
+ if (isInternalLinkageDecl(ND))
Out << 'L';
auto *FD = dyn_cast<FunctionDecl>(ND);
@@ -1424,7 +1518,9 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
// <lambda-sig> ::= <template-param-decl>* <parameter-type>+
// # Parameter types or 'v' for 'void'.
if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
- if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ if (Record->isLambda() && (Record->getLambdaManglingNumber() ||
+ Context.getDiscriminatorOverride()(
+ Context.getASTContext(), Record))) {
assert(!AdditionalAbiTags &&
"Lambda type cannot have additional abi tags");
mangleLambda(Record);
@@ -1578,8 +1674,7 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD,
if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
mangleTemplatePrefix(TD, NoFunction);
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
- }
- else {
+ } else {
manglePrefix(DC, NoFunction);
mangleUnqualifiedName(GD, AdditionalAbiTags);
}
@@ -1599,6 +1694,23 @@ void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
Out << 'E';
}
+void CXXNameMangler::mangleNestedNameWithClosurePrefix(
+ GlobalDecl GD, const NamedDecl *PrefixND,
+ const AbiTagList *AdditionalAbiTags) {
+ // A <closure-prefix> represents a variable or field, not a regular
+ // DeclContext, so needs special handling. In this case we're mangling a
+ // limited form of <nested-name>:
+ //
+ // <nested-name> ::= N <closure-prefix> <closure-type-name> E
+
+ Out << 'N';
+
+ mangleClosurePrefix(PrefixND);
+ mangleUnqualifiedName(GD, AdditionalAbiTags);
+
+ Out << 'E';
+}
+
static GlobalDecl getParentOfLocalEntity(const DeclContext *DC) {
GlobalDecl GD;
// The Itanium spec says:
@@ -1674,7 +1786,10 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD,
if (D == RD) {
mangleUnqualifiedName(RD, AdditionalAbiTags);
} else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
- manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/);
+ if (const NamedDecl *PrefixND = getClosurePrefix(BD))
+ mangleClosurePrefix(PrefixND, true /*NoFunction*/);
+ else
+ manglePrefix(getEffectiveDeclContext(BD), true /*NoFunction*/);
assert(!AdditionalAbiTags && "Block cannot have additional abi tags");
mangleUnqualifiedBlock(BD);
} else {
@@ -1724,13 +1839,20 @@ void CXXNameMangler::mangleBlockForPrefix(const BlockDecl *Block) {
mangleLocalName(Block, /* AdditionalAbiTags */ nullptr);
return;
}
- manglePrefix(getEffectiveDeclContext(Block));
+ if (const NamedDecl *PrefixND = getClosurePrefix(Block))
+ mangleClosurePrefix(PrefixND);
+ else
+ manglePrefix(DC);
mangleUnqualifiedBlock(Block);
}
void CXXNameMangler::mangleUnqualifiedBlock(const BlockDecl *Block) {
+  // When trying to be ABI-compatible with clang 12 and before, mangle a
+  // <data-member-prefix> now, with no substitutions and no <template-args>.
if (Decl *Context = Block->getBlockManglingContextDecl()) {
- if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ if (getASTContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver12 &&
+ (isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
Context->getDeclContext()->isRecord()) {
const auto *ND = cast<NamedDecl>(Context);
if (ND->getIdentifier()) {
@@ -1803,20 +1925,13 @@ void CXXNameMangler::mangleTemplateParamDecl(const NamedDecl *Decl) {
}
void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
- // If the context of a closure type is an initializer for a class member
- // (static or nonstatic), it is encoded in a qualified name with a final
- // <prefix> of the form:
- //
- // <data-member-prefix> := <member source-name> M
- //
- // Technically, the data-member-prefix is part of the <prefix>. However,
- // since a closure type will always be mangled with a prefix, it's easier
- // to emit that last part of the prefix here.
+  // When trying to be ABI-compatible with clang 12 and before, mangle a
+  // <data-member-prefix> now, with no substitutions.
if (Decl *Context = Lambda->getLambdaContextDecl()) {
- if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ if (getASTContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver12 &&
+ (isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
!isa<ParmVarDecl>(Context)) {
- // FIXME: 'inline auto [a, b] = []{ return ... };' does not get a
- // reasonable mangling here.
if (const IdentifierInfo *Name
= cast<NamedDecl>(Context)->getIdentifier()) {
mangleSourceName(Name);
@@ -1837,7 +1952,17 @@ void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
// (in lexical order) with that same <lambda-sig> and context.
//
// The AST keeps track of the number for us.
- unsigned Number = Lambda->getLambdaManglingNumber();
+ //
+  // In CUDA/HIP, to ensure consistent lambda numbering between the device-
+ // and host-side compilations, an extra device mangle context may be created
+ // if the host-side CXX ABI has different numbering for lambda. In such case,
+ // if the mangle context is that device-side one, use the device-side lambda
+ // mangling number for this lambda.
+ llvm::Optional<unsigned> DeviceNumber =
+ Context.getDiscriminatorOverride()(Context.getASTContext(), Lambda);
+ unsigned Number = DeviceNumber.hasValue() ? *DeviceNumber
+ : Lambda->getLambdaManglingNumber();
+
assert(Number > 0 && "Lambda should be mangled as an unnamed class");
if (Number > 1)
mangleNumber(Number - 2);
@@ -1891,6 +2016,7 @@ void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
// <prefix> ::= <prefix> <unqualified-name>
// ::= <template-prefix> <template-args>
+ // ::= <closure-prefix>
// ::= <template-param>
// ::= # empty
// ::= <substitution>
@@ -1909,11 +2035,14 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
if (mangleSubstitution(ND))
return;
- // Check if we have a template.
+ // Check if we have a template-prefix or a closure-prefix.
const TemplateArgumentList *TemplateArgs = nullptr;
if (GlobalDecl TD = isTemplate(ND, TemplateArgs)) {
mangleTemplatePrefix(TD);
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
+ } else if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
+ mangleClosurePrefix(PrefixND, NoFunction);
+ mangleUnqualifiedName(ND, nullptr);
} else {
manglePrefix(getEffectiveDeclContext(ND), NoFunction);
mangleUnqualifiedName(ND, nullptr);
@@ -1979,6 +2108,50 @@ void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD,
addSubstitution(ND);
}
+const NamedDecl *CXXNameMangler::getClosurePrefix(const Decl *ND) {
+ if (getASTContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver12)
+ return nullptr;
+
+ const NamedDecl *Context = nullptr;
+ if (auto *Block = dyn_cast<BlockDecl>(ND)) {
+ Context = dyn_cast_or_null<NamedDecl>(Block->getBlockManglingContextDecl());
+ } else if (auto *RD = dyn_cast<CXXRecordDecl>(ND)) {
+ if (RD->isLambda())
+ Context = dyn_cast_or_null<NamedDecl>(RD->getLambdaContextDecl());
+ }
+ if (!Context)
+ return nullptr;
+
+ // Only lambdas within the initializer of a non-local variable or non-static
+ // data member get a <closure-prefix>.
+ if ((isa<VarDecl>(Context) && cast<VarDecl>(Context)->hasGlobalStorage()) ||
+ isa<FieldDecl>(Context))
+ return Context;
+
+ return nullptr;
+}
+
+void CXXNameMangler::mangleClosurePrefix(const NamedDecl *ND, bool NoFunction) {
+ // <closure-prefix> ::= [ <prefix> ] <unqualified-name> M
+ // ::= <template-prefix> <template-args> M
+ if (mangleSubstitution(ND))
+ return;
+
+ const TemplateArgumentList *TemplateArgs = nullptr;
+ if (GlobalDecl TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD, NoFunction);
+ mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
+ } else {
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND, nullptr);
+ }
+
+ Out << 'M';
+
+ addSubstitution(ND);
+}
+
/// Mangles a template name under the production <type>. Required for
/// template template arguments.
/// <type> ::= <class-enum-type>
@@ -2376,7 +2549,8 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals, const DependentAddressSp
if (Context.getASTContext().addressSpaceMapManglingFor(AS)) {
// <target-addrspace> ::= "AS" <address-space-number>
unsigned TargetAS = Context.getASTContext().getTargetAddressSpace(AS);
- if (TargetAS != 0)
+ if (TargetAS != 0 ||
+ Context.getASTContext().getTargetAddressSpace(LangAS::Default) != 0)
ASString = "AS" + llvm::utostr(TargetAS);
} else {
switch (AS) {
@@ -2405,6 +2579,23 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals, const DependentAddressSp
case LangAS::opencl_generic:
ASString = "CLgeneric";
break;
+ // <SYCL-addrspace> ::= "SY" [ "global" | "local" | "private" |
+ // "device" | "host" ]
+ case LangAS::sycl_global:
+ ASString = "SYglobal";
+ break;
+ case LangAS::sycl_global_device:
+ ASString = "SYdevice";
+ break;
+ case LangAS::sycl_global_host:
+ ASString = "SYhost";
+ break;
+ case LangAS::sycl_local:
+ ASString = "SYlocal";
+ break;
+ case LangAS::sycl_private:
+ ASString = "SYprivate";
+ break;
// <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
case LangAS::cuda_device:
ASString = "CUdevice";
@@ -2868,6 +3059,13 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
Out << 'u' << type_name.size() << type_name; \
break;
#include "clang/Basic/PPCTypes.def"
+ // TODO: Check the mangling scheme for RISC-V V.
+#define RVV_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ type_name = Name; \
+ Out << 'u' << type_name.size() << type_name; \
+ break;
+#include "clang/Basic/RISCVVTypes.def"
}
}
@@ -2908,6 +3106,8 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
return "ms_abi";
case CC_Swift:
return "swiftcall";
+ case CC_SwiftAsync:
+ return "swiftasynccall";
}
llvm_unreachable("bad calling convention");
}
@@ -2942,6 +3142,7 @@ CXXNameMangler::mangleExtParameterInfo(FunctionProtoType::ExtParameterInfo PI) {
// All of these start with "swift", so they come before "ns_consumed".
case ParameterABI::SwiftContext:
+ case ParameterABI::SwiftAsyncContext:
case ParameterABI::SwiftErrorResult:
case ParameterABI::SwiftIndirectResult:
mangleVendorQualifier(getParameterABISpelling(PI.getABI()));
@@ -3111,7 +3312,11 @@ void CXXNameMangler::mangleType(const VariableArrayType *T) {
}
void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
Out << 'A';
- mangleExpression(T->getSizeExpr());
+ // A DependentSizedArrayType might not have size expression as below
+ //
+ // template<int ...N> int arr[] = {N...};
+ if (T->getSizeExpr())
+ mangleExpression(T->getSizeExpr());
Out << '_';
mangleType(T->getElementType());
}
@@ -3528,8 +3733,8 @@ void CXXNameMangler::mangleType(const DependentSizedMatrixType *T) {
Out << "u" << VendorQualifier.size() << VendorQualifier;
Out << "I";
- mangleTemplateArg(T->getRowExpr(), false);
- mangleTemplateArg(T->getColumnExpr(), false);
+ mangleTemplateArgExpr(T->getRowExpr());
+ mangleTemplateArgExpr(T->getColumnExpr());
mangleType(T->getElementType());
Out << "E";
}
@@ -3871,33 +4076,8 @@ void CXXNameMangler::mangleInitListElements(const InitListExpr *InitList) {
mangleExpression(InitList->getInit(i));
}
-void CXXNameMangler::mangleDeclRefExpr(const NamedDecl *D) {
- switch (D->getKind()) {
- default:
- // <expr-primary> ::= L <mangled-name> E # external name
- Out << 'L';
- mangle(D);
- Out << 'E';
- break;
-
- case Decl::ParmVar:
- mangleFunctionParam(cast<ParmVarDecl>(D));
- break;
-
- case Decl::EnumConstant: {
- const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
- mangleIntegerLiteral(ED->getType(), ED->getInitVal());
- break;
- }
-
- case Decl::NonTypeTemplateParm:
- const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
- mangleTemplateParameter(PD->getDepth(), PD->getIndex());
- break;
- }
-}
-
-void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
+void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity,
+ bool AsTemplateArg) {
// <expression> ::= <unary operator-name> <expression>
// ::= <binary operator-name> <expression> <expression>
// ::= <trinary operator-name> <expression> <expression> <expression>
@@ -3911,18 +4091,64 @@ void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
// ::= at <type> # alignof (a type)
// ::= <template-param>
// ::= <function-param>
+ // ::= fpT # 'this' expression (part of <function-param>)
// ::= sr <type> <unqualified-name> # dependent name
// ::= sr <type> <unqualified-name> <template-args> # dependent template-id
// ::= ds <expression> <expression> # expr.*expr
// ::= sZ <template-param> # size of a parameter pack
// ::= sZ <function-param> # size of a function parameter pack
+ // ::= u <source-name> <template-arg>* E # vendor extended expression
// ::= <expr-primary>
// <expr-primary> ::= L <type> <value number> E # integer literal
- // ::= L <type <value float> E # floating literal
+ // ::= L <type> <value float> E # floating literal
+ // ::= L <type> <string type> E # string literal
+ // ::= L <nullptr type> E # nullptr literal "LDnE"
+ // ::= L <pointer type> 0 E # null pointer template argument
+ // ::= L <type> <real-part float> _ <imag-part float> E # complex floating point literal (C99); not used by clang
// ::= L <mangled-name> E # external name
- // ::= fpT # 'this' expression
QualType ImplicitlyConvertedToType;
+ // A top-level expression that's not <expr-primary> needs to be wrapped in
+ // X...E in a template arg.
+ bool IsPrimaryExpr = true;
+ auto NotPrimaryExpr = [&] {
+ if (AsTemplateArg && IsPrimaryExpr)
+ Out << 'X';
+ IsPrimaryExpr = false;
+ };
+
+ auto MangleDeclRefExpr = [&](const NamedDecl *D) {
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D);
+ Out << 'E';
+ break;
+
+ case Decl::ParmVar:
+ NotPrimaryExpr();
+ mangleFunctionParam(cast<ParmVarDecl>(D));
+ break;
+
+ case Decl::EnumConstant: {
+ // <expr-primary>
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
+ case Decl::NonTypeTemplateParm:
+ NotPrimaryExpr();
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getDepth(), PD->getIndex());
+ break;
+ }
+ };
+
+ // 'goto recurse' is used when handling a simple "unwrapping" node which
+ // produces no output, where ImplicitlyConvertedToType and AsTemplateArg need
+ // to be preserved.
recurse:
switch (E->getStmtClass()) {
case Expr::NoStmtClass:
@@ -3994,6 +4220,7 @@ recurse:
case Expr::SourceLocExprClass:
case Expr::BuiltinBitCastExprClass:
{
+ NotPrimaryExpr();
if (!NullOut) {
// As bad as this diagnostic is, it's better than crashing.
DiagnosticsEngine &Diags = Context.getDiags();
@@ -4001,33 +4228,48 @@ recurse:
"cannot yet mangle expression type %0");
Diags.Report(E->getExprLoc(), DiagID)
<< E->getStmtClassName() << E->getSourceRange();
+ return;
}
break;
}
case Expr::CXXUuidofExprClass: {
+ NotPrimaryExpr();
const CXXUuidofExpr *UE = cast<CXXUuidofExpr>(E);
- if (UE->isTypeOperand()) {
- QualType UuidT = UE->getTypeOperand(Context.getASTContext());
- Out << "u8__uuidoft";
- mangleType(UuidT);
+ // As of clang 12, uuidof uses the vendor extended expression
+ // mangling. Previously, it used a special-cased nonstandard extension.
+ if (Context.getASTContext().getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver11) {
+ Out << "u8__uuidof";
+ if (UE->isTypeOperand())
+ mangleType(UE->getTypeOperand(Context.getASTContext()));
+ else
+ mangleTemplateArgExpr(UE->getExprOperand());
+ Out << 'E';
} else {
- Expr *UuidExp = UE->getExprOperand();
- Out << "u8__uuidofz";
- mangleExpression(UuidExp, Arity);
+ if (UE->isTypeOperand()) {
+ QualType UuidT = UE->getTypeOperand(Context.getASTContext());
+ Out << "u8__uuidoft";
+ mangleType(UuidT);
+ } else {
+ Expr *UuidExp = UE->getExprOperand();
+ Out << "u8__uuidofz";
+ mangleExpression(UuidExp);
+ }
}
break;
}
// Even gcc-4.5 doesn't mangle this.
case Expr::BinaryConditionalOperatorClass: {
+ NotPrimaryExpr();
DiagnosticsEngine &Diags = Context.getDiags();
unsigned DiagID =
Diags.getCustomDiagID(DiagnosticsEngine::Error,
"?: operator with omitted middle operand cannot be mangled");
Diags.Report(E->getExprLoc(), DiagID)
<< E->getStmtClassName() << E->getSourceRange();
- break;
+ return;
}
// These are used for internal purposes and cannot be meaningfully mangled.
@@ -4035,6 +4277,7 @@ recurse:
llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");
case Expr::InitListExprClass: {
+ NotPrimaryExpr();
Out << "il";
mangleInitListElements(cast<InitListExpr>(E));
Out << "E";
@@ -4042,6 +4285,7 @@ recurse:
}
case Expr::DesignatedInitExprClass: {
+ NotPrimaryExpr();
auto *DIE = cast<DesignatedInitExpr>(E);
for (const auto &Designator : DIE->designators()) {
if (Designator.isFieldDesignator()) {
@@ -4063,27 +4307,27 @@ recurse:
}
case Expr::CXXDefaultArgExprClass:
- mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
- break;
+ E = cast<CXXDefaultArgExpr>(E)->getExpr();
+ goto recurse;
case Expr::CXXDefaultInitExprClass:
- mangleExpression(cast<CXXDefaultInitExpr>(E)->getExpr(), Arity);
- break;
+ E = cast<CXXDefaultInitExpr>(E)->getExpr();
+ goto recurse;
case Expr::CXXStdInitializerListExprClass:
- mangleExpression(cast<CXXStdInitializerListExpr>(E)->getSubExpr(), Arity);
- break;
+ E = cast<CXXStdInitializerListExpr>(E)->getSubExpr();
+ goto recurse;
case Expr::SubstNonTypeTemplateParmExprClass:
- mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
- Arity);
- break;
+ E = cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement();
+ goto recurse;
case Expr::UserDefinedLiteralClass:
// We follow g++'s approach of mangling a UDL as a call to the literal
// operator.
case Expr::CXXMemberCallExprClass: // fallthrough
case Expr::CallExprClass: {
+ NotPrimaryExpr();
const CallExpr *CE = cast<CallExpr>(E);
// <expression> ::= cp <simple-id> <expression>* E
@@ -4114,6 +4358,7 @@ recurse:
}
case Expr::CXXNewExprClass: {
+ NotPrimaryExpr();
const CXXNewExpr *New = cast<CXXNewExpr>(E);
if (New->isGlobalNew()) Out << "gs";
Out << (New->isArray() ? "na" : "nw");
@@ -4149,6 +4394,7 @@ recurse:
}
case Expr::CXXPseudoDestructorExprClass: {
+ NotPrimaryExpr();
const auto *PDE = cast<CXXPseudoDestructorExpr>(E);
if (const Expr *Base = PDE->getBase())
mangleMemberExprBase(Base, PDE->isArrow());
@@ -4175,6 +4421,7 @@ recurse:
}
case Expr::MemberExprClass: {
+ NotPrimaryExpr();
const MemberExpr *ME = cast<MemberExpr>(E);
mangleMemberExpr(ME->getBase(), ME->isArrow(),
ME->getQualifier(), nullptr,
@@ -4185,6 +4432,7 @@ recurse:
}
case Expr::UnresolvedMemberExprClass: {
+ NotPrimaryExpr();
const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
ME->isArrow(), ME->getQualifier(), nullptr,
@@ -4195,6 +4443,7 @@ recurse:
}
case Expr::CXXDependentScopeMemberExprClass: {
+ NotPrimaryExpr();
const CXXDependentScopeMemberExpr *ME
= cast<CXXDependentScopeMemberExpr>(E);
mangleMemberExpr(ME->isImplicitAccess() ? nullptr : ME->getBase(),
@@ -4207,6 +4456,7 @@ recurse:
}
case Expr::UnresolvedLookupExprClass: {
+ NotPrimaryExpr();
const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
mangleUnresolvedName(ULE->getQualifier(), ULE->getName(),
ULE->getTemplateArgs(), ULE->getNumTemplateArgs(),
@@ -4215,6 +4465,7 @@ recurse:
}
case Expr::CXXUnresolvedConstructExprClass: {
+ NotPrimaryExpr();
const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
unsigned N = CE->getNumArgs();
@@ -4225,7 +4476,7 @@ recurse:
mangleType(CE->getType());
mangleInitListElements(IL);
Out << "E";
- return;
+ break;
}
Out << "cv";
@@ -4237,14 +4488,17 @@ recurse:
}
case Expr::CXXConstructExprClass: {
+ // An implicit cast is silent, thus may contain <expr-primary>.
const auto *CE = cast<CXXConstructExpr>(E);
if (!CE->isListInitialization() || CE->isStdInitListInitialization()) {
assert(
CE->getNumArgs() >= 1 &&
(CE->getNumArgs() == 1 || isa<CXXDefaultArgExpr>(CE->getArg(1))) &&
"implicit CXXConstructExpr must have one argument");
- return mangleExpression(cast<CXXConstructExpr>(E)->getArg(0));
+ E = cast<CXXConstructExpr>(E)->getArg(0);
+ goto recurse;
}
+ NotPrimaryExpr();
Out << "il";
for (auto *E : CE->arguments())
mangleExpression(E);
@@ -4253,6 +4507,7 @@ recurse:
}
case Expr::CXXTemporaryObjectExprClass: {
+ NotPrimaryExpr();
const auto *CE = cast<CXXTemporaryObjectExpr>(E);
unsigned N = CE->getNumArgs();
bool List = CE->isListInitialization();
@@ -4282,17 +4537,20 @@ recurse:
}
case Expr::CXXScalarValueInitExprClass:
+ NotPrimaryExpr();
Out << "cv";
mangleType(E->getType());
Out << "_E";
break;
case Expr::CXXNoexceptExprClass:
+ NotPrimaryExpr();
Out << "nx";
mangleExpression(cast<CXXNoexceptExpr>(E)->getOperand());
break;
case Expr::UnaryExprOrTypeTraitExprClass: {
+ // Non-instantiation-dependent traits are an <expr-primary> integer literal.
const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
if (!SAE->isInstantiationDependent()) {
@@ -4312,13 +4570,41 @@ recurse:
break;
}
+ NotPrimaryExpr(); // But otherwise, they are not.
+
+ auto MangleAlignofSizeofArg = [&] {
+ if (SAE->isArgumentType()) {
+ Out << 't';
+ mangleType(SAE->getArgumentType());
+ } else {
+ Out << 'z';
+ mangleExpression(SAE->getArgumentExpr());
+ }
+ };
+
switch(SAE->getKind()) {
case UETT_SizeOf:
Out << 's';
+ MangleAlignofSizeofArg();
break;
case UETT_PreferredAlignOf:
+ // As of clang 12, we mangle __alignof__ differently than alignof. (They
+ // have acted differently since Clang 8, but were previously mangled the
+ // same.)
+ if (Context.getASTContext().getLangOpts().getClangABICompat() >
+ LangOptions::ClangABI::Ver11) {
+ Out << "u11__alignof__";
+ if (SAE->isArgumentType())
+ mangleType(SAE->getArgumentType());
+ else
+ mangleTemplateArgExpr(SAE->getArgumentExpr());
+ Out << 'E';
+ break;
+ }
+ LLVM_FALLTHROUGH;
case UETT_AlignOf:
Out << 'a';
+ MangleAlignofSizeofArg();
break;
case UETT_VecStep: {
DiagnosticsEngine &Diags = Context.getDiags();
@@ -4336,17 +4622,11 @@ recurse:
return;
}
}
- if (SAE->isArgumentType()) {
- Out << 't';
- mangleType(SAE->getArgumentType());
- } else {
- Out << 'z';
- mangleExpression(SAE->getArgumentExpr());
- }
break;
}
case Expr::CXXThrowExprClass: {
+ NotPrimaryExpr();
const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
// <expression> ::= tw <expression> # throw expression
// ::= tr # rethrow
@@ -4360,6 +4640,7 @@ recurse:
}
case Expr::CXXTypeidExprClass: {
+ NotPrimaryExpr();
const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
// <expression> ::= ti <type> # typeid (type)
// ::= te <expression> # typeid (expression)
@@ -4374,6 +4655,7 @@ recurse:
}
case Expr::CXXDeleteExprClass: {
+ NotPrimaryExpr();
const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
// <expression> ::= [gs] dl <expression> # [::] delete expr
// ::= [gs] da <expression> # [::] delete [] expr
@@ -4384,6 +4666,7 @@ recurse:
}
case Expr::UnaryOperatorClass: {
+ NotPrimaryExpr();
const UnaryOperator *UO = cast<UnaryOperator>(E);
mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
/*Arity=*/1);
@@ -4392,6 +4675,7 @@ recurse:
}
case Expr::ArraySubscriptExprClass: {
+ NotPrimaryExpr();
const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
// Array subscript is treated as a syntactically weird form of
@@ -4403,6 +4687,7 @@ recurse:
}
case Expr::MatrixSubscriptExprClass: {
+ NotPrimaryExpr();
const MatrixSubscriptExpr *ME = cast<MatrixSubscriptExpr>(E);
Out << "ixix";
mangleExpression(ME->getBase());
@@ -4413,6 +4698,7 @@ recurse:
case Expr::CompoundAssignOperatorClass: // fallthrough
case Expr::BinaryOperatorClass: {
+ NotPrimaryExpr();
const BinaryOperator *BO = cast<BinaryOperator>(E);
if (BO->getOpcode() == BO_PtrMemD)
Out << "ds";
@@ -4425,6 +4711,7 @@ recurse:
}
case Expr::CXXRewrittenBinaryOperatorClass: {
+ NotPrimaryExpr();
// The mangled form represents the original syntax.
CXXRewrittenBinaryOperator::DecomposedForm Decomposed =
cast<CXXRewrittenBinaryOperator>(E)->getDecomposedForm();
@@ -4436,6 +4723,7 @@ recurse:
}
case Expr::ConditionalOperatorClass: {
+ NotPrimaryExpr();
const ConditionalOperator *CO = cast<ConditionalOperator>(E);
mangleOperatorName(OO_Conditional, /*Arity=*/3);
mangleExpression(CO->getCond());
@@ -4451,19 +4739,22 @@ recurse:
}
case Expr::ObjCBridgedCastExprClass: {
+ NotPrimaryExpr();
// Mangle ownership casts as a vendor extended operator __bridge,
// __bridge_transfer, or __bridge_retain.
StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName();
Out << "v1U" << Kind.size() << Kind;
+ mangleCastExpression(E, "cv");
+ break;
}
- // Fall through to mangle the cast itself.
- LLVM_FALLTHROUGH;
case Expr::CStyleCastExprClass:
+ NotPrimaryExpr();
mangleCastExpression(E, "cv");
break;
case Expr::CXXFunctionalCastExprClass: {
+ NotPrimaryExpr();
auto *Sub = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreImplicit();
// FIXME: Add isImplicit to CXXConstructExpr.
if (auto *CCE = dyn_cast<CXXConstructExpr>(Sub))
@@ -4483,22 +4774,28 @@ recurse:
}
case Expr::CXXStaticCastExprClass:
+ NotPrimaryExpr();
mangleCastExpression(E, "sc");
break;
case Expr::CXXDynamicCastExprClass:
+ NotPrimaryExpr();
mangleCastExpression(E, "dc");
break;
case Expr::CXXReinterpretCastExprClass:
+ NotPrimaryExpr();
mangleCastExpression(E, "rc");
break;
case Expr::CXXConstCastExprClass:
+ NotPrimaryExpr();
mangleCastExpression(E, "cc");
break;
case Expr::CXXAddrspaceCastExprClass:
+ NotPrimaryExpr();
mangleCastExpression(E, "ac");
break;
case Expr::CXXOperatorCallExprClass: {
+ NotPrimaryExpr();
const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
unsigned NumArgs = CE->getNumArgs();
// A CXXOperatorCallExpr for OO_Arrow models only semantics, not syntax
@@ -4512,9 +4809,8 @@ recurse:
}
case Expr::ParenExprClass:
- mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
- break;
-
+ E = cast<ParenExpr>(E)->getSubExpr();
+ goto recurse;
case Expr::ConceptSpecializationExprClass: {
// <expr-primary> ::= L <mangled-name> E # external name
@@ -4528,10 +4824,12 @@ recurse:
}
case Expr::DeclRefExprClass:
- mangleDeclRefExpr(cast<DeclRefExpr>(E)->getDecl());
+ // MangleDeclRefExpr helper handles primary-vs-nonprimary
+ MangleDeclRefExpr(cast<DeclRefExpr>(E)->getDecl());
break;
case Expr::SubstNonTypeTemplateParmPackExprClass:
+ NotPrimaryExpr();
// FIXME: not clear how to mangle this!
// template <unsigned N...> class A {
// template <class U...> void foo(U (&x)[N]...);
@@ -4540,14 +4838,16 @@ recurse:
break;
case Expr::FunctionParmPackExprClass: {
+ NotPrimaryExpr();
// FIXME: not clear how to mangle this!
const FunctionParmPackExpr *FPPE = cast<FunctionParmPackExpr>(E);
Out << "v110_SUBSTPACK";
- mangleDeclRefExpr(FPPE->getParameterPack());
+ MangleDeclRefExpr(FPPE->getParameterPack());
break;
}
case Expr::DependentScopeDeclRefExprClass: {
+ NotPrimaryExpr();
const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
mangleUnresolvedName(DRE->getQualifier(), DRE->getDeclName(),
DRE->getTemplateArgs(), DRE->getNumTemplateArgs(),
@@ -4556,24 +4856,27 @@ recurse:
}
case Expr::CXXBindTemporaryExprClass:
- mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
- break;
+ E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+ goto recurse;
case Expr::ExprWithCleanupsClass:
- mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity);
- break;
+ E = cast<ExprWithCleanups>(E)->getSubExpr();
+ goto recurse;
case Expr::FloatingLiteralClass: {
+ // <expr-primary>
const FloatingLiteral *FL = cast<FloatingLiteral>(E);
mangleFloatLiteral(FL->getType(), FL->getValue());
break;
}
case Expr::FixedPointLiteralClass:
+ // Currently unimplemented -- might be <expr-primary> in future?
mangleFixedPointLiteral();
break;
case Expr::CharacterLiteralClass:
+ // <expr-primary>
Out << 'L';
mangleType(E->getType());
Out << cast<CharacterLiteral>(E)->getValue();
@@ -4582,18 +4885,21 @@ recurse:
// FIXME. __objc_yes/__objc_no are mangled same as true/false
case Expr::ObjCBoolLiteralExprClass:
+ // <expr-primary>
Out << "Lb";
Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0');
Out << 'E';
break;
case Expr::CXXBoolLiteralExprClass:
+ // <expr-primary>
Out << "Lb";
Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
Out << 'E';
break;
case Expr::IntegerLiteralClass: {
+ // <expr-primary>
llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
if (E->getType()->isSignedIntegerType())
Value.setIsSigned(true);
@@ -4602,6 +4908,7 @@ recurse:
}
case Expr::ImaginaryLiteralClass: {
+ // <expr-primary>
const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
// Mangle as if a complex literal.
// Proposal from David Vandevoorde, 2010.06.30.
@@ -4625,6 +4932,7 @@ recurse:
}
case Expr::StringLiteralClass: {
+ // <expr-primary>
// Revised proposal from David Vandervoorde, 2010.07.15.
Out << 'L';
assert(isa<ConstantArrayType>(E->getType()));
@@ -4634,21 +4942,25 @@ recurse:
}
case Expr::GNUNullExprClass:
+ // <expr-primary>
// Mangle as if an integer literal 0.
mangleIntegerLiteral(E->getType(), llvm::APSInt(32));
break;
case Expr::CXXNullPtrLiteralExprClass: {
+ // <expr-primary>
Out << "LDnE";
break;
}
case Expr::PackExpansionExprClass:
+ NotPrimaryExpr();
Out << "sp";
mangleExpression(cast<PackExpansionExpr>(E)->getPattern());
break;
case Expr::SizeOfPackExprClass: {
+ NotPrimaryExpr();
auto *SPE = cast<SizeOfPackExpr>(E);
if (SPE->isPartiallySubstituted()) {
Out << "sP";
@@ -4673,12 +4985,12 @@ recurse:
break;
}
- case Expr::MaterializeTemporaryExprClass: {
- mangleExpression(cast<MaterializeTemporaryExpr>(E)->getSubExpr());
- break;
- }
+ case Expr::MaterializeTemporaryExprClass:
+ E = cast<MaterializeTemporaryExpr>(E)->getSubExpr();
+ goto recurse;
case Expr::CXXFoldExprClass: {
+ NotPrimaryExpr();
auto *FE = cast<CXXFoldExpr>(E);
if (FE->isLeftFold())
Out << (FE->getInit() ? "fL" : "fl");
@@ -4700,27 +5012,44 @@ recurse:
}
case Expr::CXXThisExprClass:
+ NotPrimaryExpr();
Out << "fpT";
break;
case Expr::CoawaitExprClass:
// FIXME: Propose a non-vendor mangling.
+ NotPrimaryExpr();
Out << "v18co_await";
mangleExpression(cast<CoawaitExpr>(E)->getOperand());
break;
case Expr::DependentCoawaitExprClass:
// FIXME: Propose a non-vendor mangling.
+ NotPrimaryExpr();
Out << "v18co_await";
mangleExpression(cast<DependentCoawaitExpr>(E)->getOperand());
break;
case Expr::CoyieldExprClass:
// FIXME: Propose a non-vendor mangling.
+ NotPrimaryExpr();
Out << "v18co_yield";
mangleExpression(cast<CoawaitExpr>(E)->getOperand());
break;
+ case Expr::SYCLUniqueStableNameExprClass: {
+ const auto *USN = cast<SYCLUniqueStableNameExpr>(E);
+ NotPrimaryExpr();
+
+ Out << "u33__builtin_sycl_unique_stable_name";
+ mangleType(USN->getTypeSourceInfo()->getType());
+
+ Out << "E";
+ break;
+ }
}
+
+ if (AsTemplateArg && !IsPrimaryExpr)
+ Out << 'E';
}
/// Mangle an expression which refers to a parameter variable.
@@ -4970,26 +5299,9 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
Out << "Dp";
mangleType(A.getAsTemplateOrTemplatePattern());
break;
- case TemplateArgument::Expression: {
- // It's possible to end up with a DeclRefExpr here in certain
- // dependent cases, in which case we should mangle as a
- // declaration.
- const Expr *E = A.getAsExpr()->IgnoreParenImpCasts();
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
- const ValueDecl *D = DRE->getDecl();
- if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
- Out << 'L';
- mangle(D);
- Out << 'E';
- break;
- }
- }
-
- Out << 'X';
- mangleExpression(E);
- Out << 'E';
+ case TemplateArgument::Expression:
+ mangleTemplateArgExpr(A.getAsExpr());
break;
- }
case TemplateArgument::Integral:
mangleIntegerLiteral(A.getIntegralType(), A.getAsIntegral());
break;
@@ -5044,6 +5356,38 @@ void CXXNameMangler::mangleTemplateArg(TemplateArgument A, bool NeedExactType) {
}
}
+void CXXNameMangler::mangleTemplateArgExpr(const Expr *E) {
+ ASTContext &Ctx = Context.getASTContext();
+ if (Ctx.getLangOpts().getClangABICompat() > LangOptions::ClangABI::Ver11) {
+ mangleExpression(E, UnknownArity, /*AsTemplateArg=*/true);
+ return;
+ }
+
+ // Prior to Clang 12, we didn't omit the X .. E around <expr-primary>
+ // correctly in cases where the template argument was
+ // constructed from an expression rather than an already-evaluated
+ // literal. In such a case, we would then e.g. emit 'XLi0EE' instead of
+ // 'Li0E'.
+ //
+ // We did special-case DeclRefExpr to attempt to DTRT for that one
+ // expression-kind, but while doing so, unfortunately handled ParmVarDecl
+ // (subtype of VarDecl) _incorrectly_, and emitted 'L_Z .. E' instead of
+ // the proper 'Xfp_E'.
+ E = E->IgnoreParenImpCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *D = DRE->getDecl();
+ if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
+ Out << 'L';
+ mangle(D);
+ Out << 'E';
+ return;
+ }
+ }
+ Out << 'X';
+ mangleExpression(E);
+ Out << 'E';
+}
+
/// Determine whether a given value is equivalent to zero-initialization for
/// the purpose of discarding a trailing portion of a 'tl' mangling.
///
@@ -6054,7 +6398,17 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda,
Mangler.mangleLambdaSig(Lambda);
}
+ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context,
+ DiagnosticsEngine &Diags) {
+ return new ItaniumMangleContextImpl(
+ Context, Diags,
+ [](ASTContext &, const NamedDecl *) -> llvm::Optional<unsigned> {
+ return llvm::None;
+ });
+}
+
ItaniumMangleContext *
-ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
- return new ItaniumMangleContextImpl(Context, Diags);
+ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags,
+ DiscriminatorOverrideTy DiscriminatorOverride) {
+ return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride);
}
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 7b99546bbe2d..f09f9d38759f 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -59,7 +59,9 @@ void JSONNodeDumper::Visit(const Stmt *S) {
switch (E->getValueKind()) {
case VK_LValue: Category = "lvalue"; break;
case VK_XValue: Category = "xvalue"; break;
- case VK_RValue: Category = "rvalue"; break;
+ case VK_PRValue:
+ Category = "prvalue";
+ break;
}
JOS.attribute("valueCategory", Category);
}
@@ -183,6 +185,35 @@ void JSONNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
attributeOnlyIfTrue("selected", A.isSelected());
}
+void JSONNodeDumper::Visit(const concepts::Requirement *R) {
+ if (!R)
+ return;
+
+ switch (R->getKind()) {
+ case concepts::Requirement::RK_Type:
+ JOS.attribute("kind", "TypeRequirement");
+ break;
+ case concepts::Requirement::RK_Simple:
+ JOS.attribute("kind", "SimpleRequirement");
+ break;
+ case concepts::Requirement::RK_Compound:
+ JOS.attribute("kind", "CompoundRequirement");
+ break;
+ case concepts::Requirement::RK_Nested:
+ JOS.attribute("kind", "NestedRequirement");
+ break;
+ }
+
+ if (auto *ER = dyn_cast<concepts::ExprRequirement>(R))
+ attributeOnlyIfTrue("noexcept", ER->hasNoexceptRequirement());
+
+ attributeOnlyIfTrue("isDependent", R->isDependent());
+ if (!R->isDependent())
+ JOS.attribute("satisfied", R->isSatisfied());
+ attributeOnlyIfTrue("containsUnexpandedPack",
+ R->containsUnexpandedParameterPack());
+}
+
void JSONNodeDumper::Visit(const APValue &Value, QualType Ty) {
std::string Str;
llvm::raw_string_ostream OS(Str);
@@ -711,9 +742,13 @@ void JSONNodeDumper::VisitMemberPointerType(const MemberPointerType *MPT) {
void JSONNodeDumper::VisitNamedDecl(const NamedDecl *ND) {
if (ND && ND->getDeclName()) {
JOS.attribute("name", ND->getNameAsString());
- std::string MangledName = ASTNameGen.getName(ND);
- if (!MangledName.empty())
- JOS.attribute("mangledName", MangledName);
+ // FIXME: There are likely other contexts in which it makes no sense to ask
+ // for a mangled name.
+ if (!isa<RequiresExprBodyDecl>(ND->getDeclContext())) {
+ std::string MangledName = ASTNameGen.getName(ND);
+ if (!MangledName.empty())
+ JOS.attribute("mangledName", MangledName);
+ }
}
}
@@ -756,6 +791,10 @@ void JSONNodeDumper::VisitUsingDecl(const UsingDecl *UD) {
JOS.attribute("name", Name);
}
+void JSONNodeDumper::VisitUsingEnumDecl(const UsingEnumDecl *UED) {
+ JOS.attribute("target", createBareDeclRef(UED->getEnumDecl()));
+}
+
void JSONNodeDumper::VisitUsingShadowDecl(const UsingShadowDecl *USD) {
JOS.attribute("target", createBareDeclRef(USD->getTargetDecl()));
}
@@ -887,9 +926,10 @@ void JSONNodeDumper::VisitTemplateTemplateParmDecl(
if (D->hasDefaultArgument())
JOS.attributeObject("defaultArg", [=] {
+ const auto *InheritedFrom = D->getDefaultArgStorage().getInheritedFrom();
Visit(D->getDefaultArgument().getArgument(),
- D->getDefaultArgStorage().getInheritedFrom()->getSourceRange(),
- D->getDefaultArgStorage().getInheritedFrom(),
+ InheritedFrom ? InheritedFrom->getSourceRange() : SourceLocation{},
+ InheritedFrom,
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
});
}
@@ -1163,6 +1203,12 @@ void JSONNodeDumper::VisitDeclRefExpr(const DeclRefExpr *DRE) {
}
}
+void JSONNodeDumper::VisitSYCLUniqueStableNameExpr(
+ const SYCLUniqueStableNameExpr *E) {
+ JOS.attribute("typeSourceInfo",
+ createQualType(E->getTypeSourceInfo()->getType()));
+}
+
void JSONNodeDumper::VisitPredefinedExpr(const PredefinedExpr *PE) {
JOS.attribute("name", PredefinedExpr::getIdentKindName(PE->getIdentKind()));
}
@@ -1402,10 +1448,16 @@ void JSONNodeDumper::VisitCXXDependentScopeMemberExpr(
}
}
+void JSONNodeDumper::VisitRequiresExpr(const RequiresExpr *RE) {
+ if (!RE->isValueDependent())
+ JOS.attribute("satisfied", RE->isSatisfied());
+}
+
void JSONNodeDumper::VisitIntegerLiteral(const IntegerLiteral *IL) {
- JOS.attribute("value",
- IL->getValue().toString(
- /*Radix=*/10, IL->getType()->isSignedIntegerType()));
+ llvm::SmallString<16> Buffer;
+ IL->getValue().toString(Buffer,
+ /*Radix=*/10, IL->getType()->isSignedIntegerType());
+ JOS.attribute("value", Buffer);
}
void JSONNodeDumper::VisitCharacterLiteral(const CharacterLiteral *CL) {
// FIXME: This should probably print the character literal as a string,
@@ -1450,6 +1502,7 @@ void JSONNodeDumper::VisitCaseStmt(const CaseStmt *CS) {
void JSONNodeDumper::VisitLabelStmt(const LabelStmt *LS) {
JOS.attribute("name", LS->getName());
JOS.attribute("declId", createPointerRepresentation(LS->getDecl()));
+ attributeOnlyIfTrue("sideEntry", LS->isSideEntry());
}
void JSONNodeDumper::VisitGotoStmt(const GotoStmt *GS) {
JOS.attribute("targetLabelDeclId",
diff --git a/clang/lib/AST/Mangle.cpp b/clang/lib/AST/Mangle.cpp
index 3282fcbd584f..54dbf484f377 100644
--- a/clang/lib/AST/Mangle.cpp
+++ b/clang/lib/AST/Mangle.cpp
@@ -116,6 +116,12 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
if (!D->hasExternalFormalLinkage() && D->getOwningModuleForLinkage())
return true;
+ // C functions with internal linkage have to be mangled with option
+ // -funique-internal-linkage-names.
+ if (!getASTContext().getLangOpts().CPlusPlus &&
+ isUniqueInternalLinkageDecl(D))
+ return true;
+
// In C, functions with no attributes never need to be mangled. Fastpath them.
if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
return false;
@@ -133,7 +139,9 @@ bool MangleContext::shouldMangleDeclName(const NamedDecl *D) {
}
void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
+ const ASTContext &ASTContext = getASTContext();
const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
+
// Any decl can be declared with __asm("foo") on it, and this takes precedence
// over all other naming in the .o file.
if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
@@ -151,9 +159,16 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
// tricks normally used for producing aliases (PR9177). Fortunately the
// llvm mangler on ELF is a nop, so we can just avoid adding the \01
// marker.
+ StringRef UserLabelPrefix =
+ getASTContext().getTargetInfo().getUserLabelPrefix();
+#ifndef NDEBUG
char GlobalPrefix =
- getASTContext().getTargetInfo().getDataLayout().getGlobalPrefix();
- if (GlobalPrefix)
+ llvm::DataLayout(getASTContext().getTargetInfo().getDataLayoutString())
+ .getGlobalPrefix();
+ assert((UserLabelPrefix.empty() && !GlobalPrefix) ||
+ (UserLabelPrefix.size() == 1 && UserLabelPrefix[0] == GlobalPrefix));
+#endif
+ if (!UserLabelPrefix.empty())
Out << '\01'; // LLVM IR Marker for __asm("foo")
Out << ALA->getLabel();
@@ -163,7 +178,6 @@ void MangleContext::mangleName(GlobalDecl GD, raw_ostream &Out) {
if (auto *GD = dyn_cast<MSGuidDecl>(D))
return mangleMSGuidDecl(GD, Out);
- const ASTContext &ASTContext = getASTContext();
CCMangling CC = getCallingConvMangling(ASTContext, D);
if (CC == CCM_WasmMainArgcArgv) {
@@ -377,8 +391,8 @@ class ASTNameGenerator::Implementation {
public:
explicit Implementation(ASTContext &Ctx)
- : MC(Ctx.createMangleContext()), DL(Ctx.getTargetInfo().getDataLayout()) {
- }
+ : MC(Ctx.createMangleContext()),
+ DL(Ctx.getTargetInfo().getDataLayoutString()) {}
bool writeName(const Decl *D, raw_ostream &OS) {
// First apply frontend mangling.
diff --git a/clang/lib/AST/MicrosoftCXXABI.cpp b/clang/lib/AST/MicrosoftCXXABI.cpp
index f9f9fe985b6f..166aa3b3bd60 100644
--- a/clang/lib/AST/MicrosoftCXXABI.cpp
+++ b/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/Attr.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Mangle.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/Type.h"
@@ -64,6 +65,19 @@ public:
}
};
+class MSHIPNumberingContext : public MicrosoftNumberingContext {
+ std::unique_ptr<MangleNumberingContext> DeviceCtx;
+
+public:
+ MSHIPNumberingContext(MangleContext *DeviceMangler) {
+ DeviceCtx = createItaniumNumberingContext(DeviceMangler);
+ }
+
+ unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override {
+ return DeviceCtx->getManglingNumber(CallOperator);
+ }
+};
+
class MicrosoftCXXABI : public CXXABI {
ASTContext &Context;
llvm::SmallDenseMap<CXXRecordDecl *, CXXConstructorDecl *> RecordToCopyCtor;
@@ -73,8 +87,20 @@ class MicrosoftCXXABI : public CXXABI {
llvm::SmallDenseMap<TagDecl *, TypedefNameDecl *>
UnnamedTagDeclToTypedefNameDecl;
+ // MangleContext for device numbering context, which is based on Itanium C++
+ // ABI.
+ std::unique_ptr<MangleContext> DeviceMangler;
+
public:
- MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+ MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) {
+ if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
+ assert(Context.getTargetInfo().getCXXABI().isMicrosoft() &&
+ Context.getAuxTargetInfo()->getCXXABI().isItaniumFamily() &&
+ "Unexpected combination of C++ ABIs.");
+ DeviceMangler.reset(
+ Context.createMangleContext(Context.getAuxTargetInfo()));
+ }
+ }
MemberPointerInfo
getMemberPointerInfo(const MemberPointerType *MPT) const override;
@@ -133,6 +159,10 @@ public:
std::unique_ptr<MangleNumberingContext>
createMangleNumberingContext() const override {
+ if (Context.getLangOpts().CUDA && Context.getAuxTargetInfo()) {
+ assert(DeviceMangler && "Missing device mangler");
+ return std::make_unique<MSHIPNumberingContext>(DeviceMangler.get());
+ }
return std::make_unique<MicrosoftNumberingContext>();
}
};
@@ -266,4 +296,3 @@ CXXABI::MemberPointerInfo MicrosoftCXXABI::getMemberPointerInfo(
CXXABI *clang::CreateMicrosoftCXXABI(ASTContext &Ctx) {
return new MicrosoftCXXABI(Ctx);
}
-
diff --git a/clang/lib/AST/MicrosoftMangle.cpp b/clang/lib/AST/MicrosoftMangle.cpp
index df6c566abc7d..d89cddd2adda 100644
--- a/clang/lib/AST/MicrosoftMangle.cpp
+++ b/clang/lib/AST/MicrosoftMangle.cpp
@@ -228,6 +228,34 @@ public:
return true;
}
+ std::string getLambdaString(const CXXRecordDecl *Lambda) override {
+ assert(Lambda->isLambda() && "RD must be a lambda!");
+ std::string Name("<lambda_");
+
+ Decl *LambdaContextDecl = Lambda->getLambdaContextDecl();
+ unsigned LambdaManglingNumber = Lambda->getLambdaManglingNumber();
+ unsigned LambdaId;
+ const ParmVarDecl *Parm = dyn_cast_or_null<ParmVarDecl>(LambdaContextDecl);
+ const FunctionDecl *Func =
+ Parm ? dyn_cast<FunctionDecl>(Parm->getDeclContext()) : nullptr;
+
+ if (Func) {
+ unsigned DefaultArgNo =
+ Func->getNumParams() - Parm->getFunctionScopeIndex();
+ Name += llvm::utostr(DefaultArgNo);
+ Name += "_";
+ }
+
+ if (LambdaManglingNumber)
+ LambdaId = LambdaManglingNumber;
+ else
+ LambdaId = getLambdaIdForDebugInfo(Lambda);
+
+ Name += llvm::utostr(LambdaId);
+ Name += ">";
+ return Name;
+ }
+
unsigned getLambdaId(const CXXRecordDecl *RD) {
assert(RD->isLambda() && "RD must be a lambda!");
assert(!RD->isExternallyVisible() && "RD must not be visible!");
@@ -238,6 +266,19 @@ public:
return Result.first->second;
}
+ unsigned getLambdaIdForDebugInfo(const CXXRecordDecl *RD) {
+ assert(RD->isLambda() && "RD must be a lambda!");
+ assert(!RD->isExternallyVisible() && "RD must not be visible!");
+ assert(RD->getLambdaManglingNumber() == 0 &&
+ "RD must not have a mangling number!");
+ llvm::DenseMap<const CXXRecordDecl *, unsigned>::iterator Result =
+ LambdaIds.find(RD);
+ // The lambda should exist, but return 0 in case it doesn't.
+ if (Result == LambdaIds.end())
+ return 0;
+ return Result->second;
+ }
+
/// Return a character sequence that is (somewhat) unique to the TU suitable
/// for mangling anonymous namespaces.
StringRef getAnonymousNamespaceHash() const {
@@ -2398,6 +2439,8 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -2684,6 +2727,9 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
// ::= I # __fastcall
// ::= J # __export __fastcall
// ::= Q # __vectorcall
+ // ::= S # __attribute__((__swiftcall__)) // Clang-only
+ // ::= T # __attribute__((__swiftasynccall__))
+ // // Clang-only
// ::= w # __regcall
// The 'export' calling conventions are from a bygone era
// (*cough*Win16*cough*) when functions were declared for export with
@@ -2703,6 +2749,7 @@ void MicrosoftCXXNameMangler::mangleCallingConvention(CallingConv CC) {
case CC_X86FastCall: Out << 'I'; break;
case CC_X86VectorCall: Out << 'Q'; break;
case CC_Swift: Out << 'S'; break;
+ case CC_SwiftAsync: Out << 'W'; break;
case CC_PreserveMost: Out << 'U'; break;
case CC_X86RegCall: Out << 'w'; break;
}
@@ -3632,7 +3679,7 @@ void MicrosoftMangleContextImpl::mangleCXXRTTICompleteObjectLocator(
assert(VFTableMangling.startswith("??_7") ||
VFTableMangling.startswith("??_S"));
- Out << "??_R4" << StringRef(VFTableMangling).drop_front(4);
+ Out << "??_R4" << VFTableMangling.str().drop_front(4);
}
void MicrosoftMangleContextImpl::mangleSEHFilterExpression(
diff --git a/clang/lib/AST/NSAPI.cpp b/clang/lib/AST/NSAPI.cpp
index cf4b42d25148..861060d7c875 100644
--- a/clang/lib/AST/NSAPI.cpp
+++ b/clang/lib/AST/NSAPI.cpp
@@ -477,6 +477,8 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::BoundMember:
case BuiltinType::Dependent:
case BuiltinType::Overload:
diff --git a/clang/lib/AST/NestedNameSpecifier.cpp b/clang/lib/AST/NestedNameSpecifier.cpp
index 08e8819a4d69..21afdd1570f4 100644
--- a/clang/lib/AST/NestedNameSpecifier.cpp
+++ b/clang/lib/AST/NestedNameSpecifier.cpp
@@ -288,8 +288,9 @@ void NestedNameSpecifier::print(raw_ostream &OS, const PrintingPolicy &Policy,
if (ResolveTemplateArguments && Record) {
// Print the type trait with resolved template parameters.
Record->printName(OS);
- printTemplateArgumentList(OS, Record->getTemplateArgs().asArray(),
- Policy);
+ printTemplateArgumentList(
+ OS, Record->getTemplateArgs().asArray(), Policy,
+ Record->getSpecializedTemplate()->getTemplateParameters());
break;
}
const Type *T = getAsType();
@@ -355,7 +356,7 @@ NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
assert(Qualifier && "Expected a non-NULL qualifier");
// Location of the trailing '::'.
- unsigned Length = sizeof(unsigned);
+ unsigned Length = sizeof(SourceLocation::UIntTy);
switch (Qualifier->getKind()) {
case NestedNameSpecifier::Global:
@@ -367,7 +368,7 @@ NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Super:
// The location of the identifier or namespace name.
- Length += sizeof(unsigned);
+ Length += sizeof(SourceLocation::UIntTy);
break;
case NestedNameSpecifier::TypeSpecWithTemplate:
@@ -392,8 +393,8 @@ NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
/// Load a (possibly unaligned) source location from a given address
/// and offset.
static SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
- unsigned Raw;
- memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(unsigned));
+ SourceLocation::UIntTy Raw;
+ memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(Raw));
return SourceLocation::getFromRawEncoding(Raw);
}
@@ -430,8 +431,9 @@ SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
case NestedNameSpecifier::Namespace:
case NestedNameSpecifier::NamespaceAlias:
case NestedNameSpecifier::Super:
- return SourceRange(LoadSourceLocation(Data, Offset),
- LoadSourceLocation(Data, Offset + sizeof(unsigned)));
+ return SourceRange(
+ LoadSourceLocation(Data, Offset),
+ LoadSourceLocation(Data, Offset + sizeof(SourceLocation::UIntTy)));
case NestedNameSpecifier::TypeSpecWithTemplate:
case NestedNameSpecifier::TypeSpec: {
@@ -486,10 +488,10 @@ static void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
/// Save a source location to the given buffer.
static void SaveSourceLocation(SourceLocation Loc, char *&Buffer,
unsigned &BufferSize, unsigned &BufferCapacity) {
- unsigned Raw = Loc.getRawEncoding();
+ SourceLocation::UIntTy Raw = Loc.getRawEncoding();
Append(reinterpret_cast<char *>(&Raw),
- reinterpret_cast<char *>(&Raw) + sizeof(unsigned),
- Buffer, BufferSize, BufferCapacity);
+ reinterpret_cast<char *>(&Raw) + sizeof(Raw), Buffer, BufferSize,
+ BufferCapacity);
}
/// Save a pointer to the given buffer.
diff --git a/clang/lib/AST/OpenMPClause.cpp b/clang/lib/AST/OpenMPClause.cpp
index cab5db6244b6..50f40395a197 100644
--- a/clang/lib/AST/OpenMPClause.cpp
+++ b/clang/lib/AST/OpenMPClause.cpp
@@ -96,10 +96,17 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
return static_cast<const OMPFinalClause *>(C);
case OMPC_priority:
return static_cast<const OMPPriorityClause *>(C);
+ case OMPC_novariants:
+ return static_cast<const OMPNovariantsClause *>(C);
+ case OMPC_nocontext:
+ return static_cast<const OMPNocontextClause *>(C);
+ case OMPC_filter:
+ return static_cast<const OMPFilterClause *>(C);
case OMPC_default:
case OMPC_proc_bind:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_allocate:
case OMPC_collapse:
@@ -188,6 +195,7 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_allocate:
case OMPC_collapse:
@@ -242,6 +250,8 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_nontemporal:
case OMPC_order:
case OMPC_destroy:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_detach:
case OMPC_inclusive:
case OMPC_exclusive:
@@ -298,6 +308,18 @@ OMPClause::child_range OMPPriorityClause::used_children() {
return child_range(&Priority, &Priority + 1);
}
+OMPClause::child_range OMPNovariantsClause::used_children() {
+ if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
+ return child_range(C, C + 1);
+ return child_range(&Condition, &Condition + 1);
+}
+
+OMPClause::child_range OMPNocontextClause::used_children() {
+ if (Stmt **C = getAddrOfExprAsWritten(getPreInitStmt()))
+ return child_range(C, C + 1);
+ return child_range(&Condition, &Condition + 1);
+}
+
OMPOrderedClause *OMPOrderedClause::Create(const ASTContext &C, Expr *Num,
unsigned NumLoops,
SourceLocation StartLoc,
@@ -901,6 +923,55 @@ OMPInReductionClause *OMPInReductionClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPInReductionClause(N);
}
+OMPSizesClause *OMPSizesClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ ArrayRef<Expr *> Sizes) {
+ OMPSizesClause *Clause = CreateEmpty(C, Sizes.size());
+ Clause->setLocStart(StartLoc);
+ Clause->setLParenLoc(LParenLoc);
+ Clause->setLocEnd(EndLoc);
+ Clause->setSizesRefs(Sizes);
+ return Clause;
+}
+
+OMPSizesClause *OMPSizesClause::CreateEmpty(const ASTContext &C,
+ unsigned NumSizes) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumSizes));
+ return new (Mem) OMPSizesClause(NumSizes);
+}
+
+OMPFullClause *OMPFullClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ OMPFullClause *Clause = CreateEmpty(C);
+ Clause->setLocStart(StartLoc);
+ Clause->setLocEnd(EndLoc);
+ return Clause;
+}
+
+OMPFullClause *OMPFullClause::CreateEmpty(const ASTContext &C) {
+ return new (C) OMPFullClause();
+}
+
+OMPPartialClause *OMPPartialClause::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc,
+ Expr *Factor) {
+ OMPPartialClause *Clause = CreateEmpty(C);
+ Clause->setLocStart(StartLoc);
+ Clause->setLParenLoc(LParenLoc);
+ Clause->setLocEnd(EndLoc);
+ Clause->setFactor(Factor);
+ return Clause;
+}
+
+OMPPartialClause *OMPPartialClause::CreateEmpty(const ASTContext &C) {
+ return new (C) OMPPartialClause();
+}
+
OMPAllocateClause *
OMPAllocateClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, Expr *Allocator,
@@ -1492,6 +1563,27 @@ OMPAffinityClause *OMPAffinityClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPAffinityClause(N);
}
+OMPInitClause *OMPInitClause::Create(const ASTContext &C, Expr *InteropVar,
+ ArrayRef<Expr *> PrefExprs, bool IsTarget,
+ bool IsTargetSync, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc) {
+
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(PrefExprs.size() + 1));
+ auto *Clause =
+ new (Mem) OMPInitClause(IsTarget, IsTargetSync, StartLoc, LParenLoc,
+ VarLoc, EndLoc, PrefExprs.size() + 1);
+ Clause->setInteropVar(InteropVar);
+ llvm::copy(PrefExprs, Clause->getTrailingObjects<Expr *>() + 1);
+ return Clause;
+}
+
+OMPInitClause *OMPInitClause::CreateEmpty(const ASTContext &C, unsigned N) {
+ void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
+ return new (Mem) OMPInitClause(N);
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clauses printing methods
//===----------------------------------------------------------------------===//
@@ -1528,6 +1620,30 @@ void OMPClausePrinter::VisitOMPSimdlenClause(OMPSimdlenClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPSizesClause(OMPSizesClause *Node) {
+ OS << "sizes(";
+ bool First = true;
+ for (auto Size : Node->getSizesRefs()) {
+ if (!First)
+ OS << ", ";
+ Size->printPretty(OS, nullptr, Policy, 0);
+ First = false;
+ }
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPFullClause(OMPFullClause *Node) { OS << "full"; }
+
+void OMPClausePrinter::VisitOMPPartialClause(OMPPartialClause *Node) {
+ OS << "partial";
+
+ if (Expr *Factor = Node->getFactor()) {
+ OS << '(';
+ Factor->printPretty(OS, nullptr, Policy, 0);
+ OS << ')';
+ }
+}
+
void OMPClausePrinter::VisitOMPAllocatorClause(OMPAllocatorClause *Node) {
OS << "allocator(";
Node->getAllocator()->printPretty(OS, nullptr, Policy, 0);
@@ -1722,8 +1838,62 @@ void OMPClausePrinter::VisitOMPHintClause(OMPHintClause *Node) {
OS << ")";
}
-void OMPClausePrinter::VisitOMPDestroyClause(OMPDestroyClause *) {
+void OMPClausePrinter::VisitOMPInitClause(OMPInitClause *Node) {
+ OS << "init(";
+ bool First = true;
+ for (const Expr *E : Node->prefs()) {
+ if (First)
+ OS << "prefer_type(";
+ else
+ OS << ",";
+ E->printPretty(OS, nullptr, Policy);
+ First = false;
+ }
+ if (!First)
+ OS << "), ";
+ if (Node->getIsTarget())
+ OS << "target";
+ if (Node->getIsTargetSync()) {
+ if (Node->getIsTarget())
+ OS << ", ";
+ OS << "targetsync";
+ }
+ OS << " : ";
+ Node->getInteropVar()->printPretty(OS, nullptr, Policy);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPUseClause(OMPUseClause *Node) {
+ OS << "use(";
+ Node->getInteropVar()->printPretty(OS, nullptr, Policy);
+ OS << ")";
+}
+
+void OMPClausePrinter::VisitOMPDestroyClause(OMPDestroyClause *Node) {
OS << "destroy";
+ if (Expr *E = Node->getInteropVar()) {
+ OS << "(";
+ E->printPretty(OS, nullptr, Policy);
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPNovariantsClause(OMPNovariantsClause *Node) {
+ OS << "novariants";
+ if (Expr *E = Node->getCondition()) {
+ OS << "(";
+ E->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+ }
+}
+
+void OMPClausePrinter::VisitOMPNocontextClause(OMPNocontextClause *Node) {
+ OS << "nocontext";
+ if (Expr *E = Node->getCondition()) {
+ OS << "(";
+ E->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+ }
}
template<typename T>
@@ -2119,6 +2289,12 @@ void OMPClausePrinter::VisitOMPAffinityClause(OMPAffinityClause *Node) {
OS << ")";
}
+void OMPClausePrinter::VisitOMPFilterClause(OMPFilterClause *Node) {
+ OS << "filter(";
+ Node->getThreadID()->printPretty(OS, nullptr, Policy, 0);
+ OS << ")";
+}
+
void OMPTraitInfo::getAsVariantMatchInfo(ASTContext &ASTCtx,
VariantMatchInfo &VMI) const {
for (const OMPTraitSet &Set : Sets) {
diff --git a/clang/lib/AST/ParentMapContext.cpp b/clang/lib/AST/ParentMapContext.cpp
index cb4995312efa..4a3e0a99c8a6 100644
--- a/clang/lib/AST/ParentMapContext.cpp
+++ b/clang/lib/AST/ParentMapContext.cpp
@@ -49,7 +49,17 @@ DynTypedNode ParentMapContext::traverseIgnored(const DynTypedNode &N) const {
return N;
}
+template <typename T, typename... U>
+std::tuple<bool, DynTypedNodeList, const T *, const U *...>
+matchParents(const DynTypedNodeList &NodeList,
+ ParentMapContext::ParentMap *ParentMap);
+
+template <typename, typename...> struct MatchParents;
+
class ParentMapContext::ParentMap {
+
+ template <typename, typename...> friend struct ::MatchParents;
+
/// Contains parents of a node.
using ParentVector = llvm::SmallVector<DynTypedNode, 2>;
@@ -117,11 +127,72 @@ public:
if (Node.getNodeKind().hasPointerIdentity()) {
auto ParentList =
getDynNodeFromMap(Node.getMemoizationData(), PointerParents);
- if (ParentList.size() == 1 && TK == TK_IgnoreUnlessSpelledInSource) {
- const auto *E = ParentList[0].get<Expr>();
- const auto *Child = Node.get<Expr>();
- if (E && Child)
- return AscendIgnoreUnlessSpelledInSource(E, Child);
+ if (ParentList.size() > 0 && TK == TK_IgnoreUnlessSpelledInSource) {
+
+ const auto *ChildExpr = Node.get<Expr>();
+
+ {
+ // Don't match explicit node types because different stdlib
+ // implementations implement this in different ways and have
+ // different intermediate nodes.
+ // Look up 4 levels for a cxxRewrittenBinaryOperator as that is
+ // enough for the major stdlib implementations.
+ auto RewrittenBinOpParentsList = ParentList;
+ int I = 0;
+ while (ChildExpr && RewrittenBinOpParentsList.size() == 1 &&
+ I++ < 4) {
+ const auto *S = RewrittenBinOpParentsList[0].get<Stmt>();
+ if (!S)
+ break;
+
+ const auto *RWBO = dyn_cast<CXXRewrittenBinaryOperator>(S);
+ if (!RWBO) {
+ RewrittenBinOpParentsList = getDynNodeFromMap(S, PointerParents);
+ continue;
+ }
+ if (RWBO->getLHS()->IgnoreUnlessSpelledInSource() != ChildExpr &&
+ RWBO->getRHS()->IgnoreUnlessSpelledInSource() != ChildExpr)
+ break;
+ return DynTypedNode::create(*RWBO);
+ }
+ }
+
+ const auto *ParentExpr = ParentList[0].get<Expr>();
+ if (ParentExpr && ChildExpr)
+ return AscendIgnoreUnlessSpelledInSource(ParentExpr, ChildExpr);
+
+ {
+ auto AncestorNodes =
+ matchParents<DeclStmt, CXXForRangeStmt>(ParentList, this);
+ if (std::get<bool>(AncestorNodes) &&
+ std::get<const CXXForRangeStmt *>(AncestorNodes)
+ ->getLoopVarStmt() ==
+ std::get<const DeclStmt *>(AncestorNodes))
+ return std::get<DynTypedNodeList>(AncestorNodes);
+ }
+ {
+ auto AncestorNodes = matchParents<VarDecl, DeclStmt, CXXForRangeStmt>(
+ ParentList, this);
+ if (std::get<bool>(AncestorNodes) &&
+ std::get<const CXXForRangeStmt *>(AncestorNodes)
+ ->getRangeStmt() ==
+ std::get<const DeclStmt *>(AncestorNodes))
+ return std::get<DynTypedNodeList>(AncestorNodes);
+ }
+ {
+ auto AncestorNodes =
+ matchParents<CXXMethodDecl, CXXRecordDecl, LambdaExpr>(ParentList,
+ this);
+ if (std::get<bool>(AncestorNodes))
+ return std::get<DynTypedNodeList>(AncestorNodes);
+ }
+ {
+ auto AncestorNodes =
+ matchParents<FunctionTemplateDecl, CXXRecordDecl, LambdaExpr>(
+ ParentList, this);
+ if (std::get<bool>(AncestorNodes))
+ return std::get<DynTypedNodeList>(AncestorNodes);
+ }
}
return ParentList;
}
@@ -194,6 +265,59 @@ public:
}
};
+template <typename Tuple, std::size_t... Is>
+auto tuple_pop_front_impl(const Tuple &tuple, std::index_sequence<Is...>) {
+ return std::make_tuple(std::get<1 + Is>(tuple)...);
+}
+
+template <typename Tuple> auto tuple_pop_front(const Tuple &tuple) {
+ return tuple_pop_front_impl(
+ tuple, std::make_index_sequence<std::tuple_size<Tuple>::value - 1>());
+}
+
+template <typename T, typename... U> struct MatchParents {
+ static std::tuple<bool, DynTypedNodeList, const T *, const U *...>
+ match(const DynTypedNodeList &NodeList,
+ ParentMapContext::ParentMap *ParentMap) {
+ if (const auto *TypedNode = NodeList[0].get<T>()) {
+ auto NextParentList =
+ ParentMap->getDynNodeFromMap(TypedNode, ParentMap->PointerParents);
+ if (NextParentList.size() == 1) {
+ auto TailTuple = MatchParents<U...>::match(NextParentList, ParentMap);
+ if (std::get<bool>(TailTuple)) {
+ return std::tuple_cat(
+ std::make_tuple(true, std::get<DynTypedNodeList>(TailTuple),
+ TypedNode),
+ tuple_pop_front(tuple_pop_front(TailTuple)));
+ }
+ }
+ }
+ return std::tuple_cat(std::make_tuple(false, NodeList),
+ std::tuple<const T *, const U *...>());
+ }
+};
+
+template <typename T> struct MatchParents<T> {
+ static std::tuple<bool, DynTypedNodeList, const T *>
+ match(const DynTypedNodeList &NodeList,
+ ParentMapContext::ParentMap *ParentMap) {
+ if (const auto *TypedNode = NodeList[0].get<T>()) {
+ auto NextParentList =
+ ParentMap->getDynNodeFromMap(TypedNode, ParentMap->PointerParents);
+ if (NextParentList.size() == 1)
+ return std::make_tuple(true, NodeList, TypedNode);
+ }
+ return std::make_tuple(false, NodeList, nullptr);
+ }
+};
+
+template <typename T, typename... U>
+std::tuple<bool, DynTypedNodeList, const T *, const U *...>
+matchParents(const DynTypedNodeList &NodeList,
+ ParentMapContext::ParentMap *ParentMap) {
+ return MatchParents<T, U...>::match(NodeList, ParentMap);
+}
+
/// Template specializations to abstract away from pointers and TypeLocs.
/// @{
template <typename T> static DynTypedNode createDynTypedNode(const T &Node) {
diff --git a/clang/lib/AST/PrintfFormatString.cpp b/clang/lib/AST/PrintfFormatString.cpp
index a1abaf2f0943..4806c554a2a1 100644
--- a/clang/lib/AST/PrintfFormatString.cpp
+++ b/clang/lib/AST/PrintfFormatString.cpp
@@ -794,6 +794,8 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
#define SIGNED_TYPE(Id, SingletonId)
#define UNSIGNED_TYPE(Id, SingletonId)
#define FLOATING_TYPE(Id, SingletonId)
diff --git a/clang/lib/AST/QualTypeNames.cpp b/clang/lib/AST/QualTypeNames.cpp
index 73a33a208233..9a1b418f5ac1 100644
--- a/clang/lib/AST/QualTypeNames.cpp
+++ b/clang/lib/AST/QualTypeNames.cpp
@@ -356,11 +356,19 @@ NestedNameSpecifier *createNestedNameSpecifier(const ASTContext &Ctx,
const TypeDecl *TD,
bool FullyQualify,
bool WithGlobalNsPrefix) {
+ const Type *TypePtr = TD->getTypeForDecl();
+ if (isa<const TemplateSpecializationType>(TypePtr) ||
+ isa<const RecordType>(TypePtr)) {
+ // We are asked to fully qualify and we have a Record Type (which
+ // may point to a template specialization) or Template
+ // Specialization Type. We need to fully qualify their arguments.
+
+ TypePtr = getFullyQualifiedTemplateType(Ctx, TypePtr, WithGlobalNsPrefix);
+ }
+
return NestedNameSpecifier::Create(
- Ctx,
- createOuterNNS(Ctx, TD, FullyQualify, WithGlobalNsPrefix),
- false /*No TemplateKeyword*/,
- TD->getTypeForDecl());
+ Ctx, createOuterNNS(Ctx, TD, FullyQualify, WithGlobalNsPrefix),
+ false /*No TemplateKeyword*/, TypePtr);
}
/// Return the fully qualified type, including fully-qualified
diff --git a/clang/lib/AST/RecordLayoutBuilder.cpp b/clang/lib/AST/RecordLayoutBuilder.cpp
index 95d69fa5b11a..972690becf9e 100644
--- a/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -1528,12 +1528,17 @@ void ItaniumRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
UpdateAlignment(TypeAlign);
}
+static bool isAIXLayout(const ASTContext &Context) {
+ return Context.getTargetInfo().getTriple().getOS() == llvm::Triple::AIX;
+}
+
void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
uint64_t FieldSize = D->getBitWidthValue(Context);
TypeInfo FieldInfo = Context.getTypeInfo(D->getType());
uint64_t StorageUnitSize = FieldInfo.Width;
unsigned FieldAlign = FieldInfo.Align;
+ bool AlignIsRequired = FieldInfo.AlignIsRequired;
// UnfilledBitsInLastUnit is the difference between the end of the
// last allocated bitfield (i.e. the first bit offset available for
@@ -1611,9 +1616,33 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
}
}
+ if (isAIXLayout(Context)) {
+ if (StorageUnitSize < Context.getTypeSize(Context.UnsignedIntTy)) {
+ // On AIX, [bool, char, short] bitfields have the same alignment
+ // as [unsigned].
+ StorageUnitSize = Context.getTypeSize(Context.UnsignedIntTy);
+ } else if (StorageUnitSize > Context.getTypeSize(Context.UnsignedIntTy) &&
+ Context.getTargetInfo().getTriple().isArch32Bit() &&
+ FieldSize <= 32) {
+ // Under 32-bit compile mode, the bitcontainer is 32 bits if a single
+ // long long bitfield has length no greater than 32 bits.
+ StorageUnitSize = 32;
+
+ if (!AlignIsRequired)
+ FieldAlign = 32;
+ }
+
+ if (FieldAlign < StorageUnitSize) {
+ // The bitfield alignment should always be greater than or equal to the
+ // bitcontainer size.
+ FieldAlign = StorageUnitSize;
+ }
+ }
+
// If the field is wider than its declared type, it follows
- // different rules in all cases.
- if (FieldSize > StorageUnitSize) {
+ // different rules in all cases, except on AIX.
+ // On AIX, a wide bitfield follows the same rules as a normal bitfield.
+ if (FieldSize > StorageUnitSize && !isAIXLayout(Context)) {
LayoutWideBitField(FieldSize, StorageUnitSize, FieldPacked, D);
return;
}
@@ -1627,12 +1656,17 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// Some such targets do honor it on zero-width bitfields.
if (FieldSize == 0 &&
Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
- // The alignment to round up to is the max of the field's natural
- // alignment and a target-specific fixed value (sometimes zero).
- unsigned ZeroLengthBitfieldBoundary =
- Context.getTargetInfo().getZeroLengthBitfieldBoundary();
- FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary);
-
+ // Some targets don't honor leading zero-width bitfields.
+ if (!IsUnion && FieldOffset == 0 &&
+ !Context.getTargetInfo().useLeadingZeroLengthBitfield())
+ FieldAlign = 1;
+ else {
+ // The alignment to round up to is the max of the field's natural
+ // alignment and a target-specific fixed value (sometimes zero).
+ unsigned ZeroLengthBitfieldBoundary =
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary();
+ FieldAlign = std::max(FieldAlign, ZeroLengthBitfieldBoundary);
+ }
// If that doesn't apply, just ignore the field alignment.
} else {
FieldAlign = 1;
@@ -1741,6 +1775,12 @@ void ItaniumRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
!D->getIdentifier())
FieldAlign = UnpackedFieldAlign = 1;
+ // On AIX, zero-width bitfields pad out to the alignment boundary, but then
+ // do not affect overall record alignment if there is a pragma pack or
+ // pragma align(packed).
+ if (isAIXLayout(Context) && !MaxFieldAlignment.isZero() && !FieldSize)
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+
// Diagnose differences in layout due to padding or packing.
if (!UseExternalLayout)
CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
@@ -2288,7 +2328,8 @@ static const CXXMethodDecl *computeKeyFunction(ASTContext &Context,
// If the key function is dllimport but the class isn't, then the class has
// no key function. The DLL that exports the key function won't export the
// vtable in this case.
- if (MD->hasAttr<DLLImportAttr>() && !RD->hasAttr<DLLImportAttr>())
+ if (MD->hasAttr<DLLImportAttr>() && !RD->hasAttr<DLLImportAttr>() &&
+ !Context.getTargetInfo().hasPS4DLLImportExport())
return nullptr;
// We found it.
@@ -3542,7 +3583,10 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
} else {
PrintOffset(OS, FieldOffset, IndentLevel);
}
- OS << Field.getType().getAsString() << ' ' << Field << '\n';
+ const QualType &FieldType = C.getLangOpts().DumpRecordLayoutsCanonical
+ ? Field.getType().getCanonicalType()
+ : Field.getType();
+ OS << FieldType.getAsString() << ' ' << Field << '\n';
}
// Dump virtual bases.
diff --git a/clang/lib/AST/Stmt.cpp b/clang/lib/AST/Stmt.cpp
index 83821ea6f5fc..47693ef9fee3 100644
--- a/clang/lib/AST/Stmt.cpp
+++ b/clang/lib/AST/Stmt.cpp
@@ -646,6 +646,8 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
continue;
}
+ const TargetInfo &TI = C.getTargetInfo();
+
// Escaped "%" character in asm string.
if (CurPtr == StrEnd) {
// % at end of string is invalid (no escape).
@@ -656,6 +658,11 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
char EscapedChar = *CurPtr++;
switch (EscapedChar) {
default:
+ // Handle target-specific escaped characters.
+ if (auto MaybeReplaceStr = TI.handleAsmEscapedChar(EscapedChar)) {
+ CurStringPiece += *MaybeReplaceStr;
+ continue;
+ }
break;
case '%': // %% -> %
case '{': // %{ -> {
@@ -688,7 +695,6 @@ unsigned GCCAsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
EscapedChar = *CurPtr++;
}
- const TargetInfo &TI = C.getTargetInfo();
const SourceManager &SM = C.getSourceManager();
const LangOptions &LO = C.getLangOpts();
@@ -989,12 +995,20 @@ bool IfStmt::isObjCAvailabilityCheck() const {
return isa<ObjCAvailabilityCheckExpr>(getCond());
}
-Optional<const Stmt*> IfStmt::getNondiscardedCase(const ASTContext &Ctx) const {
+Optional<Stmt *> IfStmt::getNondiscardedCase(const ASTContext &Ctx) {
if (!isConstexpr() || getCond()->isValueDependent())
return None;
return !getCond()->EvaluateKnownConstInt(Ctx) ? getElse() : getThen();
}
+Optional<const Stmt *>
+IfStmt::getNondiscardedCase(const ASTContext &Ctx) const {
+ if (Optional<Stmt *> Result =
+ const_cast<IfStmt *>(this)->getNondiscardedCase(Ctx))
+ return *Result;
+ return None;
+}
+
ForStmt::ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP)
@@ -1266,13 +1280,6 @@ CapturedStmt::Capture::Capture(SourceLocation Loc, VariableCaptureKind Kind,
break;
case VCK_ByCopy:
assert(Var && "capturing by copy must have a variable!");
- assert(
- (Var->getType()->isScalarType() || (Var->getType()->isReferenceType() &&
- Var->getType()
- ->castAs<ReferenceType>()
- ->getPointeeType()
- ->isScalarType())) &&
- "captures by copy are expected to have a scalar type!");
break;
case VCK_VLAType:
assert(!Var &&
diff --git a/clang/lib/AST/StmtOpenMP.cpp b/clang/lib/AST/StmtOpenMP.cpp
index c858261f2387..b0ef2f49ba04 100644
--- a/clang/lib/AST/StmtOpenMP.cpp
+++ b/clang/lib/AST/StmtOpenMP.cpp
@@ -74,8 +74,9 @@ Stmt *OMPExecutableDirective::getStructuredBlock() {
return getRawStmt();
}
-Stmt *OMPLoopDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
- bool TryImperfectlyNestedLoops) {
+Stmt *
+OMPLoopBasedDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
+ bool TryImperfectlyNestedLoops) {
Stmt *OrigStmt = CurStmt;
CurStmt = CurStmt->IgnoreContainers();
// Additional work for imperfectly nested loops, introduced in OpenMP 5.0.
@@ -91,7 +92,10 @@ Stmt *OMPLoopDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
for (Stmt *S : CS->body()) {
if (!S)
continue;
- if (isa<ForStmt>(S) || isa<CXXForRangeStmt>(S)) {
+ if (auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S))
+ S = CanonLoop->getLoopStmt();
+ if (isa<ForStmt>(S) || isa<CXXForRangeStmt>(S) ||
+ (isa<OMPLoopBasedDirective>(S) && !isa<OMPLoopDirective>(S))) {
// Only single loop construct is allowed.
if (CurStmt) {
CurStmt = OrigStmt;
@@ -118,75 +122,133 @@ Stmt *OMPLoopDirective::tryToFindNextInnerLoop(Stmt *CurStmt,
return CurStmt;
}
-Stmt *OMPLoopDirective::getBody() {
- // This relies on the loop form is already checked by Sema.
- Stmt *Body = Data->getRawStmt()->IgnoreContainers();
- if (auto *For = dyn_cast<ForStmt>(Body)) {
- Body = For->getBody();
- } else {
- assert(isa<CXXForRangeStmt>(Body) &&
- "Expected canonical for loop or range-based for loop.");
- Body = cast<CXXForRangeStmt>(Body)->getBody();
- }
- for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
- Body = tryToFindNextInnerLoop(Body, /*TryImperfectlyNestedLoops=*/true);
- if (auto *For = dyn_cast<ForStmt>(Body)) {
- Body = For->getBody();
+bool OMPLoopBasedDirective::doForAllLoops(
+ Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
+ llvm::function_ref<bool(unsigned, Stmt *)> Callback,
+ llvm::function_ref<void(OMPLoopBasedDirective *)>
+ OnTransformationCallback) {
+ CurStmt = CurStmt->IgnoreContainers();
+ for (unsigned Cnt = 0; Cnt < NumLoops; ++Cnt) {
+ while (true) {
+ auto *OrigStmt = CurStmt;
+ if (auto *Dir = dyn_cast<OMPTileDirective>(OrigStmt)) {
+ OnTransformationCallback(Dir);
+ CurStmt = Dir->getTransformedStmt();
+ } else if (auto *Dir = dyn_cast<OMPUnrollDirective>(OrigStmt)) {
+ OnTransformationCallback(Dir);
+ CurStmt = Dir->getTransformedStmt();
+ } else {
+ break;
+ }
+
+ if (!CurStmt) {
+ // May happen if the loop transformation does not result in a generated
+ // loop (such as full unrolling).
+ CurStmt = OrigStmt;
+ break;
+ }
+ }
+ if (auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(CurStmt))
+ CurStmt = CanonLoop->getLoopStmt();
+ if (Callback(Cnt, CurStmt))
+ return false;
+ // Move on to the next nested for loop, or to the loop body.
+ // OpenMP [2.8.1, simd construct, Restrictions]
+ // All loops associated with the construct must be perfectly nested; that
+ // is, there must be no intervening code nor any OpenMP directive between
+ // any two loops.
+ if (auto *For = dyn_cast<ForStmt>(CurStmt)) {
+ CurStmt = For->getBody();
} else {
- assert(isa<CXXForRangeStmt>(Body) &&
- "Expected canonical for loop or range-based for loop.");
- Body = cast<CXXForRangeStmt>(Body)->getBody();
+ assert(isa<CXXForRangeStmt>(CurStmt) &&
+ "Expected canonical for or range-based for loops.");
+ CurStmt = cast<CXXForRangeStmt>(CurStmt)->getBody();
}
+ CurStmt = OMPLoopBasedDirective::tryToFindNextInnerLoop(
+ CurStmt, TryImperfectlyNestedLoops);
}
+ return true;
+}
+
+void OMPLoopBasedDirective::doForAllLoopsBodies(
+ Stmt *CurStmt, bool TryImperfectlyNestedLoops, unsigned NumLoops,
+ llvm::function_ref<void(unsigned, Stmt *, Stmt *)> Callback) {
+ bool Res = OMPLoopBasedDirective::doForAllLoops(
+ CurStmt, TryImperfectlyNestedLoops, NumLoops,
+ [Callback](unsigned Cnt, Stmt *Loop) {
+ Stmt *Body = nullptr;
+ if (auto *For = dyn_cast<ForStmt>(Loop)) {
+ Body = For->getBody();
+ } else {
+ assert(isa<CXXForRangeStmt>(Loop) &&
+ "Expected canonical for or range-based for loops.");
+ Body = cast<CXXForRangeStmt>(Loop)->getBody();
+ }
+ if (auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(Body))
+ Body = CanonLoop->getLoopStmt();
+ Callback(Cnt, Loop, Body);
+ return false;
+ });
+ assert(Res && "Expected only loops");
+ (void)Res;
+}
+
+Stmt *OMPLoopDirective::getBody() {
+ // This relies on the loop form having already been checked by Sema.
+ Stmt *Body = nullptr;
+ OMPLoopBasedDirective::doForAllLoopsBodies(
+ Data->getRawStmt(), /*TryImperfectlyNestedLoops=*/true,
+ NumAssociatedLoops,
+ [&Body](unsigned, Stmt *, Stmt *BodyStmt) { Body = BodyStmt; });
return Body;
}
void OMPLoopDirective::setCounters(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
+ assert(A.size() == getLoopsNumber() &&
"Number of loop counters is not the same as the collapsed number");
llvm::copy(A, getCounters().begin());
}
void OMPLoopDirective::setPrivateCounters(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() && "Number of loop private counters "
- "is not the same as the collapsed "
- "number");
+ assert(A.size() == getLoopsNumber() && "Number of loop private counters "
+ "is not the same as the collapsed "
+ "number");
llvm::copy(A, getPrivateCounters().begin());
}
void OMPLoopDirective::setInits(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
+ assert(A.size() == getLoopsNumber() &&
"Number of counter inits is not the same as the collapsed number");
llvm::copy(A, getInits().begin());
}
void OMPLoopDirective::setUpdates(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
+ assert(A.size() == getLoopsNumber() &&
"Number of counter updates is not the same as the collapsed number");
llvm::copy(A, getUpdates().begin());
}
void OMPLoopDirective::setFinals(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
+ assert(A.size() == getLoopsNumber() &&
"Number of counter finals is not the same as the collapsed number");
llvm::copy(A, getFinals().begin());
}
void OMPLoopDirective::setDependentCounters(ArrayRef<Expr *> A) {
assert(
- A.size() == getCollapsedNumber() &&
+ A.size() == getLoopsNumber() &&
"Number of dependent counters is not the same as the collapsed number");
llvm::copy(A, getDependentCounters().begin());
}
void OMPLoopDirective::setDependentInits(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
+ assert(A.size() == getLoopsNumber() &&
"Number of dependent inits is not the same as the collapsed number");
llvm::copy(A, getDependentInits().begin());
}
void OMPLoopDirective::setFinalsConditions(ArrayRef<Expr *> A) {
- assert(A.size() == getCollapsedNumber() &&
+ assert(A.size() == getLoopsNumber() &&
"Number of finals conditions is not the same as the collapsed number");
llvm::copy(A, getFinalsConditions().begin());
}
@@ -291,6 +353,46 @@ OMPForDirective *OMPForDirective::CreateEmpty(const ASTContext &C,
numLoopChildren(CollapsedNum, OMPD_for) + 1, CollapsedNum);
}
+OMPTileDirective *
+OMPTileDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ unsigned NumLoops, Stmt *AssociatedStmt,
+ Stmt *TransformedStmt, Stmt *PreInits) {
+ OMPTileDirective *Dir = createDirective<OMPTileDirective>(
+ C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc,
+ NumLoops);
+ Dir->setTransformedStmt(TransformedStmt);
+ Dir->setPreInits(PreInits);
+ return Dir;
+}
+
+OMPTileDirective *OMPTileDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned NumLoops) {
+ return createEmptyDirective<OMPTileDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1,
+ SourceLocation(), SourceLocation(), NumLoops);
+}
+
+OMPUnrollDirective *
+OMPUnrollDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Stmt *TransformedStmt,
+ Stmt *PreInits) {
+ auto *Dir = createDirective<OMPUnrollDirective>(
+ C, Clauses, AssociatedStmt, TransformedStmtOffset + 1, StartLoc, EndLoc);
+ Dir->setTransformedStmt(TransformedStmt);
+ Dir->setPreInits(PreInits);
+ return Dir;
+}
+
+OMPUnrollDirective *OMPUnrollDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses) {
+ return createEmptyDirective<OMPUnrollDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, TransformedStmtOffset + 1,
+ SourceLocation(), SourceLocation());
+}
+
OMPForSimdDirective *
OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc, unsigned CollapsedNum,
@@ -1880,3 +1982,53 @@ OMPTargetTeamsDistributeSimdDirective::CreateEmpty(const ASTContext &C,
numLoopChildren(CollapsedNum, OMPD_target_teams_distribute_simd),
CollapsedNum);
}
+
+OMPInteropDirective *
+OMPInteropDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses) {
+ return createDirective<OMPInteropDirective>(
+ C, Clauses, /*AssociatedStmt=*/nullptr, /*NumChildren=*/0, StartLoc,
+ EndLoc);
+}
+
+OMPInteropDirective *OMPInteropDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ return createEmptyDirective<OMPInteropDirective>(C, NumClauses);
+}
+
+OMPDispatchDirective *OMPDispatchDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ SourceLocation TargetCallLoc) {
+ auto *Dir = createDirective<OMPDispatchDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/0, StartLoc, EndLoc);
+ Dir->setTargetCallLoc(TargetCallLoc);
+ return Dir;
+}
+
+OMPDispatchDirective *OMPDispatchDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ return createEmptyDirective<OMPDispatchDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true,
+ /*NumChildren=*/0);
+}
+
+OMPMaskedDirective *OMPMaskedDirective::Create(const ASTContext &C,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt) {
+ return createDirective<OMPMaskedDirective>(C, Clauses, AssociatedStmt,
+ /*NumChildren=*/0, StartLoc,
+ EndLoc);
+}
+
+OMPMaskedDirective *OMPMaskedDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ EmptyShell) {
+ return createEmptyDirective<OMPMaskedDirective>(C, NumClauses,
+ /*HasAssociatedStmt=*/true);
+}
diff --git a/clang/lib/AST/StmtPrinter.cpp b/clang/lib/AST/StmtPrinter.cpp
index 55a721194ccf..45b15171aa97 100644
--- a/clang/lib/AST/StmtPrinter.cpp
+++ b/clang/lib/AST/StmtPrinter.cpp
@@ -47,11 +47,11 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <string>
@@ -636,6 +636,10 @@ void StmtPrinter::VisitSEHLeaveStmt(SEHLeaveStmt *Node) {
// OpenMP directives printing methods
//===----------------------------------------------------------------------===//
+void StmtPrinter::VisitOMPCanonicalLoop(OMPCanonicalLoop *Node) {
+ PrintStmt(Node->getLoopStmt());
+}
+
void StmtPrinter::PrintOMPExecutableDirective(OMPExecutableDirective *S,
bool ForceNoStmt) {
OMPClausePrinter Printer(OS, Policy);
@@ -660,6 +664,16 @@ void StmtPrinter::VisitOMPSimdDirective(OMPSimdDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPTileDirective(OMPTileDirective *Node) {
+ Indent() << "#pragma omp tile";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPUnrollDirective(OMPUnrollDirective *Node) {
+ Indent() << "#pragma omp unroll";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPForDirective(OMPForDirective *Node) {
Indent() << "#pragma omp for";
PrintOMPExecutableDirective(Node);
@@ -953,6 +967,21 @@ void StmtPrinter::VisitOMPTargetTeamsDistributeSimdDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPInteropDirective(OMPInteropDirective *Node) {
+ Indent() << "#pragma omp interop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPDispatchDirective(OMPDispatchDirective *Node) {
+ Indent() << "#pragma omp dispatch";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPMaskedDirective(OMPMaskedDirective *Node) {
+ Indent() << "#pragma omp masked";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -979,8 +1008,13 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getNameInfo();
- if (Node->hasExplicitTemplateArgs())
- printTemplateArgumentList(OS, Node->template_arguments(), Policy);
+ if (Node->hasExplicitTemplateArgs()) {
+ const TemplateParameterList *TPL = nullptr;
+ if (!Node->hadMultipleCandidates())
+ if (auto *TD = dyn_cast<TemplateDecl>(Node->getDecl()))
+ TPL = TD->getTemplateParameters();
+ printTemplateArgumentList(OS, Node->template_arguments(), Policy, TPL);
+ }
}
void StmtPrinter::VisitDependentScopeDeclRefExpr(
@@ -1053,70 +1087,19 @@ void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
OS << "]";
}
+void StmtPrinter::VisitSYCLUniqueStableNameExpr(
+ SYCLUniqueStableNameExpr *Node) {
+ OS << "__builtin_sycl_unique_stable_name(";
+ Node->getTypeSourceInfo()->getType().print(OS, Policy);
+ OS << ")";
+}
+
void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
OS << PredefinedExpr::getIdentKindName(Node->getIdentKind());
}
void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
- unsigned value = Node->getValue();
-
- switch (Node->getKind()) {
- case CharacterLiteral::Ascii: break; // no prefix.
- case CharacterLiteral::Wide: OS << 'L'; break;
- case CharacterLiteral::UTF8: OS << "u8"; break;
- case CharacterLiteral::UTF16: OS << 'u'; break;
- case CharacterLiteral::UTF32: OS << 'U'; break;
- }
-
- switch (value) {
- case '\\':
- OS << "'\\\\'";
- break;
- case '\'':
- OS << "'\\''";
- break;
- case '\a':
- // TODO: K&R: the meaning of '\\a' is different in traditional C
- OS << "'\\a'";
- break;
- case '\b':
- OS << "'\\b'";
- break;
- // Nonstandard escape sequence.
- /*case '\e':
- OS << "'\\e'";
- break;*/
- case '\f':
- OS << "'\\f'";
- break;
- case '\n':
- OS << "'\\n'";
- break;
- case '\r':
- OS << "'\\r'";
- break;
- case '\t':
- OS << "'\\t'";
- break;
- case '\v':
- OS << "'\\v'";
- break;
- default:
- // A character literal might be sign-extended, which
- // would result in an invalid \U escape sequence.
- // FIXME: multicharacter literals such as '\xFF\xFF\xFF\xFF'
- // are not correctly handled.
- if ((value & ~0xFFu) == ~0xFFu && Node->getKind() == CharacterLiteral::Ascii)
- value &= 0xFFu;
- if (value < 256 && isPrintable((unsigned char)value))
- OS << "'" << (char)value << "'";
- else if (value < 256)
- OS << "'\\x" << llvm::format("%02x", value) << "'";
- else if (value <= 0xFFFF)
- OS << "'\\u" << llvm::format("%04x", value) << "'";
- else
- OS << "'\\U" << llvm::format("%08x", value) << "'";
- }
+ CharacterLiteral::print(Node->getValue(), Node->getKind(), OS);
}
/// Prints the given expression using the original source text. Returns true on
@@ -1140,7 +1123,7 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
if (Policy.ConstantsAsWritten && printExprAsWritten(OS, Node, Context))
return;
bool isSigned = Node->getType()->isSignedIntegerType();
- OS << Node->getValue().toString(10, isSigned);
+ OS << toString(Node->getValue(), 10, isSigned);
// Emit suffixes. Integer literals are always a builtin integer type.
switch (Node->getType()->castAs<BuiltinType>()->getKind()) {
@@ -1156,6 +1139,10 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
case BuiltinType::ULong: OS << "UL"; break;
case BuiltinType::LongLong: OS << "LL"; break;
case BuiltinType::ULongLong: OS << "ULL"; break;
+ case BuiltinType::Int128:
+ break; // no suffix.
+ case BuiltinType::UInt128:
+ break; // no suffix.
}
}
@@ -1438,8 +1425,16 @@ void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
if (Node->hasTemplateKeyword())
OS << "template ";
OS << Node->getMemberNameInfo();
+ const TemplateParameterList *TPL = nullptr;
+ if (auto *FD = dyn_cast<FunctionDecl>(Node->getMemberDecl())) {
+ if (!Node->hadMultipleCandidates())
+ if (auto *FTD = FD->getPrimaryTemplate())
+ TPL = FTD->getTemplateParameters();
+ } else if (auto *VTSD =
+ dyn_cast<VarTemplateSpecializationDecl>(Node->getMemberDecl()))
+ TPL = VTSD->getSpecializedTemplate()->getTemplateParameters();
if (Node->hasExplicitTemplateArgs())
- printTemplateArgumentList(OS, Node->template_arguments(), Policy);
+ printTemplateArgumentList(OS, Node->template_arguments(), Policy, TPL);
}
void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) {
@@ -1853,8 +1848,12 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
assert(Args);
if (Args->size() != 1) {
+ const TemplateParameterList *TPL = nullptr;
+ if (!DRE->hadMultipleCandidates())
+ if (const auto *TD = dyn_cast<TemplateDecl>(DRE->getDecl()))
+ TPL = TD->getTemplateParameters();
OS << "operator\"\"" << Node->getUDSuffix()->getName();
- printTemplateArgumentList(OS, Args->asArray(), Policy);
+ printTemplateArgumentList(OS, Args->asArray(), Policy, TPL);
OS << "()";
return;
}
@@ -1869,7 +1868,7 @@ void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
case UserDefinedLiteral::LOK_Integer: {
// Print integer literal without suffix.
const auto *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
- OS << Int->getValue().toString(10, /*isSigned*/false);
+ OS << toString(Int->getValue(), 10, /*isSigned*/false);
break;
}
case UserDefinedLiteral::LOK_Floating: {
@@ -2304,7 +2303,8 @@ void StmtPrinter::VisitConceptSpecializationExpr(ConceptSpecializationExpr *E) {
OS << "template ";
OS << E->getFoundDecl()->getName();
printTemplateArgumentList(OS, E->getTemplateArgsAsWritten()->arguments(),
- Policy);
+ Policy,
+ E->getNamedConcept()->getTemplateParameters());
}
void StmtPrinter::VisitRequiresExpr(RequiresExpr *E) {
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index de9de6ff463c..ed000c2467fa 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -462,6 +462,19 @@ void OMPClauseProfiler::VisitOMPSimdlenClause(const OMPSimdlenClause *C) {
Profiler->VisitStmt(C->getSimdlen());
}
+void OMPClauseProfiler::VisitOMPSizesClause(const OMPSizesClause *C) {
+ for (auto E : C->getSizesRefs())
+ if (E)
+ Profiler->VisitExpr(E);
+}
+
+void OMPClauseProfiler::VisitOMPFullClause(const OMPFullClause *C) {}
+
+void OMPClauseProfiler::VisitOMPPartialClause(const OMPPartialClause *C) {
+ if (const Expr *Factor = C->getFactor())
+ Profiler->VisitExpr(Factor);
+}
+
void OMPClauseProfiler::VisitOMPAllocatorClause(const OMPAllocatorClause *C) {
if (C->getAllocator())
Profiler->VisitStmt(C->getAllocator());
@@ -477,6 +490,18 @@ void OMPClauseProfiler::VisitOMPDetachClause(const OMPDetachClause *C) {
Profiler->VisitStmt(Evt);
}
+void OMPClauseProfiler::VisitOMPNovariantsClause(const OMPNovariantsClause *C) {
+ VistOMPClauseWithPreInit(C);
+ if (C->getCondition())
+ Profiler->VisitStmt(C->getCondition());
+}
+
+void OMPClauseProfiler::VisitOMPNocontextClause(const OMPNocontextClause *C) {
+ VistOMPClauseWithPreInit(C);
+ if (C->getCondition())
+ Profiler->VisitStmt(C->getCondition());
+}
+
void OMPClauseProfiler::VisitOMPDefaultClause(const OMPDefaultClause *C) { }
void OMPClauseProfiler::VisitOMPProcBindClause(const OMPProcBindClause *C) { }
@@ -537,7 +562,25 @@ void OMPClauseProfiler::VisitOMPSIMDClause(const OMPSIMDClause *) {}
void OMPClauseProfiler::VisitOMPNogroupClause(const OMPNogroupClause *) {}
-void OMPClauseProfiler::VisitOMPDestroyClause(const OMPDestroyClause *) {}
+void OMPClauseProfiler::VisitOMPInitClause(const OMPInitClause *C) {
+ VisitOMPClauseList(C);
+}
+
+void OMPClauseProfiler::VisitOMPUseClause(const OMPUseClause *C) {
+ if (C->getInteropVar())
+ Profiler->VisitStmt(C->getInteropVar());
+}
+
+void OMPClauseProfiler::VisitOMPDestroyClause(const OMPDestroyClause *C) {
+ if (C->getInteropVar())
+ Profiler->VisitStmt(C->getInteropVar());
+}
+
+void OMPClauseProfiler::VisitOMPFilterClause(const OMPFilterClause *C) {
+ VistOMPClauseWithPreInit(C);
+ if (C->getThreadID())
+ Profiler->VisitStmt(C->getThreadID());
+}
template<typename T>
void OMPClauseProfiler::VisitOMPClauseList(T *Node) {
@@ -848,10 +891,18 @@ StmtProfiler::VisitOMPExecutableDirective(const OMPExecutableDirective *S) {
P.Visit(*I);
}
-void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) {
+void StmtProfiler::VisitOMPCanonicalLoop(const OMPCanonicalLoop *L) {
+ VisitStmt(L);
+}
+
+void StmtProfiler::VisitOMPLoopBasedDirective(const OMPLoopBasedDirective *S) {
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPLoopDirective(const OMPLoopDirective *S) {
+ VisitOMPLoopBasedDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelDirective(const OMPParallelDirective *S) {
VisitOMPExecutableDirective(S);
}
@@ -860,6 +911,14 @@ void StmtProfiler::VisitOMPSimdDirective(const OMPSimdDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPTileDirective(const OMPTileDirective *S) {
+ VisitOMPLoopBasedDirective(S);
+}
+
+void StmtProfiler::VisitOMPUnrollDirective(const OMPUnrollDirective *S) {
+ VisitOMPLoopBasedDirective(S);
+}
+
void StmtProfiler::VisitOMPForDirective(const OMPForDirective *S) {
VisitOMPLoopDirective(S);
}
@@ -1110,6 +1169,18 @@ void StmtProfiler::VisitOMPTargetTeamsDistributeSimdDirective(
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPInteropDirective(const OMPInteropDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPDispatchDirective(const OMPDispatchDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
+void StmtProfiler::VisitOMPMaskedDirective(const OMPMaskedDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -1130,6 +1201,12 @@ void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
}
}
+void StmtProfiler::VisitSYCLUniqueStableNameExpr(
+ const SYCLUniqueStableNameExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getTypeSourceInfo()->getType());
+}
+
void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
VisitExpr(S);
ID.AddInteger(S->getIdentKind());
diff --git a/clang/lib/AST/TemplateBase.cpp b/clang/lib/AST/TemplateBase.cpp
index baf62bd115a8..f44230d1bd03 100644
--- a/clang/lib/AST/TemplateBase.cpp
+++ b/clang/lib/AST/TemplateBase.cpp
@@ -31,6 +31,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
@@ -50,8 +51,11 @@ using namespace clang;
/// \param Out the raw_ostream instance to use for printing.
///
/// \param Policy the printing policy for EnumConstantDecl printing.
-static void printIntegral(const TemplateArgument &TemplArg,
- raw_ostream &Out, const PrintingPolicy& Policy) {
+///
+/// \param IncludeType If set, ensure that the type of the expression printed
+/// matches the type of the template argument.
+static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out,
+ const PrintingPolicy &Policy, bool IncludeType) {
const Type *T = TemplArg.getIntegralType().getTypePtr();
const llvm::APSInt &Val = TemplArg.getAsIntegral();
@@ -68,16 +72,86 @@ static void printIntegral(const TemplateArgument &TemplArg,
}
}
- if (T->isBooleanType() && !Policy.MSVCFormatting) {
- Out << (Val.getBoolValue() ? "true" : "false");
+ if (Policy.MSVCFormatting)
+ IncludeType = false;
+
+ if (T->isBooleanType()) {
+ if (!Policy.MSVCFormatting)
+ Out << (Val.getBoolValue() ? "true" : "false");
+ else
+ Out << Val;
} else if (T->isCharType()) {
- const char Ch = Val.getZExtValue();
- Out << ((Ch == '\'') ? "'\\" : "'");
- Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
- Out << "'";
- } else {
+ if (IncludeType) {
+ if (T->isSpecificBuiltinType(BuiltinType::SChar))
+ Out << "(signed char)";
+ else if (T->isSpecificBuiltinType(BuiltinType::UChar))
+ Out << "(unsigned char)";
+ }
+ CharacterLiteral::print(Val.getZExtValue(), CharacterLiteral::Ascii, Out);
+ } else if (T->isAnyCharacterType() && !Policy.MSVCFormatting) {
+ CharacterLiteral::CharacterKind Kind;
+ if (T->isWideCharType())
+ Kind = CharacterLiteral::Wide;
+ else if (T->isChar8Type())
+ Kind = CharacterLiteral::UTF8;
+ else if (T->isChar16Type())
+ Kind = CharacterLiteral::UTF16;
+ else if (T->isChar32Type())
+ Kind = CharacterLiteral::UTF32;
+ else
+ Kind = CharacterLiteral::Ascii;
+ CharacterLiteral::print(Val.getExtValue(), Kind, Out);
+ } else if (IncludeType) {
+ if (const auto *BT = T->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::ULongLong:
+ Out << Val << "ULL";
+ break;
+ case BuiltinType::LongLong:
+ Out << Val << "LL";
+ break;
+ case BuiltinType::ULong:
+ Out << Val << "UL";
+ break;
+ case BuiltinType::Long:
+ Out << Val << "L";
+ break;
+ case BuiltinType::UInt:
+ Out << Val << "U";
+ break;
+ case BuiltinType::Int:
+ Out << Val;
+ break;
+ default:
+ Out << "(" << T->getCanonicalTypeInternal().getAsString(Policy) << ")"
+ << Val;
+ break;
+ }
+ } else
+ Out << "(" << T->getCanonicalTypeInternal().getAsString(Policy) << ")"
+ << Val;
+ } else
Out << Val;
+}
+
+static unsigned getArrayDepth(QualType type) {
+ unsigned count = 0;
+ while (const auto *arrayType = type->getAsArrayTypeUnsafe()) {
+ count++;
+ type = arrayType->getElementType();
}
+ return count;
+}
+
+static bool needsAmpersandOnTemplateArg(QualType paramType, QualType argType) {
+ // Generally, if the parameter type is a pointer, we must be taking the
+ // address of something and need a &. However, if the argument is an array,
+ // this could be implicit via array-to-pointer decay.
+ if (!paramType->isPointerType())
+ return paramType->isMemberPointerType();
+ if (argType->isArrayType())
+ return getArrayDepth(argType) == getArrayDepth(paramType->getPointeeType());
+ return true;
}
//===----------------------------------------------------------------------===//
@@ -340,8 +414,9 @@ TemplateArgument TemplateArgument::getPackExpansionPattern() const {
llvm_unreachable("Invalid TemplateArgument Kind!");
}
-void TemplateArgument::print(const PrintingPolicy &Policy,
- raw_ostream &Out) const {
+void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
+ bool IncludeType) const {
+
switch (getKind()) {
case Null:
Out << "(no value)";
@@ -355,21 +430,24 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
}
case Declaration: {
+ // FIXME: Include the type if it's not obvious from the context.
NamedDecl *ND = getAsDecl();
if (getParamTypeForDecl()->isRecordType()) {
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
- // FIXME: Include the type if it's not obvious from the context.
TPO->printAsInit(Out);
break;
}
}
- if (!getParamTypeForDecl()->isReferenceType())
- Out << '&';
+ if (auto *VD = dyn_cast<ValueDecl>(ND)) {
+ if (needsAmpersandOnTemplateArg(getParamTypeForDecl(), VD->getType()))
+ Out << "&";
+ }
ND->printQualifiedName(Out);
break;
}
case NullPtr:
+ // FIXME: Include the type if it's not obvious from the context.
Out << "nullptr";
break;
@@ -383,7 +461,7 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
break;
case Integral:
- printIntegral(*this, Out, Policy);
+ printIntegral(*this, Out, Policy, IncludeType);
break;
case Expression:
@@ -399,7 +477,7 @@ void TemplateArgument::print(const PrintingPolicy &Policy,
else
Out << ", ";
- P.print(Policy, Out);
+ P.print(Policy, Out, IncludeType);
}
Out << ">";
break;
@@ -410,7 +488,7 @@ void TemplateArgument::dump(raw_ostream &Out) const {
LangOptions LO; // FIXME! see also TemplateName::dump().
LO.CPlusPlus = true;
LO.Bool = true;
- print(PrintingPolicy(LO), Out);
+ print(PrintingPolicy(LO), Out, /*IncludeType*/ true);
}
LLVM_DUMP_METHOD void TemplateArgument::dump() const { dump(llvm::errs()); }
@@ -477,7 +555,7 @@ static const T &DiagTemplateArg(const T &DB, const TemplateArgument &Arg) {
return DB << "nullptr";
case TemplateArgument::Integral:
- return DB << Arg.getAsIntegral().toString(10);
+ return DB << toString(Arg.getAsIntegral(), 10);
case TemplateArgument::Template:
return DB << Arg.getAsTemplate();
@@ -505,7 +583,7 @@ static const T &DiagTemplateArg(const T &DB, const TemplateArgument &Arg) {
LangOptions LangOpts;
LangOpts.CPlusPlus = true;
PrintingPolicy Policy(LangOpts);
- Arg.print(Policy, OS);
+ Arg.print(Policy, OS, /*IncludeType*/ true);
return DB << OS.str();
}
}
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index e3132752546f..33f914f9f886 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -21,6 +21,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TypeTraits.h"
+#include "llvm/ADT/StringExtras.h"
#include <algorithm>
#include <utility>
@@ -144,7 +145,7 @@ void TextNodeDumper::Visit(const Stmt *Node) {
{
ColorScope Color(OS, ShowColors, ValueKindColor);
switch (E->getValueKind()) {
- case VK_RValue:
+ case VK_PRValue:
break;
case VK_LValue:
OS << " lvalue";
@@ -355,6 +356,46 @@ void TextNodeDumper::Visit(const GenericSelectionExpr::ConstAssociation &A) {
OS << " selected";
}
+void TextNodeDumper::Visit(const concepts::Requirement *R) {
+ if (!R) {
+ ColorScope Color(OS, ShowColors, NullColor);
+ OS << "<<<NULL>>> Requirement";
+ return;
+ }
+
+ {
+ ColorScope Color(OS, ShowColors, StmtColor);
+ switch (R->getKind()) {
+ case concepts::Requirement::RK_Type:
+ OS << "TypeRequirement";
+ break;
+ case concepts::Requirement::RK_Simple:
+ OS << "SimpleRequirement";
+ break;
+ case concepts::Requirement::RK_Compound:
+ OS << "CompoundRequirement";
+ break;
+ case concepts::Requirement::RK_Nested:
+ OS << "NestedRequirement";
+ break;
+ }
+ }
+
+ dumpPointer(R);
+
+ if (auto *ER = dyn_cast<concepts::ExprRequirement>(R)) {
+ if (ER->hasNoexceptRequirement())
+ OS << " noexcept";
+ }
+
+ if (R->isDependent())
+ OS << " dependent";
+ else
+ OS << (R->isSatisfied() ? " satisfied" : " unsatisfied");
+ if (R->containsUnexpandedParameterPack())
+ OS << " contains_unexpanded_pack";
+}
+
static double GetApproxValue(const llvm::APFloat &F) {
llvm::APFloat V = F;
bool ignored;
@@ -923,6 +964,8 @@ void TextNodeDumper::VisitWhileStmt(const WhileStmt *Node) {
void TextNodeDumper::VisitLabelStmt(const LabelStmt *Node) {
OS << " '" << Node->getName() << "'";
+ if (Node->isSideEntry())
+ OS << " side_entry";
}
void TextNodeDumper::VisitGotoStmt(const GotoStmt *Node) {
@@ -1016,6 +1059,11 @@ void TextNodeDumper::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *Node) {
OS << " isFreeIvar";
}
+void TextNodeDumper::VisitSYCLUniqueStableNameExpr(
+ const SYCLUniqueStableNameExpr *Node) {
+ dumpType(Node->getTypeSourceInfo()->getType());
+}
+
void TextNodeDumper::VisitPredefinedExpr(const PredefinedExpr *Node) {
OS << " " << PredefinedExpr::getIdentKindName(Node->getIdentKind());
}
@@ -1028,7 +1076,7 @@ void TextNodeDumper::VisitCharacterLiteral(const CharacterLiteral *Node) {
void TextNodeDumper::VisitIntegerLiteral(const IntegerLiteral *Node) {
bool isSigned = Node->getType()->isSignedIntegerType();
ColorScope Color(OS, ShowColors, ValueColor);
- OS << " " << Node->getValue().toString(10, isSigned);
+ OS << " " << toString(Node->getValue(), 10, isSigned);
}
void TextNodeDumper::VisitFixedPointLiteral(const FixedPointLiteral *Node) {
@@ -1358,6 +1406,12 @@ void TextNodeDumper::VisitConceptSpecializationExpr(
dumpBareDeclRef(Node->getFoundDecl());
}
+void TextNodeDumper::VisitRequiresExpr(
+ const RequiresExpr *Node) {
+ if (!Node->isValueDependent())
+ OS << (Node->isSatisfied() ? " satisfied" : " unsatisfied");
+}
+
void TextNodeDumper::VisitRValueReferenceType(const ReferenceType *T) {
if (T->isSpelledAsLValue())
OS << " written as lvalue reference";
@@ -2051,6 +2105,11 @@ void TextNodeDumper::VisitUsingDecl(const UsingDecl *D) {
OS << D->getDeclName();
}
+void TextNodeDumper::VisitUsingEnumDecl(const UsingEnumDecl *D) {
+ OS << ' ';
+ dumpBareDeclRef(D->getEnumDecl());
+}
+
void TextNodeDumper::VisitUnresolvedUsingTypenameDecl(
const UnresolvedUsingTypenameDecl *D) {
OS << ' ';
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index 034e175f1352..4a2fc5219ef0 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -1801,6 +1801,9 @@ namespace {
}
// Only these types can contain the desired 'auto' type.
+ Type *VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T) {
+ return Visit(T->getReplacementType());
+ }
Type *VisitElaboratedType(const ElaboratedType *T) {
return Visit(T->getNamedType());
@@ -2086,8 +2089,9 @@ bool Type::isUnsignedIntegerOrEnumerationType() const {
bool Type::hasUnsignedIntegerRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
- else
- return isUnsignedIntegerOrEnumerationType();
+ if (const auto *VT = dyn_cast<MatrixType>(CanonicalType))
+ return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
+ return isUnsignedIntegerOrEnumerationType();
}
bool Type::isFloatingType() const {
@@ -2228,10 +2232,11 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
return !Rec->isCompleteDefinition();
}
case ConstantArray:
+ case VariableArray:
// An array is incomplete if its element type is incomplete
// (C++ [dcl.array]p1).
- // We don't handle variable arrays (they're not allowed in C++) or
- // dependent-sized arrays (dependent types are never treated as incomplete).
+ // We don't handle dependent-sized arrays (dependent types are never treated
+ // as incomplete).
return cast<ArrayType>(CanonicalType)->getElementType()
->isIncompleteType(Def);
case IncompleteArray:
@@ -2279,6 +2284,8 @@ bool Type::isSizelessBuiltinType() const {
// SVE Types
#define SVE_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
#include "clang/Basic/AArch64SVEACLETypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
return true;
default:
return false;
@@ -3088,6 +3095,10 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
case Id: \
return #Name;
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) \
+ case Id: \
+ return Name;
+#include "clang/Basic/RISCVVTypes.def"
}
llvm_unreachable("Invalid builtin type.");
@@ -3134,6 +3145,7 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_SpirFunction: return "spir_function";
case CC_OpenCLKernel: return "opencl_kernel";
case CC_Swift: return "swiftcall";
+ case CC_SwiftAsync: return "swiftasynccall";
case CC_PreserveMost: return "preserve_most";
case CC_PreserveAll: return "preserve_all";
}
@@ -3550,6 +3562,7 @@ bool AttributedType::isCallingConv() const {
case attr::ThisCall:
case attr::RegCall:
case attr::SwiftCall:
+ case attr::SwiftAsyncCall:
case attr::VectorCall:
case attr::AArch64VectorPcs:
case attr::Pascal:
@@ -4110,6 +4123,8 @@ bool Type::canHaveNullability(bool ResultIfUnknown) const {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
case BuiltinType::IncompleteMatrixIdx:
@@ -4387,8 +4402,8 @@ AutoType::AutoType(QualType DeducedAsType, AutoTypeKeyword Keyword,
if (TypeConstraintConcept) {
TemplateArgument *ArgBuffer = getArgBuffer();
for (const TemplateArgument &Arg : TypeConstraintArgs) {
- addDependence(toTypeDependence(
- Arg.getDependence() & TemplateArgumentDependence::UnexpandedPack));
+ addDependence(
+ toSyntacticDependence(toTypeDependence(Arg.getDependence())));
new (ArgBuffer++) TemplateArgument(Arg);
}
diff --git a/clang/lib/AST/TypeLoc.cpp b/clang/lib/AST/TypeLoc.cpp
index 222b1abac510..16d953b4bece 100644
--- a/clang/lib/AST/TypeLoc.cpp
+++ b/clang/lib/AST/TypeLoc.cpp
@@ -406,6 +406,8 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::BuiltinFn:
case BuiltinType::IncompleteMatrixIdx:
case BuiltinType::OMPArraySection:
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 25d7874b53fb..5de22f76f458 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -846,6 +846,8 @@ StringRef clang::getParameterABISpelling(ParameterABI ABI) {
llvm_unreachable("asking for spelling of ordinary parameter ABI");
case ParameterABI::SwiftContext:
return "swift_context";
+ case ParameterABI::SwiftAsyncContext:
+ return "swift_async_context";
case ParameterABI::SwiftErrorResult:
return "swift_error_result";
case ParameterABI::SwiftIndirectResult:
@@ -974,6 +976,9 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_Swift:
OS << " __attribute__((swiftcall))";
break;
+ case CC_SwiftAsync:
+ OS << "__attribute__((swiftasynccall))";
+ break;
case CC_PreserveMost:
OS << " __attribute__((preserve_most))";
break;
@@ -1125,7 +1130,9 @@ void TypePrinter::printAutoBefore(const AutoType *T, raw_ostream &OS) {
printBefore(T->getDeducedType(), OS);
} else {
if (T->isConstrained()) {
- OS << T->getTypeConstraintConcept()->getName();
+ // FIXME: Track a TypeConstraint as type sugar, so that we can print the
+ // type as it was written.
+ T->getTypeConstraintConcept()->getDeclName().print(OS, Policy);
auto Args = T->getTypeConstraintArguments();
if (!Args.empty())
printTemplateArgumentList(
@@ -1235,8 +1242,7 @@ void TypePrinter::AppendScope(DeclContext *DC, raw_ostream &OS,
// Only suppress an inline namespace if the name has the same lookup
// results in the enclosing namespace.
if (Policy.SuppressInlineNamespace && NS->isInline() && NameInScope &&
- DC->getParent()->lookup(NameInScope).size() ==
- DC->lookup(NameInScope).size())
+ NS->isRedundantInlineQualifierFor(NameInScope))
return AppendScope(DC->getParent(), OS, NameInScope);
AppendScope(DC->getParent(), OS, NS->getDeclName());
@@ -1304,8 +1310,10 @@ void TypePrinter::printTag(TagDecl *D, raw_ostream &OS) {
if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
OS << "lambda";
HasKindDecoration = true;
- } else {
+ } else if ((isa<RecordDecl>(D) && cast<RecordDecl>(D)->isAnonymousStructOrUnion())) {
OS << "anonymous";
+ } else {
+ OS << "unnamed";
}
if (Policy.AnonymousTagLocations) {
@@ -1444,15 +1452,14 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
T->getTemplateName().print(OS, Policy);
}
- const TemplateParameterList *TPL = TD ? TD->getTemplateParameters() : nullptr;
- printTemplateArgumentList(OS, T->template_arguments(), Policy, TPL);
+ printTemplateArgumentList(OS, T->template_arguments(), Policy);
spaceBeforePlaceHolder(OS);
}
void TypePrinter::printTemplateSpecializationBefore(
const TemplateSpecializationType *T,
raw_ostream &OS) {
- printTemplateId(T, OS, false);
+ printTemplateId(T, OS, Policy.FullyQualifiedName);
}
void TypePrinter::printTemplateSpecializationAfter(
@@ -1702,6 +1709,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::StdCall: OS << "stdcall"; break;
case attr::ThisCall: OS << "thiscall"; break;
case attr::SwiftCall: OS << "swiftcall"; break;
+ case attr::SwiftAsyncCall: OS << "swiftasynccall"; break;
case attr::VectorCall: OS << "vectorcall"; break;
case attr::Pascal: OS << "pascal"; break;
case attr::MSABI: OS << "ms_abi"; break;
@@ -1842,16 +1850,17 @@ static const TemplateArgument &getArgument(const TemplateArgumentLoc &A) {
}
static void printArgument(const TemplateArgument &A, const PrintingPolicy &PP,
- llvm::raw_ostream &OS) {
- A.print(PP, OS);
+ llvm::raw_ostream &OS, bool IncludeType) {
+ A.print(PP, OS, IncludeType);
}
static void printArgument(const TemplateArgumentLoc &A,
- const PrintingPolicy &PP, llvm::raw_ostream &OS) {
+ const PrintingPolicy &PP, llvm::raw_ostream &OS,
+ bool IncludeType) {
const TemplateArgument::ArgKind &Kind = A.getArgument().getKind();
if (Kind == TemplateArgument::ArgKind::Type)
return A.getTypeSourceInfo()->getType().print(OS, PP);
- return A.getArgument().print(PP, OS);
+ return A.getArgument().print(PP, OS, IncludeType);
}
static bool isSubstitutedTemplateArgument(ASTContext &Ctx, TemplateArgument Arg,
@@ -1988,13 +1997,14 @@ static bool isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
return false;
}
-template<typename TA>
+template <typename TA>
static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
const PrintingPolicy &Policy, bool SkipBrackets,
- const TemplateParameterList *TPL) {
+ const TemplateParameterList *TPL, bool IsPack,
+ unsigned ParmIndex) {
// Drop trailing template arguments that match default arguments.
if (TPL && Policy.SuppressDefaultTemplateArgs &&
- !Policy.PrintCanonicalTypes && !Args.empty() &&
+ !Policy.PrintCanonicalTypes && !Args.empty() && !IsPack &&
Args.size() <= TPL->size()) {
ASTContext &Ctx = TPL->getParam(0)->getASTContext();
llvm::SmallVector<TemplateArgument, 8> OrigArgs;
@@ -2021,12 +2031,15 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
if (Argument.getKind() == TemplateArgument::Pack) {
if (Argument.pack_size() && !FirstArg)
OS << Comma;
- printTo(ArgOS, Argument.getPackAsArray(), Policy, true, nullptr);
+ printTo(ArgOS, Argument.getPackAsArray(), Policy, true, TPL,
+ /*IsPack*/ true, ParmIndex);
} else {
if (!FirstArg)
OS << Comma;
// Tries to print the argument with location info if exists.
- printArgument(Arg, Policy, ArgOS);
+ printArgument(
+ Arg, Policy, ArgOS,
+ TemplateParameterList::shouldIncludeTypeForArgument(TPL, ParmIndex));
}
StringRef ArgString = ArgOS.str();
@@ -2043,6 +2056,10 @@ static void printTo(raw_ostream &OS, ArrayRef<TA> Args,
NeedSpace = Policy.SplitTemplateClosers && !ArgString.empty() &&
ArgString.back() == '>';
FirstArg = false;
+
+ // Use same template parameter for all elements of Pack
+ if (!IsPack)
+ ParmIndex++;
}
if (NeedSpace)
@@ -2063,14 +2080,14 @@ void clang::printTemplateArgumentList(raw_ostream &OS,
ArrayRef<TemplateArgument> Args,
const PrintingPolicy &Policy,
const TemplateParameterList *TPL) {
- printTo(OS, Args, Policy, false, TPL);
+ printTo(OS, Args, Policy, false, TPL, /*isPack*/ false, /*parmIndex*/ 0);
}
void clang::printTemplateArgumentList(raw_ostream &OS,
ArrayRef<TemplateArgumentLoc> Args,
const PrintingPolicy &Policy,
const TemplateParameterList *TPL) {
- printTo(OS, Args, Policy, false, TPL);
+ printTo(OS, Args, Policy, false, TPL, /*isPack*/ false, /*parmIndex*/ 0);
}
std::string Qualifiers::getAsString() const {
@@ -2110,18 +2127,23 @@ std::string Qualifiers::getAddrSpaceAsString(LangAS AS) {
case LangAS::Default:
return "";
case LangAS::opencl_global:
+ case LangAS::sycl_global:
return "__global";
case LangAS::opencl_local:
+ case LangAS::sycl_local:
return "__local";
case LangAS::opencl_private:
+ case LangAS::sycl_private:
return "__private";
case LangAS::opencl_constant:
return "__constant";
case LangAS::opencl_generic:
return "__generic";
case LangAS::opencl_global_device:
+ case LangAS::sycl_global_device:
return "__global_device";
case LangAS::opencl_global_host:
+ case LangAS::sycl_global_host:
return "__global_host";
case LangAS::cuda_device:
return "__device__";
diff --git a/clang/lib/AST/VTableBuilder.cpp b/clang/lib/AST/VTableBuilder.cpp
index f5865ce96b64..38d6fc28e098 100644
--- a/clang/lib/AST/VTableBuilder.cpp
+++ b/clang/lib/AST/VTableBuilder.cpp
@@ -487,7 +487,7 @@ static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
const CXXMethodDecl *RHS) {
assert(VTableContextBase::hasVtableSlot(LHS) && "LHS must be virtual!");
- assert(VTableContextBase::hasVtableSlot(RHS) && "LHS must be virtual!");
+ assert(VTableContextBase::hasVtableSlot(RHS) && "RHS must be virtual!");
// A destructor can share a vcall offset with another destructor.
if (isa<CXXDestructorDecl>(LHS))
diff --git a/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index 8ddd3c87e09d..5d6cea54b8ec 100644
--- a/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -243,10 +243,14 @@ public:
return true;
ScopedIncrement ScopedDepth(&CurrentDepth);
if (auto *Init = Node->getInit())
- if (!match(*Init))
+ if (!traverse(*Init))
return false;
- if (!match(*Node->getLoopVariable()) || !match(*Node->getRangeInit()) ||
- !match(*Node->getBody()))
+ if (!match(*Node->getLoopVariable()))
+ return false;
+ if (match(*Node->getRangeInit()))
+ if (!VisitorBase::TraverseStmt(Node->getRangeInit()))
+ return false;
+ if (!match(*Node->getBody()))
return false;
return VisitorBase::TraverseStmt(Node->getBody());
}
@@ -291,7 +295,7 @@ public:
if (!match(*Node->getBody()))
return false;
- return true;
+ return VisitorBase::TraverseStmt(Node->getBody());
}
bool shouldVisitTemplateInstantiations() const { return true; }
@@ -488,15 +492,21 @@ public:
bool dataTraverseNode(Stmt *S, DataRecursionQueue *Queue) {
if (auto *RF = dyn_cast<CXXForRangeStmt>(S)) {
- for (auto *SubStmt : RF->children()) {
- if (SubStmt == RF->getInit() || SubStmt == RF->getLoopVarStmt() ||
- SubStmt == RF->getRangeInit() || SubStmt == RF->getBody()) {
- TraverseStmt(SubStmt, Queue);
- } else {
- ASTNodeNotSpelledInSourceScope RAII(this, true);
- TraverseStmt(SubStmt, Queue);
+ {
+ ASTNodeNotAsIsSourceScope RAII(this, true);
+ TraverseStmt(RF->getInit());
+ // Don't traverse under the loop variable
+ match(*RF->getLoopVariable());
+ TraverseStmt(RF->getRangeInit());
+ }
+ {
+ ASTNodeNotSpelledInSourceScope RAII(this, true);
+ for (auto *SubStmt : RF->children()) {
+ if (SubStmt != RF->getBody())
+ TraverseStmt(SubStmt);
}
}
+ TraverseStmt(RF->getBody());
return true;
} else if (auto *RBO = dyn_cast<CXXRewrittenBinaryOperator>(S)) {
{
@@ -556,9 +566,9 @@ public:
if (LE->hasExplicitResultType())
TraverseTypeLoc(Proto.getReturnLoc());
TraverseStmt(LE->getTrailingRequiresClause());
-
- TraverseStmt(LE->getBody());
}
+
+ TraverseStmt(LE->getBody());
return true;
}
return RecursiveASTVisitor<MatchASTVisitor>::dataTraverseNode(S, Queue);
@@ -697,6 +707,10 @@ public:
bool shouldVisitTemplateInstantiations() const { return true; }
bool shouldVisitImplicitCode() const { return true; }
+ // We visit the lambda body explicitly, so instruct the RAV
+ // to not visit it on our behalf too.
+ bool shouldVisitLambdaBody() const { return false; }
+
bool IsMatchingInASTNodeNotSpelledInSource() const override {
return TraversingASTNodeNotSpelledInSource;
}
@@ -823,6 +837,14 @@ private:
if (EnableCheckProfiling)
Timer.setBucket(&TimeByBucket[MP.second->getID()]);
BoundNodesTreeBuilder Builder;
+
+ {
+ TraversalKindScope RAII(getASTContext(), MP.first.getTraversalKind());
+ if (getASTContext().getParentMapContext().traverseIgnored(DynNode) !=
+ DynNode)
+ continue;
+ }
+
if (MP.first.matches(DynNode, this, &Builder)) {
MatchVisitor Visitor(ActiveASTContext, MP.second);
Builder.visitMatches(&Visitor);
@@ -1014,6 +1036,7 @@ private:
Callback(Callback) {}
void visitMatch(const BoundNodes& BoundNodesView) override {
+ TraversalKindScope RAII(*Context, Callback->getCheckTraversalKind());
Callback->run(MatchFinder::MatchResult(BoundNodesView, Context));
}
@@ -1194,6 +1217,8 @@ bool MatchASTVisitor::TraverseDecl(Decl *DeclNode) {
ScopedChildren = true;
if (FD->isTemplateInstantiation())
ScopedTraversal = true;
+ } else if (isa<BindingDecl>(DeclNode)) {
+ ScopedChildren = true;
}
ASTNodeNotSpelledInSourceScope RAII1(this, ScopedTraversal);
@@ -1311,7 +1336,13 @@ MatchFinder::~MatchFinder() {}
void MatchFinder::addMatcher(const DeclarationMatcher &NodeMatch,
MatchCallback *Action) {
- Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
+ llvm::Optional<TraversalKind> TK;
+ if (Action)
+ TK = Action->getCheckTraversalKind();
+ if (TK)
+ Matchers.DeclOrStmt.emplace_back(traverse(*TK, NodeMatch), Action);
+ else
+ Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
Matchers.AllCallbacks.insert(Action);
}
@@ -1323,7 +1354,13 @@ void MatchFinder::addMatcher(const TypeMatcher &NodeMatch,
void MatchFinder::addMatcher(const StatementMatcher &NodeMatch,
MatchCallback *Action) {
- Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
+ llvm::Optional<TraversalKind> TK;
+ if (Action)
+ TK = Action->getCheckTraversalKind();
+ if (TK)
+ Matchers.DeclOrStmt.emplace_back(traverse(*TK, NodeMatch), Action);
+ else
+ Matchers.DeclOrStmt.emplace_back(NodeMatch, Action);
Matchers.AllCallbacks.insert(Action);
}
@@ -1412,5 +1449,10 @@ void MatchFinder::registerTestCallbackAfterParsing(
StringRef MatchFinder::MatchCallback::getID() const { return "<unknown>"; }
+llvm::Optional<TraversalKind>
+MatchFinder::MatchCallback::getCheckTraversalKind() const {
+ return llvm::None;
+}
+
} // end namespace ast_matchers
} // end namespace clang
diff --git a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 6c7e14e3499a..169ce3b83980 100644
--- a/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -732,7 +732,8 @@ const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl>
typeAliasTemplateDecl;
const internal::VariadicAllOfMatcher<Decl> decl;
-const internal::VariadicAllOfMatcher<DecompositionDecl> decompositionDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl> decompositionDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, BindingDecl> bindingDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl>
linkageSpecDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl;
@@ -755,6 +756,7 @@ const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl>
const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl>
accessSpecDecl;
+const internal::VariadicAllOfMatcher<CXXBaseSpecifier> cxxBaseSpecifier;
const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer;
const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument;
const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc;
@@ -839,6 +841,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr>
substNonTypeTemplateParmExpr;
const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl;
+const internal::VariadicDynCastAllOfMatcher<Decl, UsingEnumDecl> usingEnumDecl;
const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl>
usingDirectiveDecl;
const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr>
@@ -882,6 +885,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt> continueStmt;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CoreturnStmt> coreturnStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt> returnStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt;
@@ -914,6 +918,12 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr>
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr>
cxxNullPtrLiteralExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CoawaitExpr>
+ coawaitExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, DependentCoawaitExpr>
+ dependentCoawaitExpr;
+const internal::VariadicDynCastAllOfMatcher<Stmt, CoyieldExpr>
+ coyieldExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr;
const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr>
genericSelectionExpr;
@@ -924,6 +934,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator>
const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr,
CXXRewrittenBinaryOperator>
binaryOperation;
+const internal::MapAnyOfMatcher<CallExpr, CXXConstructExpr> invocation;
const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator;
const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator>
conditionalOperator;
diff --git a/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp b/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
index 88c2279afb2e..ba2f49e6b623 100644
--- a/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Diagnostics.cpp
@@ -100,6 +100,10 @@ static StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) {
return "Value not found: $0";
case Diagnostics::ET_RegistryUnknownEnumWithReplace:
return "Unknown value '$1' for arg $0; did you mean '$2'";
+ case Diagnostics::ET_RegistryNonNodeMatcher:
+ return "Matcher not a node matcher: $0";
+ case Diagnostics::ET_RegistryMatcherNoWithSupport:
+ return "Matcher does not support with call.";
case Diagnostics::ET_ParserStringError:
return "Error parsing string token: <$0>";
@@ -123,6 +127,10 @@ static StringRef errorTypeToFormatString(Diagnostics::ErrorType Type) {
return "Error parsing numeric literal: <$0>";
case Diagnostics::ET_ParserOverloadedType:
return "Input value has unresolved overloaded type: $0";
+ case Diagnostics::ET_ParserMalformedChainedExpr:
+ return "Period not followed by valid chained call.";
+ case Diagnostics::ET_ParserFailedToBuildMatcher:
+ return "Failed to build matcher: $0.";
case Diagnostics::ET_None:
return "<N/A>";
diff --git a/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
index f6fdbe868e2d..40db70e6f4a5 100644
--- a/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -20,7 +20,7 @@ getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
++MaxEditDistance;
llvm::StringRef Res;
for (const llvm::StringRef &Item : Allowed) {
- if (Item.equals_lower(Search)) {
+ if (Item.equals_insensitive(Search)) {
assert(!Item.equals(Search) && "This should be handled earlier on.");
MaxEditDistance = 1;
Res = Item;
@@ -40,7 +40,7 @@ getBestGuess(llvm::StringRef Search, llvm::ArrayRef<llvm::StringRef> Allowed,
auto NoPrefix = Item;
if (!NoPrefix.consume_front(DropPrefix))
continue;
- if (NoPrefix.equals_lower(Search)) {
+ if (NoPrefix.equals_insensitive(Search)) {
if (NoPrefix.equals(Search))
return Item.str();
MaxEditDistance = 1;
diff --git a/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 690b52162e2b..783fb203c408 100644
--- a/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -93,7 +93,7 @@ template <class T> struct ArgTypeTraits<ast_matchers::internal::Matcher<T>> {
}
static ArgKind getKind() {
- return ArgKind(ASTNodeKind::getFromNodeKind<T>());
+ return ArgKind::MakeMatcherArg(ASTNodeKind::getFromNodeKind<T>());
}
static llvm::Optional<std::string> getBestGuess(const VariantValue &) {
@@ -309,6 +309,16 @@ public:
ArrayRef<ParserValue> Args,
Diagnostics *Error) const = 0;
+ virtual ASTNodeKind nodeMatcherType() const { return ASTNodeKind(); }
+
+ virtual bool isBuilderMatcher() const { return false; }
+
+ virtual std::unique_ptr<MatcherDescriptor>
+ buildMatcherCtor(SourceRange NameRange, ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const {
+ return {};
+ }
+
/// Returns whether the matcher is variadic. Variadic matchers can take any
/// number of arguments, but they must be of the same type.
virtual bool isVariadic() const = 0;
@@ -343,7 +353,8 @@ inline bool isRetKindConvertibleTo(ArrayRef<ASTNodeKind> RetKinds,
ASTNodeKind Kind, unsigned *Specificity,
ASTNodeKind *LeastDerivedKind) {
for (const ASTNodeKind &NodeKind : RetKinds) {
- if (ArgKind(NodeKind).isConvertibleTo(Kind, Specificity)) {
+ if (ArgKind::MakeMatcherArg(NodeKind).isConvertibleTo(
+ ArgKind::MakeMatcherArg(Kind), Specificity)) {
if (LeastDerivedKind)
*LeastDerivedKind = NodeKind;
return true;
@@ -481,9 +492,11 @@ template <typename ResultT, typename ArgT,
VariantMatcher
variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
ArrayRef<ParserValue> Args, Diagnostics *Error) {
- ArgT **InnerArgs = new ArgT *[Args.size()]();
+ SmallVector<ArgT *, 8> InnerArgsPtr;
+ InnerArgsPtr.resize_for_overwrite(Args.size());
+ SmallVector<ArgT, 8> InnerArgs;
+ InnerArgs.reserve(Args.size());
- bool HasError = false;
for (size_t i = 0, e = Args.size(); i != e; ++i) {
using ArgTraits = ArgTypeTraits<ArgT>;
@@ -492,8 +505,7 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
if (!ArgTraits::hasCorrectType(Value)) {
Error->addError(Arg.Range, Error->ET_RegistryWrongArgType)
<< (i + 1) << ArgTraits::getKind().asString() << Value.getTypeAsString();
- HasError = true;
- break;
+ return {};
}
if (!ArgTraits::hasCorrectValue(Value)) {
if (llvm::Optional<std::string> BestGuess =
@@ -510,24 +522,12 @@ variadicMatcherDescriptor(StringRef MatcherName, SourceRange NameRange,
<< (i + 1) << ArgTraits::getKind().asString()
<< Value.getTypeAsString();
}
- HasError = true;
- break;
+ return {};
}
-
- InnerArgs[i] = new ArgT(ArgTraits::get(Value));
+ InnerArgs.set_size(i + 1);
+ InnerArgsPtr[i] = new (&InnerArgs[i]) ArgT(ArgTraits::get(Value));
}
-
- VariantMatcher Out;
- if (!HasError) {
- Out = outvalueToVariantMatcher(Func(llvm::makeArrayRef(InnerArgs,
- Args.size())));
- }
-
- for (size_t i = 0, e = Args.size(); i != e; ++i) {
- delete InnerArgs[i];
- }
- delete[] InnerArgs;
- return Out;
+ return outvalueToVariantMatcher(Func(InnerArgsPtr));
}
/// Matcher descriptor for variadic functions.
@@ -575,6 +575,8 @@ public:
LeastDerivedKind);
}
+ ASTNodeKind nodeMatcherType() const override { return RetKinds[0]; }
+
private:
const RunFunc Func;
const std::string MatcherName;
@@ -610,6 +612,8 @@ public:
}
}
+ ASTNodeKind nodeMatcherType() const override { return DerivedKind; }
+
private:
const ASTNodeKind DerivedKind;
};
@@ -904,7 +908,7 @@ public:
void getArgKinds(ASTNodeKind ThisKind, unsigned ArgNo,
std::vector<ArgKind> &Kinds) const override {
- Kinds.push_back(ThisKind);
+ Kinds.push_back(ArgKind::MakeMatcherArg(ThisKind));
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
@@ -976,7 +980,7 @@ public:
void getArgKinds(ASTNodeKind ThisKind, unsigned,
std::vector<ArgKind> &Kinds) const override {
- Kinds.push_back(ThisKind);
+ Kinds.push_back(ArgKind::MakeMatcherArg(ThisKind));
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity,
@@ -989,6 +993,62 @@ public:
}
};
+class MapAnyOfBuilderDescriptor : public MatcherDescriptor {
+public:
+ VariantMatcher create(SourceRange, ArrayRef<ParserValue>,
+ Diagnostics *) const override {
+ return {};
+ }
+
+ bool isBuilderMatcher() const override { return true; }
+
+ std::unique_ptr<MatcherDescriptor>
+ buildMatcherCtor(SourceRange, ArrayRef<ParserValue> Args,
+ Diagnostics *) const override {
+
+ std::vector<ASTNodeKind> NodeKinds;
+ for (auto Arg : Args) {
+ if (!Arg.Value.isNodeKind())
+ return {};
+ NodeKinds.push_back(Arg.Value.getNodeKind());
+ }
+
+ if (NodeKinds.empty())
+ return {};
+
+ ASTNodeKind CladeNodeKind = NodeKinds.front().getCladeKind();
+
+ for (auto NK : NodeKinds)
+ {
+ if (!NK.getCladeKind().isSame(CladeNodeKind))
+ return {};
+ }
+
+ return std::make_unique<MapAnyOfMatcherDescriptor>(CladeNodeKind,
+ NodeKinds);
+ }
+
+ bool isVariadic() const override { return true; }
+
+ unsigned getNumArgs() const override { return 0; }
+
+ void getArgKinds(ASTNodeKind ThisKind, unsigned,
+ std::vector<ArgKind> &ArgKinds) const override {
+ ArgKinds.push_back(ArgKind::MakeNodeArg(ThisKind));
+ return;
+ }
+ bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity = nullptr,
+ ASTNodeKind *LeastDerivedKind = nullptr) const override {
+ if (Specificity)
+ *Specificity = 1;
+ if (LeastDerivedKind)
+ *LeastDerivedKind = Kind;
+ return true;
+ }
+
+ bool isPolymorphic() const override { return false; }
+};
+
/// Helper functions to select the appropriate marshaller functions.
/// They detect the number of arguments, arguments types and return type.
diff --git a/clang/lib/ASTMatchers/Dynamic/Parser.cpp b/clang/lib/ASTMatchers/Dynamic/Parser.cpp
index a0037549ca61..c6a77bb6c2e0 100644
--- a/clang/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -52,6 +52,7 @@ struct Parser::TokenInfo {
/// Some known identifiers.
static const char* const ID_Bind;
+ static const char *const ID_With;
TokenInfo() = default;
@@ -62,6 +63,7 @@ struct Parser::TokenInfo {
};
const char* const Parser::TokenInfo::ID_Bind = "bind";
+const char *const Parser::TokenInfo::ID_With = "with";
/// Simple tokenizer for the parser.
class Parser::CodeTokenizer {
@@ -366,6 +368,29 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
}
std::string BindID;
+ Tokenizer->consumeNextToken();
+ TokenInfo ChainCallToken = Tokenizer->consumeNextToken();
+ if (ChainCallToken.Kind == TokenInfo::TK_CodeCompletion) {
+ addCompletion(ChainCallToken, MatcherCompletion("bind(\"", "bind", 1));
+ return false;
+ }
+
+ if (ChainCallToken.Kind != TokenInfo::TK_Ident ||
+ (ChainCallToken.Text != TokenInfo::ID_Bind &&
+ ChainCallToken.Text != TokenInfo::ID_With)) {
+ Error->addError(ChainCallToken.Range,
+ Error->ET_ParserMalformedChainedExpr);
+ return false;
+ }
+ if (ChainCallToken.Text == TokenInfo::ID_With) {
+
+ Diagnostics::Context Ctx(Diagnostics::Context::ConstructMatcher, Error,
+ NameToken.Text, NameToken.Range);
+
+ Error->addError(ChainCallToken.Range,
+ Error->ET_RegistryMatcherNoWithSupport);
+ return false;
+ }
if (!parseBindID(BindID))
return false;
@@ -405,31 +430,28 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
Tokenizer->SkipNewlines();
+ assert(NameToken.Kind == TokenInfo::TK_Ident);
+ TokenInfo OpenToken = Tokenizer->consumeNextToken();
+ if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
+ Error->addError(OpenToken.Range, Error->ET_ParserNoOpenParen)
+ << OpenToken.Text;
+ return false;
+ }
+
+ llvm::Optional<MatcherCtor> Ctor = S->lookupMatcherCtor(NameToken.Text);
+
// Parse as a matcher expression.
- return parseMatcherExpressionImpl(NameToken, Value);
+ return parseMatcherExpressionImpl(NameToken, OpenToken, Ctor, Value);
}
bool Parser::parseBindID(std::string &BindID) {
- // Parse .bind("foo")
- assert(Tokenizer->peekNextToken().Kind == TokenInfo::TK_Period);
- Tokenizer->consumeNextToken(); // consume the period.
- const TokenInfo BindToken = Tokenizer->consumeNextToken();
- if (BindToken.Kind == TokenInfo::TK_CodeCompletion) {
- addCompletion(BindToken, MatcherCompletion("bind(\"", "bind", 1));
- return false;
- }
-
+ // Parse the parenthesized argument to .bind("foo")
const TokenInfo OpenToken = Tokenizer->consumeNextToken();
const TokenInfo IDToken = Tokenizer->consumeNextTokenIgnoreNewlines();
const TokenInfo CloseToken = Tokenizer->consumeNextTokenIgnoreNewlines();
// TODO: We could use different error codes for each/some to be more
// explicit about the syntax error.
- if (BindToken.Kind != TokenInfo::TK_Ident ||
- BindToken.Text != TokenInfo::ID_Bind) {
- Error->addError(BindToken.Range, Error->ET_ParserMalformedBindExpr);
- return false;
- }
if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
Error->addError(OpenToken.Range, Error->ET_ParserMalformedBindExpr);
return false;
@@ -446,28 +468,177 @@ bool Parser::parseBindID(std::string &BindID) {
return true;
}
+bool Parser::parseMatcherBuilder(MatcherCtor Ctor, const TokenInfo &NameToken,
+ const TokenInfo &OpenToken,
+ VariantValue *Value) {
+ std::vector<ParserValue> Args;
+ TokenInfo EndToken;
+
+ Tokenizer->SkipNewlines();
+
+ {
+ ScopedContextEntry SCE(this, Ctor);
+
+ while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
+ if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
+ // End of args.
+ EndToken = Tokenizer->consumeNextToken();
+ break;
+ }
+ if (!Args.empty()) {
+ // We must find a , token to continue.
+ TokenInfo CommaToken = Tokenizer->consumeNextToken();
+ if (CommaToken.Kind != TokenInfo::TK_Comma) {
+ Error->addError(CommaToken.Range, Error->ET_ParserNoComma)
+ << CommaToken.Text;
+ return false;
+ }
+ }
+
+ Diagnostics::Context Ctx(Diagnostics::Context::MatcherArg, Error,
+ NameToken.Text, NameToken.Range,
+ Args.size() + 1);
+ ParserValue ArgValue;
+ Tokenizer->SkipNewlines();
+
+ if (Tokenizer->peekNextToken().Kind == TokenInfo::TK_CodeCompletion) {
+ addExpressionCompletions();
+ return false;
+ }
+
+ TokenInfo NodeMatcherToken = Tokenizer->consumeNextToken();
+
+ if (NodeMatcherToken.Kind != TokenInfo::TK_Ident) {
+ Error->addError(NameToken.Range, Error->ET_ParserFailedToBuildMatcher)
+ << NameToken.Text;
+ return false;
+ }
+
+ ArgValue.Text = NodeMatcherToken.Text;
+ ArgValue.Range = NodeMatcherToken.Range;
+
+ llvm::Optional<MatcherCtor> MappedMatcher =
+ S->lookupMatcherCtor(ArgValue.Text);
+
+ if (!MappedMatcher) {
+ Error->addError(NodeMatcherToken.Range,
+ Error->ET_RegistryMatcherNotFound)
+ << NodeMatcherToken.Text;
+ return false;
+ }
+
+ ASTNodeKind NK = S->nodeMatcherType(*MappedMatcher);
+
+ if (NK.isNone()) {
+ Error->addError(NodeMatcherToken.Range,
+ Error->ET_RegistryNonNodeMatcher)
+ << NodeMatcherToken.Text;
+ return false;
+ }
+
+ ArgValue.Value = NK;
+
+ Tokenizer->SkipNewlines();
+ Args.push_back(ArgValue);
+
+ SCE.nextArg();
+ }
+ }
+
+ if (EndToken.Kind == TokenInfo::TK_Eof) {
+ Error->addError(OpenToken.Range, Error->ET_ParserNoCloseParen);
+ return false;
+ }
+
+ internal::MatcherDescriptorPtr BuiltCtor =
+ S->buildMatcherCtor(Ctor, NameToken.Range, Args, Error);
+
+ if (!BuiltCtor.get()) {
+ Error->addError(NameToken.Range, Error->ET_ParserFailedToBuildMatcher)
+ << NameToken.Text;
+ return false;
+ }
+
+ std::string BindID;
+ if (Tokenizer->peekNextToken().Kind == TokenInfo::TK_Period) {
+ Tokenizer->consumeNextToken();
+ TokenInfo ChainCallToken = Tokenizer->consumeNextToken();
+ if (ChainCallToken.Kind == TokenInfo::TK_CodeCompletion) {
+ addCompletion(ChainCallToken, MatcherCompletion("bind(\"", "bind", 1));
+ addCompletion(ChainCallToken, MatcherCompletion("with(", "with", 1));
+ return false;
+ }
+ if (ChainCallToken.Kind != TokenInfo::TK_Ident ||
+ (ChainCallToken.Text != TokenInfo::ID_Bind &&
+ ChainCallToken.Text != TokenInfo::ID_With)) {
+ Error->addError(ChainCallToken.Range,
+ Error->ET_ParserMalformedChainedExpr);
+ return false;
+ }
+ if (ChainCallToken.Text == TokenInfo::ID_Bind) {
+ if (!parseBindID(BindID))
+ return false;
+ Diagnostics::Context Ctx(Diagnostics::Context::ConstructMatcher, Error,
+ NameToken.Text, NameToken.Range);
+ SourceRange MatcherRange = NameToken.Range;
+ MatcherRange.End = ChainCallToken.Range.End;
+ VariantMatcher Result = S->actOnMatcherExpression(
+ BuiltCtor.get(), MatcherRange, BindID, {}, Error);
+ if (Result.isNull())
+ return false;
+
+ *Value = Result;
+ return true;
+ } else if (ChainCallToken.Text == TokenInfo::ID_With) {
+ Tokenizer->SkipNewlines();
+
+ if (Tokenizer->nextTokenKind() != TokenInfo::TK_OpenParen) {
+ StringRef ErrTxt = Tokenizer->nextTokenKind() == TokenInfo::TK_Eof
+ ? StringRef("EOF")
+ : Tokenizer->peekNextToken().Text;
+ Error->addError(Tokenizer->peekNextToken().Range,
+ Error->ET_ParserNoOpenParen)
+ << ErrTxt;
+ return false;
+ }
+
+ TokenInfo WithOpenToken = Tokenizer->consumeNextToken();
+
+ return parseMatcherExpressionImpl(NameToken, WithOpenToken,
+ BuiltCtor.get(), Value);
+ }
+ }
+
+ Diagnostics::Context Ctx(Diagnostics::Context::ConstructMatcher, Error,
+ NameToken.Text, NameToken.Range);
+ SourceRange MatcherRange = NameToken.Range;
+ MatcherRange.End = EndToken.Range.End;
+ VariantMatcher Result = S->actOnMatcherExpression(
+ BuiltCtor.get(), MatcherRange, BindID, {}, Error);
+ if (Result.isNull())
+ return false;
+
+ *Value = Result;
+ return true;
+}
+
/// Parse and validate a matcher expression.
/// \return \c true on success, in which case \c Value has the matcher parsed.
/// If the input is malformed, or some argument has an error, it
/// returns \c false.
bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
+ const TokenInfo &OpenToken,
+ llvm::Optional<MatcherCtor> Ctor,
VariantValue *Value) {
- assert(NameToken.Kind == TokenInfo::TK_Ident);
- const TokenInfo OpenToken = Tokenizer->consumeNextToken();
- if (OpenToken.Kind != TokenInfo::TK_OpenParen) {
- Error->addError(OpenToken.Range, Error->ET_ParserNoOpenParen)
- << OpenToken.Text;
- return false;
- }
-
- llvm::Optional<MatcherCtor> Ctor = S->lookupMatcherCtor(NameToken.Text);
-
if (!Ctor) {
Error->addError(NameToken.Range, Error->ET_RegistryMatcherNotFound)
<< NameToken.Text;
// Do not return here. We need to continue to give completion suggestions.
}
+ if (Ctor && *Ctor && S->isBuilderMatcher(*Ctor))
+ return parseMatcherBuilder(*Ctor, NameToken, OpenToken, Value);
+
std::vector<ParserValue> Args;
TokenInfo EndToken;
@@ -516,6 +687,32 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
std::string BindID;
if (Tokenizer->peekNextToken().Kind == TokenInfo::TK_Period) {
+ Tokenizer->consumeNextToken();
+ TokenInfo ChainCallToken = Tokenizer->consumeNextToken();
+ if (ChainCallToken.Kind == TokenInfo::TK_CodeCompletion) {
+ addCompletion(ChainCallToken, MatcherCompletion("bind(\"", "bind", 1));
+ return false;
+ }
+
+ if (ChainCallToken.Kind != TokenInfo::TK_Ident) {
+ Error->addError(ChainCallToken.Range,
+ Error->ET_ParserMalformedChainedExpr);
+ return false;
+ }
+ if (ChainCallToken.Text == TokenInfo::ID_With) {
+
+ Diagnostics::Context Ctx(Diagnostics::Context::ConstructMatcher, Error,
+ NameToken.Text, NameToken.Range);
+
+ Error->addError(ChainCallToken.Range,
+ Error->ET_RegistryMatcherNoWithSupport);
+ return false;
+ }
+ if (ChainCallToken.Text != TokenInfo::ID_Bind) {
+ Error->addError(ChainCallToken.Range,
+ Error->ET_ParserMalformedChainedExpr);
+ return false;
+ }
if (!parseBindID(BindID))
return false;
}
@@ -657,6 +854,21 @@ std::vector<MatcherCompletion> Parser::RegistrySema::getMatcherCompletions(
return Registry::getMatcherCompletions(AcceptedTypes);
}
+bool Parser::RegistrySema::isBuilderMatcher(MatcherCtor Ctor) const {
+ return Registry::isBuilderMatcher(Ctor);
+}
+
+ASTNodeKind Parser::RegistrySema::nodeMatcherType(MatcherCtor Ctor) const {
+ return Registry::nodeMatcherType(Ctor);
+}
+
+internal::MatcherDescriptorPtr
+Parser::RegistrySema::buildMatcherCtor(MatcherCtor Ctor, SourceRange NameRange,
+ ArrayRef<ParserValue> Args,
+ Diagnostics *Error) const {
+ return Registry::buildMatcherCtor(Ctor, NameRange, Args, Error);
+}
+
bool Parser::parseExpression(StringRef &Code, Sema *S,
const NamedValueMap *NamedValues,
VariantValue *Value, Diagnostics *Error) {
diff --git a/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 00a7c74a0b90..0048f1133ca2 100644
--- a/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -102,6 +102,9 @@ RegistryMaps::RegistryMaps() {
// Other:
// equalsNode
+ registerMatcher("mapAnyOf",
+ std::make_unique<internal::MapAnyOfBuilderDescriptor>());
+
REGISTER_OVERLOADED_2(callee);
REGISTER_OVERLOADED_2(hasAnyCapture);
REGISTER_OVERLOADED_2(hasPrefix);
@@ -144,6 +147,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(binaryConditionalOperator);
REGISTER_MATCHER(binaryOperator);
REGISTER_MATCHER(binaryOperation);
+ REGISTER_MATCHER(bindingDecl);
REGISTER_MATCHER(blockDecl);
REGISTER_MATCHER(blockExpr);
REGISTER_MATCHER(blockPointerType);
@@ -162,12 +166,16 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(complexType);
REGISTER_MATCHER(compoundLiteralExpr);
REGISTER_MATCHER(compoundStmt);
+ REGISTER_MATCHER(coawaitExpr);
REGISTER_MATCHER(conditionalOperator);
REGISTER_MATCHER(constantArrayType);
REGISTER_MATCHER(constantExpr);
REGISTER_MATCHER(containsDeclaration);
REGISTER_MATCHER(continueStmt);
+ REGISTER_MATCHER(coreturnStmt);
+ REGISTER_MATCHER(coyieldExpr);
REGISTER_MATCHER(cudaKernelCallExpr);
+ REGISTER_MATCHER(cxxBaseSpecifier);
REGISTER_MATCHER(cxxBindTemporaryExpr);
REGISTER_MATCHER(cxxBoolLiteral);
REGISTER_MATCHER(cxxCatchStmt);
@@ -210,6 +218,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(decltypeType);
REGISTER_MATCHER(deducedTemplateSpecializationType);
REGISTER_MATCHER(defaultStmt);
+ REGISTER_MATCHER(dependentCoawaitExpr);
REGISTER_MATCHER(dependentSizedArrayType);
REGISTER_MATCHER(designatedInitExpr);
REGISTER_MATCHER(designatorCountIs);
@@ -225,7 +234,10 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(expr);
REGISTER_MATCHER(exprWithCleanups);
REGISTER_MATCHER(fieldDecl);
+ REGISTER_MATCHER(fixedPointLiteral);
REGISTER_MATCHER(floatLiteral);
+ REGISTER_MATCHER(forCallable);
+ REGISTER_MATCHER(forDecomposition);
REGISTER_MATCHER(forEach);
REGISTER_MATCHER(forEachArgumentWithParam);
REGISTER_MATCHER(forEachArgumentWithParamType);
@@ -248,6 +260,8 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAncestor);
REGISTER_MATCHER(hasAnyArgument);
REGISTER_MATCHER(hasAnyBase);
+ REGISTER_MATCHER(hasAnyBinding);
+ REGISTER_MATCHER(hasAnyBody);
REGISTER_MATCHER(hasAnyClause);
REGISTER_MATCHER(hasAnyConstructorInitializer);
REGISTER_MATCHER(hasAnyDeclaration);
@@ -266,6 +280,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(hasAttr);
REGISTER_MATCHER(hasAutomaticStorageDuration);
REGISTER_MATCHER(hasBase);
+ REGISTER_MATCHER(hasBinding);
REGISTER_MATCHER(hasBitWidth);
REGISTER_MATCHER(hasBody);
REGISTER_MATCHER(hasCanonicalType);
@@ -358,6 +373,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(injectedClassNameType);
REGISTER_MATCHER(innerType);
REGISTER_MATCHER(integerLiteral);
+ REGISTER_MATCHER(invocation);
REGISTER_MATCHER(isAllowedToContainClauseKind);
REGISTER_MATCHER(isAnonymous);
REGISTER_MATCHER(isAnyCharacter);
@@ -518,6 +534,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(tagType);
REGISTER_MATCHER(templateArgument);
REGISTER_MATCHER(templateArgumentCountIs);
+ REGISTER_MATCHER(templateArgumentLoc);
REGISTER_MATCHER(templateName);
REGISTER_MATCHER(templateSpecializationType);
REGISTER_MATCHER(templateTemplateParmDecl);
@@ -544,6 +561,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(userDefinedLiteral);
REGISTER_MATCHER(usesADL);
REGISTER_MATCHER(usingDecl);
+ REGISTER_MATCHER(usingEnumDecl);
REGISTER_MATCHER(usingDirectiveDecl);
REGISTER_MATCHER(valueDecl);
REGISTER_MATCHER(varDecl);
@@ -557,6 +575,26 @@ RegistryMaps::~RegistryMaps() = default;
static llvm::ManagedStatic<RegistryMaps> RegistryData;
+ASTNodeKind Registry::nodeMatcherType(MatcherCtor Ctor) {
+ return Ctor->nodeMatcherType();
+}
+
+internal::MatcherDescriptorPtr::MatcherDescriptorPtr(MatcherDescriptor *Ptr)
+ : Ptr(Ptr) {}
+
+internal::MatcherDescriptorPtr::~MatcherDescriptorPtr() { delete Ptr; }
+
+bool Registry::isBuilderMatcher(MatcherCtor Ctor) {
+ return Ctor->isBuilderMatcher();
+}
+
+internal::MatcherDescriptorPtr
+Registry::buildMatcherCtor(MatcherCtor Ctor, SourceRange NameRange,
+ ArrayRef<ParserValue> Args, Diagnostics *Error) {
+ return internal::MatcherDescriptorPtr(
+ Ctor->buildMatcherCtor(NameRange, Args, Error).release());
+}
+
// static
llvm::Optional<MatcherCtor> Registry::lookupMatcherCtor(StringRef MatcherName) {
auto it = RegistryData->constructors().find(MatcherName);
@@ -594,7 +632,10 @@ std::vector<ArgKind> Registry::getAcceptedCompletionTypes(
// Starting with the above seed of acceptable top-level matcher types, compute
// the acceptable type set for the argument indicated by each context element.
- std::set<ArgKind> TypeSet(std::begin(InitialTypes), std::end(InitialTypes));
+ std::set<ArgKind> TypeSet;
+ for (auto IT : InitialTypes) {
+ TypeSet.insert(ArgKind::MakeMatcherArg(IT));
+ }
for (const auto &CtxEntry : Context) {
MatcherCtor Ctor = CtxEntry.first;
unsigned ArgNumber = CtxEntry.second;
@@ -625,20 +666,40 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
bool IsPolymorphic = Matcher.isPolymorphic();
std::vector<std::vector<ArgKind>> ArgsKinds(NumArgs);
unsigned MaxSpecificity = 0;
+ bool NodeArgs = false;
for (const ArgKind& Kind : AcceptedTypes) {
- if (Kind.getArgKind() != Kind.AK_Matcher)
+ if (Kind.getArgKind() != Kind.AK_Matcher &&
+ Kind.getArgKind() != Kind.AK_Node) {
continue;
- unsigned Specificity;
- ASTNodeKind LeastDerivedKind;
- if (Matcher.isConvertibleTo(Kind.getMatcherKind(), &Specificity,
- &LeastDerivedKind)) {
- if (MaxSpecificity < Specificity)
- MaxSpecificity = Specificity;
- RetKinds.insert(LeastDerivedKind);
- for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
- Matcher.getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
- if (IsPolymorphic)
- break;
+ }
+
+ if (Kind.getArgKind() == Kind.AK_Node) {
+ NodeArgs = true;
+ unsigned Specificity;
+ ASTNodeKind LeastDerivedKind;
+ if (Matcher.isConvertibleTo(Kind.getNodeKind(), &Specificity,
+ &LeastDerivedKind)) {
+ if (MaxSpecificity < Specificity)
+ MaxSpecificity = Specificity;
+ RetKinds.insert(LeastDerivedKind);
+ for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
+ Matcher.getArgKinds(Kind.getNodeKind(), Arg, ArgsKinds[Arg]);
+ if (IsPolymorphic)
+ break;
+ }
+ } else {
+ unsigned Specificity;
+ ASTNodeKind LeastDerivedKind;
+ if (Matcher.isConvertibleTo(Kind.getMatcherKind(), &Specificity,
+ &LeastDerivedKind)) {
+ if (MaxSpecificity < Specificity)
+ MaxSpecificity = Specificity;
+ RetKinds.insert(LeastDerivedKind);
+ for (unsigned Arg = 0; Arg != NumArgs; ++Arg)
+ Matcher.getArgKinds(Kind.getMatcherKind(), Arg, ArgsKinds[Arg]);
+ if (IsPolymorphic)
+ break;
+ }
}
}
@@ -646,42 +707,49 @@ Registry::getMatcherCompletions(ArrayRef<ArgKind> AcceptedTypes) {
std::string Decl;
llvm::raw_string_ostream OS(Decl);
- if (IsPolymorphic) {
- OS << "Matcher<T> " << Name << "(Matcher<T>";
+ std::string TypedText = std::string(Name);
+
+ if (NodeArgs) {
+ OS << Name;
} else {
- OS << "Matcher<" << RetKinds << "> " << Name << "(";
- for (const std::vector<ArgKind> &Arg : ArgsKinds) {
- if (&Arg != &ArgsKinds[0])
- OS << ", ";
-
- bool FirstArgKind = true;
- std::set<ASTNodeKind> MatcherKinds;
- // Two steps. First all non-matchers, then matchers only.
- for (const ArgKind &AK : Arg) {
- if (AK.getArgKind() == ArgKind::AK_Matcher) {
- MatcherKinds.insert(AK.getMatcherKind());
- } else {
+
+ if (IsPolymorphic) {
+ OS << "Matcher<T> " << Name << "(Matcher<T>";
+ } else {
+ OS << "Matcher<" << RetKinds << "> " << Name << "(";
+ for (const std::vector<ArgKind> &Arg : ArgsKinds) {
+ if (&Arg != &ArgsKinds[0])
+ OS << ", ";
+
+ bool FirstArgKind = true;
+ std::set<ASTNodeKind> MatcherKinds;
+ // Two steps. First all non-matchers, then matchers only.
+ for (const ArgKind &AK : Arg) {
+ if (AK.getArgKind() == ArgKind::AK_Matcher) {
+ MatcherKinds.insert(AK.getMatcherKind());
+ } else {
+ if (!FirstArgKind)
+ OS << "|";
+ FirstArgKind = false;
+ OS << AK.asString();
+ }
+ }
+ if (!MatcherKinds.empty()) {
if (!FirstArgKind) OS << "|";
- FirstArgKind = false;
- OS << AK.asString();
+ OS << "Matcher<" << MatcherKinds << ">";
}
}
- if (!MatcherKinds.empty()) {
- if (!FirstArgKind) OS << "|";
- OS << "Matcher<" << MatcherKinds << ">";
- }
}
+ if (Matcher.isVariadic())
+ OS << "...";
+ OS << ")";
+
+ TypedText += "(";
+ if (ArgsKinds.empty())
+ TypedText += ")";
+ else if (ArgsKinds[0][0].getArgKind() == ArgKind::AK_String)
+ TypedText += "\"";
}
- if (Matcher.isVariadic())
- OS << "...";
- OS << ")";
-
- std::string TypedText = std::string(Name);
- TypedText += "(";
- if (ArgsKinds.empty())
- TypedText += ")";
- else if (ArgsKinds[0][0].getArgKind() == ArgKind::AK_String)
- TypedText += "\"";
Completions.emplace_back(TypedText, OS.str(), MaxSpecificity);
}
diff --git a/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp b/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
index d1ecb1e00b91..813eb1597756 100644
--- a/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
+++ b/clang/lib/ASTMatchers/Dynamic/VariantValue.cpp
@@ -22,7 +22,9 @@ namespace dynamic {
std::string ArgKind::asString() const {
switch (getArgKind()) {
case AK_Matcher:
- return (Twine("Matcher<") + MatcherKind.asStringRef() + ">").str();
+ return (Twine("Matcher<") + NodeKind.asStringRef() + ">").str();
+ case AK_Node:
+ return NodeKind.asStringRef().str();
case AK_Boolean:
return "boolean";
case AK_Double:
@@ -38,13 +40,13 @@ std::string ArgKind::asString() const {
bool ArgKind::isConvertibleTo(ArgKind To, unsigned *Specificity) const {
if (K != To.K)
return false;
- if (K != AK_Matcher) {
+ if (K != AK_Matcher && K != AK_Node) {
if (Specificity)
*Specificity = 1;
return true;
}
unsigned Distance;
- if (!MatcherKind.isBaseOf(To.MatcherKind, &Distance))
+ if (!NodeKind.isBaseOf(To.NodeKind, &Distance))
return false;
if (Specificity)
@@ -107,8 +109,8 @@ public:
}
bool isConvertibleTo(ASTNodeKind Kind, unsigned *Specificity) const override {
- return ArgKind(Matcher.getSupportedKind())
- .isConvertibleTo(Kind, Specificity);
+ return ArgKind::MakeMatcherArg(Matcher.getSupportedKind())
+ .isConvertibleTo(ArgKind::MakeMatcherArg(Kind), Specificity);
}
private:
@@ -167,8 +169,9 @@ public:
unsigned MaxSpecificity = 0;
for (const DynTypedMatcher &Matcher : Matchers) {
unsigned ThisSpecificity;
- if (ArgKind(Matcher.getSupportedKind())
- .isConvertibleTo(Kind, &ThisSpecificity)) {
+ if (ArgKind::MakeMatcherArg(Matcher.getSupportedKind())
+ .isConvertibleTo(ArgKind::MakeMatcherArg(Kind),
+ &ThisSpecificity)) {
MaxSpecificity = std::max(MaxSpecificity, ThisSpecificity);
}
}
@@ -442,6 +445,11 @@ bool VariantValue::isConvertibleTo(ArgKind Kind, unsigned *Specificity) const {
*Specificity = 1;
return true;
+ case ArgKind::AK_Node:
+ if (!isNodeKind())
+ return false;
+ return getMatcher().isConvertibleTo(Kind.getNodeKind(), Specificity);
+
case ArgKind::AK_Matcher:
if (!isMatcher())
return false;
diff --git a/clang/lib/ASTMatchers/GtestMatchers.cpp b/clang/lib/ASTMatchers/GtestMatchers.cpp
index 0e587c0c3b9f..6e4c12f31969 100644
--- a/clang/lib/ASTMatchers/GtestMatchers.cpp
+++ b/clang/lib/ASTMatchers/GtestMatchers.cpp
@@ -5,78 +5,111 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+//
+// This file implements several matchers for popular gtest macros. In general,
+// AST matchers cannot match calls to macros. However, we can simulate such
+// matches if the macro definition has identifiable elements that themselves can
+// be matched. In that case, we can match on those elements and then check that
+// the match occurs within an expansion of the desired macro. The more uncommon
+// the identified elements, the more efficient this process will be.
+//
+//===----------------------------------------------------------------------===//
#include "clang/ASTMatchers/GtestMatchers.h"
-#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
-#include "llvm/Support/Timer.h"
-#include <deque>
-#include <memory>
-#include <set>
+#include "llvm/ADT/StringRef.h"
namespace clang {
namespace ast_matchers {
+namespace {
+
+enum class MacroType {
+ Expect,
+ Assert,
+ On,
+};
+
+} // namespace
static DeclarationMatcher getComparisonDecl(GtestCmp Cmp) {
switch (Cmp) {
- case GtestCmp::Eq:
- return cxxMethodDecl(hasName("Compare"),
- ofClass(cxxRecordDecl(isSameOrDerivedFrom(
- hasName("::testing::internal::EqHelper")))));
- case GtestCmp::Ne:
- return functionDecl(hasName("::testing::internal::CmpHelperNE"));
- case GtestCmp::Ge:
- return functionDecl(hasName("::testing::internal::CmpHelperGE"));
- case GtestCmp::Gt:
- return functionDecl(hasName("::testing::internal::CmpHelperGT"));
- case GtestCmp::Le:
- return functionDecl(hasName("::testing::internal::CmpHelperLE"));
- case GtestCmp::Lt:
- return functionDecl(hasName("::testing::internal::CmpHelperLT"));
+ case GtestCmp::Eq:
+ return cxxMethodDecl(hasName("Compare"),
+ ofClass(cxxRecordDecl(isSameOrDerivedFrom(
+ hasName("::testing::internal::EqHelper")))));
+ case GtestCmp::Ne:
+ return functionDecl(hasName("::testing::internal::CmpHelperNE"));
+ case GtestCmp::Ge:
+ return functionDecl(hasName("::testing::internal::CmpHelperGE"));
+ case GtestCmp::Gt:
+ return functionDecl(hasName("::testing::internal::CmpHelperGT"));
+ case GtestCmp::Le:
+ return functionDecl(hasName("::testing::internal::CmpHelperLE"));
+ case GtestCmp::Lt:
+ return functionDecl(hasName("::testing::internal::CmpHelperLT"));
}
llvm_unreachable("Unhandled GtestCmp enum");
}
-static llvm::StringRef getAssertMacro(GtestCmp Cmp) {
- switch (Cmp) {
- case GtestCmp::Eq:
- return "ASSERT_EQ";
- case GtestCmp::Ne:
- return "ASSERT_NE";
- case GtestCmp::Ge:
- return "ASSERT_GE";
- case GtestCmp::Gt:
- return "ASSERT_GT";
- case GtestCmp::Le:
- return "ASSERT_LE";
- case GtestCmp::Lt:
- return "ASSERT_LT";
+static llvm::StringRef getMacroTypeName(MacroType Macro) {
+ switch (Macro) {
+ case MacroType::Expect:
+ return "EXPECT";
+ case MacroType::Assert:
+ return "ASSERT";
+ case MacroType::On:
+ return "ON";
}
- llvm_unreachable("Unhandled GtestCmp enum");
+ llvm_unreachable("Unhandled MacroType enum");
}
-static llvm::StringRef getExpectMacro(GtestCmp Cmp) {
+static llvm::StringRef getComparisonTypeName(GtestCmp Cmp) {
switch (Cmp) {
- case GtestCmp::Eq:
- return "EXPECT_EQ";
- case GtestCmp::Ne:
- return "EXPECT_NE";
- case GtestCmp::Ge:
- return "EXPECT_GE";
- case GtestCmp::Gt:
- return "EXPECT_GT";
- case GtestCmp::Le:
- return "EXPECT_LE";
- case GtestCmp::Lt:
- return "EXPECT_LT";
+ case GtestCmp::Eq:
+ return "EQ";
+ case GtestCmp::Ne:
+ return "NE";
+ case GtestCmp::Ge:
+ return "GE";
+ case GtestCmp::Gt:
+ return "GT";
+ case GtestCmp::Le:
+ return "LE";
+ case GtestCmp::Lt:
+ return "LT";
}
llvm_unreachable("Unhandled GtestCmp enum");
}
+static std::string getMacroName(MacroType Macro, GtestCmp Cmp) {
+ return (getMacroTypeName(Macro) + "_" + getComparisonTypeName(Cmp)).str();
+}
+
+static std::string getMacroName(MacroType Macro, llvm::StringRef Operation) {
+ return (getMacroTypeName(Macro) + "_" + Operation).str();
+}
+
+// Under the hood, ON_CALL is expanded to a call to `InternalDefaultActionSetAt`
+// to set a default action spec to the underlying function mocker, while
+// EXPECT_CALL is expanded to a call to `InternalExpectedAt` to set a new
+// expectation spec.
+static llvm::StringRef getSpecSetterName(MacroType Macro) {
+ switch (Macro) {
+ case MacroType::On:
+ return "InternalDefaultActionSetAt";
+ case MacroType::Expect:
+ return "InternalExpectedAt";
+ default:
+ llvm_unreachable("Unhandled MacroType enum");
+ }
+ llvm_unreachable("Unhandled MacroType enum");
+}
+
// In general, AST matchers cannot match calls to macros. However, we can
// simulate such matches if the macro definition has identifiable elements that
// themselves can be matched. In that case, we can match on those elements and
@@ -86,18 +119,115 @@ static llvm::StringRef getExpectMacro(GtestCmp Cmp) {
//
// We use this approach to implement the derived matchers gtestAssert and
// gtestExpect.
+static internal::BindableMatcher<Stmt>
+gtestComparisonInternal(MacroType Macro, GtestCmp Cmp, StatementMatcher Left,
+ StatementMatcher Right) {
+ return callExpr(isExpandedFromMacro(getMacroName(Macro, Cmp)),
+ callee(getComparisonDecl(Cmp)), hasArgument(2, Left),
+ hasArgument(3, Right));
+}
+
+static internal::BindableMatcher<Stmt>
+gtestThatInternal(MacroType Macro, StatementMatcher Actual,
+ StatementMatcher Matcher) {
+ return cxxOperatorCallExpr(
+ isExpandedFromMacro(getMacroName(Macro, "THAT")),
+ hasOverloadedOperatorName("()"), hasArgument(2, Actual),
+ hasArgument(
+ 0, expr(hasType(classTemplateSpecializationDecl(hasName(
+ "::testing::internal::PredicateFormatterFromMatcher"))),
+ ignoringImplicit(
+ callExpr(callee(functionDecl(hasName(
+ "::testing::internal::"
+ "MakePredicateFormatterFromMatcher"))),
+ hasArgument(0, ignoringImplicit(Matcher)))))));
+}
+
+static internal::BindableMatcher<Stmt>
+gtestCallInternal(MacroType Macro, StatementMatcher MockCall, MockArgs Args) {
+ // A ON_CALL or EXPECT_CALL macro expands to different AST structures
+ // depending on whether the mock method has arguments or not.
+ switch (Args) {
+ // For example,
+ // `ON_CALL(mock, TwoParamMethod)` is expanded to
+  //   `mock.gmock_TwoParamMethod(WithoutMatchers(),
+  //   nullptr).InternalDefaultActionSetAt(...)` when the method takes no arguments.
+ // EXPECT_CALL is the same except
+ // that it calls `InternalExpectedAt` instead of `InternalDefaultActionSetAt`
+ // in the end.
+ case MockArgs::None:
+ return cxxMemberCallExpr(
+ isExpandedFromMacro(getMacroName(Macro, "CALL")),
+ callee(functionDecl(hasName(getSpecSetterName(Macro)))),
+ onImplicitObjectArgument(ignoringImplicit(MockCall)));
+ // For example,
+ // `ON_CALL(mock, TwoParamMethod(m1, m2))` is expanded to
+ // `mock.gmock_TwoParamMethod(m1,m2)(WithoutMatchers(),
+ // nullptr).InternalDefaultActionSetAt(...)`.
+ // EXPECT_CALL is the same except that it calls `InternalExpectedAt` instead
+ // of `InternalDefaultActionSetAt` in the end.
+ case MockArgs::Some:
+ return cxxMemberCallExpr(
+ isExpandedFromMacro(getMacroName(Macro, "CALL")),
+ callee(functionDecl(hasName(getSpecSetterName(Macro)))),
+ onImplicitObjectArgument(ignoringImplicit(cxxOperatorCallExpr(
+ hasOverloadedOperatorName("()"), argumentCountIs(3),
+ hasArgument(0, ignoringImplicit(MockCall))))));
+ }
+ llvm_unreachable("Unhandled MockArgs enum");
+}
+
+static internal::BindableMatcher<Stmt>
+gtestCallInternal(MacroType Macro, StatementMatcher MockObject,
+ llvm::StringRef MockMethodName, MockArgs Args) {
+ return gtestCallInternal(
+ Macro,
+ cxxMemberCallExpr(
+ onImplicitObjectArgument(MockObject),
+ callee(functionDecl(hasName(("gmock_" + MockMethodName).str())))),
+ Args);
+}
+
internal::BindableMatcher<Stmt> gtestAssert(GtestCmp Cmp, StatementMatcher Left,
StatementMatcher Right) {
- return callExpr(callee(getComparisonDecl(Cmp)),
- isExpandedFromMacro(getAssertMacro(Cmp).str()),
- hasArgument(2, Left), hasArgument(3, Right));
+ return gtestComparisonInternal(MacroType::Assert, Cmp, Left, Right);
}
internal::BindableMatcher<Stmt> gtestExpect(GtestCmp Cmp, StatementMatcher Left,
StatementMatcher Right) {
- return callExpr(callee(getComparisonDecl(Cmp)),
- isExpandedFromMacro(getExpectMacro(Cmp).str()),
- hasArgument(2, Left), hasArgument(3, Right));
+ return gtestComparisonInternal(MacroType::Expect, Cmp, Left, Right);
+}
+
+internal::BindableMatcher<Stmt> gtestAssertThat(StatementMatcher Actual,
+ StatementMatcher Matcher) {
+ return gtestThatInternal(MacroType::Assert, Actual, Matcher);
+}
+
+internal::BindableMatcher<Stmt> gtestExpectThat(StatementMatcher Actual,
+ StatementMatcher Matcher) {
+ return gtestThatInternal(MacroType::Expect, Actual, Matcher);
+}
+
+internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockObject,
+ llvm::StringRef MockMethodName,
+ MockArgs Args) {
+ return gtestCallInternal(MacroType::On, MockObject, MockMethodName, Args);
+}
+
+internal::BindableMatcher<Stmt> gtestOnCall(StatementMatcher MockCall,
+ MockArgs Args) {
+ return gtestCallInternal(MacroType::On, MockCall, Args);
+}
+
+internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockObject,
+ llvm::StringRef MockMethodName,
+ MockArgs Args) {
+ return gtestCallInternal(MacroType::Expect, MockObject, MockMethodName, Args);
+}
+
+internal::BindableMatcher<Stmt> gtestExpectCall(StatementMatcher MockCall,
+ MockArgs Args) {
+ return gtestCallInternal(MacroType::Expect, MockCall, Args);
}
} // end namespace ast_matchers
diff --git a/clang/lib/Analysis/AnalysisDeclContext.cpp b/clang/lib/Analysis/AnalysisDeclContext.cpp
index 783de6442645..d8466ac34a3d 100644
--- a/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -337,6 +337,59 @@ bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
return ND->isStdNamespace();
}
+std::string AnalysisDeclContext::getFunctionName(const Decl *D) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ const ASTContext &Ctx = D->getASTContext();
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ OS << FD->getQualifiedNameAsString();
+
+ // In C++, there are overloads.
+
+ if (Ctx.getLangOpts().CPlusPlus) {
+ OS << '(';
+ for (const auto &P : FD->parameters()) {
+ if (P != *FD->param_begin())
+ OS << ", ";
+ OS << P->getType().getAsString();
+ }
+ OS << ')';
+ }
+
+ } else if (isa<BlockDecl>(D)) {
+ PresumedLoc Loc = Ctx.getSourceManager().getPresumedLoc(D->getLocation());
+
+ if (Loc.isValid()) {
+ OS << "block (line: " << Loc.getLine() << ", col: " << Loc.getColumn()
+ << ')';
+ }
+
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+
+ // FIXME: copy-pasted from CGDebugInfo.cpp.
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const auto *OID = dyn_cast<ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const auto *OID = dyn_cast<ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(DC)) {
+ if (OC->IsClassExtension()) {
+ OS << OC->getClassInterface()->getName();
+ } else {
+ OS << OC->getIdentifier()->getNameStart() << '('
+ << OC->getIdentifier()->getNameStart() << ')';
+ }
+ } else if (const auto *OCD = dyn_cast<ObjCCategoryImplDecl>(DC)) {
+ OS << OCD->getClassInterface()->getName() << '(' << OCD->getName() << ')';
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+ }
+
+ return OS.str();
+}
+
LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
assert(
ADCMgr &&
@@ -456,7 +509,7 @@ void LocationContext::dumpStack(raw_ostream &Out) const {
Out << "\t#" << Frame << ' ';
++Frame;
if (const auto *D = dyn_cast<NamedDecl>(LCtx->getDecl()))
- Out << "Calling " << D->getQualifiedNameAsString();
+ Out << "Calling " << AnalysisDeclContext::getFunctionName(D);
else
Out << "Calling anonymous code";
if (const Stmt *S = cast<StackFrameContext>(LCtx)->getCallSite()) {
diff --git a/clang/lib/Analysis/BodyFarm.cpp b/clang/lib/Analysis/BodyFarm.cpp
index 603da6715625..e357bfb29b82 100644
--- a/clang/lib/Analysis/BodyFarm.cpp
+++ b/clang/lib/Analysis/BodyFarm.cpp
@@ -116,7 +116,7 @@ BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
QualType Ty) {
return BinaryOperator::Create(
C, const_cast<Expr *>(LHS), const_cast<Expr *>(RHS), BO_Assign, Ty,
- VK_RValue, OK_Ordinary, SourceLocation(), FPOptionsOverride());
+ VK_PRValue, OK_Ordinary, SourceLocation(), FPOptionsOverride());
}
BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
@@ -125,7 +125,7 @@ BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
BinaryOperator::isComparisonOp(Op));
return BinaryOperator::Create(
C, const_cast<Expr *>(LHS), const_cast<Expr *>(RHS), Op,
- C.getLogicalOperationType(), VK_RValue, OK_Ordinary, SourceLocation(),
+ C.getLogicalOperationType(), VK_PRValue, OK_Ordinary, SourceLocation(),
FPOptionsOverride());
}
@@ -169,7 +169,7 @@ ImplicitCastExpr *ASTMaker::makeImplicitCast(const Expr *Arg, QualType Ty,
/* CastKind=*/CK,
/* Expr=*/const_cast<Expr *>(Arg),
/* CXXCastPath=*/nullptr,
- /* ExprValueKind=*/VK_RValue,
+ /* ExprValueKind=*/VK_PRValue,
/* FPFeatures */ FPOptionsOverride());
}
@@ -264,7 +264,7 @@ static CallExpr *create_call_once_funcptr_call(ASTContext &C, ASTMaker M,
llvm_unreachable("Unexpected state");
}
- return CallExpr::Create(C, SubExpr, CallArgs, C.VoidTy, VK_RValue,
+ return CallExpr::Create(C, SubExpr, CallArgs, C.VoidTy, VK_PRValue,
SourceLocation(), FPOptionsOverride());
}
@@ -291,7 +291,7 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/*AstContext=*/C, OO_Call, callOperatorDeclRef,
/*Args=*/CallArgs,
/*QualType=*/C.VoidTy,
- /*ExprValueType=*/VK_RValue,
+ /*ExprValueType=*/VK_PRValue,
/*SourceLocation=*/SourceLocation(),
/*FPFeatures=*/FPOptionsOverride());
}
@@ -451,7 +451,7 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
CK_IntegralToBoolean),
/* opc=*/UO_LNot,
/* QualType=*/C.IntTy,
- /* ExprValueKind=*/VK_RValue,
+ /* ExprValueKind=*/VK_PRValue,
/* ExprObjectKind=*/OK_Ordinary, SourceLocation(),
/* CanOverflow*/ false, FPOptionsOverride());
@@ -513,13 +513,13 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
/*StmtClass=*/M.makeLvalueToRvalue(/*Expr=*/Block),
/*Args=*/None,
/*QualType=*/C.VoidTy,
- /*ExprValueType=*/VK_RValue,
+ /*ExprValueType=*/VK_PRValue,
/*SourceLocation=*/SourceLocation(), FPOptionsOverride());
// (2) Create the assignment to the predicate.
Expr *DoneValue =
UnaryOperator::Create(C, M.makeIntegerLiteral(0, C.LongTy), UO_Not,
- C.LongTy, VK_RValue, OK_Ordinary, SourceLocation(),
+ C.LongTy, VK_PRValue, OK_Ordinary, SourceLocation(),
/*CanOverflow*/ false, FPOptionsOverride());
BinaryOperator *B =
@@ -580,7 +580,7 @@ static Stmt *create_dispatch_sync(ASTContext &C, const FunctionDecl *D) {
ASTMaker M(C);
DeclRefExpr *DR = M.makeDeclRefExpr(PV);
ImplicitCastExpr *ICE = M.makeLvalueToRvalue(DR, Ty);
- CallExpr *CE = CallExpr::Create(C, ICE, None, C.VoidTy, VK_RValue,
+ CallExpr *CE = CallExpr::Create(C, ICE, None, C.VoidTy, VK_PRValue,
SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -742,8 +742,9 @@ static const ObjCIvarDecl *findBackingIvar(const ObjCPropertyDecl *Prop) {
static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
const ObjCMethodDecl *MD) {
- // First, find the backing ivar.
+ // First, find the backing ivar.
const ObjCIvarDecl *IVar = nullptr;
+ const ObjCPropertyDecl *Prop = nullptr;
// Property accessor stubs sometimes do not correspond to any property decl
// in the current interface (but in a superclass). They still have a
@@ -751,54 +752,57 @@ static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
if (MD->isSynthesizedAccessorStub()) {
const ObjCInterfaceDecl *IntD = MD->getClassInterface();
const ObjCImplementationDecl *ImpD = IntD->getImplementation();
- for (const auto *PI: ImpD->property_impls()) {
- if (const ObjCPropertyDecl *P = PI->getPropertyDecl()) {
- if (P->getGetterName() == MD->getSelector())
- IVar = P->getPropertyIvarDecl();
+ for (const auto *PI : ImpD->property_impls()) {
+ if (const ObjCPropertyDecl *Candidate = PI->getPropertyDecl()) {
+ if (Candidate->getGetterName() == MD->getSelector()) {
+ Prop = Candidate;
+ IVar = Prop->getPropertyIvarDecl();
+ }
}
}
}
if (!IVar) {
- const ObjCPropertyDecl *Prop = MD->findPropertyDecl();
+ Prop = MD->findPropertyDecl();
IVar = findBackingIvar(Prop);
- if (!IVar)
- return nullptr;
+ }
- // Ignore weak variables, which have special behavior.
- if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
- return nullptr;
+ if (!IVar || !Prop)
+ return nullptr;
+
+ // Ignore weak variables, which have special behavior.
+ if (Prop->getPropertyAttributes() & ObjCPropertyAttribute::kind_weak)
+ return nullptr;
- // Look to see if Sema has synthesized a body for us. This happens in
- // Objective-C++ because the return value may be a C++ class type with a
- // non-trivial copy constructor. We can only do this if we can find the
- // @synthesize for this property, though (or if we know it's been auto-
- // synthesized).
- const ObjCImplementationDecl *ImplDecl =
+ // Look to see if Sema has synthesized a body for us. This happens in
+ // Objective-C++ because the return value may be a C++ class type with a
+ // non-trivial copy constructor. We can only do this if we can find the
+ // @synthesize for this property, though (or if we know it's been auto-
+ // synthesized).
+ const ObjCImplementationDecl *ImplDecl =
IVar->getContainingInterface()->getImplementation();
- if (ImplDecl) {
- for (const auto *I : ImplDecl->property_impls()) {
- if (I->getPropertyDecl() != Prop)
- continue;
-
- if (I->getGetterCXXConstructor()) {
- ASTMaker M(Ctx);
- return M.makeReturn(I->getGetterCXXConstructor());
- }
+ if (ImplDecl) {
+ for (const auto *I : ImplDecl->property_impls()) {
+ if (I->getPropertyDecl() != Prop)
+ continue;
+
+ if (I->getGetterCXXConstructor()) {
+ ASTMaker M(Ctx);
+ return M.makeReturn(I->getGetterCXXConstructor());
}
}
-
- // Sanity check that the property is the same type as the ivar, or a
- // reference to it, and that it is either an object pointer or trivially
- // copyable.
- if (!Ctx.hasSameUnqualifiedType(IVar->getType(),
- Prop->getType().getNonReferenceType()))
- return nullptr;
- if (!IVar->getType()->isObjCLifetimeType() &&
- !IVar->getType().isTriviallyCopyableType(Ctx))
- return nullptr;
}
+ // Sanity check that the property is the same type as the ivar, or a
+ // reference to it, and that it is either an object pointer or trivially
+ // copyable.
+ if (!Ctx.hasSameUnqualifiedType(IVar->getType(),
+ Prop->getType().getNonReferenceType()))
+ return nullptr;
+ if (!IVar->getType()->isObjCLifetimeType() &&
+ !IVar->getType().isTriviallyCopyableType(Ctx))
+ return nullptr;
+
// Generate our body:
// return self->_ivar;
ASTMaker M(Ctx);
@@ -807,11 +811,8 @@ static Stmt *createObjCPropertyGetter(ASTContext &Ctx,
if (!selfVar)
return nullptr;
- Expr *loadedIVar =
- M.makeObjCIvarRef(
- M.makeLvalueToRvalue(
- M.makeDeclRefExpr(selfVar),
- selfVar->getType()),
+ Expr *loadedIVar = M.makeObjCIvarRef(
+ M.makeLvalueToRvalue(M.makeDeclRefExpr(selfVar), selfVar->getType()),
IVar);
if (!MD->getReturnType()->isReferenceType())
diff --git a/clang/lib/Analysis/CFG.cpp b/clang/lib/Analysis/CFG.cpp
index edc86c41c3b9..ba5eceda24b5 100644
--- a/clang/lib/Analysis/CFG.cpp
+++ b/clang/lib/Analysis/CFG.cpp
@@ -1456,6 +1456,13 @@ void CFGBuilder::findConstructionContexts(
// TODO: Handle other cases. For now, fail to find construction contexts.
break;
}
+ case Stmt::ParenExprClass: {
+ // If expression is placed into parenthesis we should propagate the parent
+ // construction context to subexpressions.
+ auto *PE = cast<ParenExpr>(Child);
+ findConstructionContexts(Layer, PE->getSubExpr());
+ break;
+ }
default:
break;
}
@@ -4475,8 +4482,14 @@ CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
// Add implicit scope and dtors for loop variable.
addLocalScopeAndDtors(S->getLoopVarStmt());
+ // If body is not a compound statement create implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(S->getBody()))
+ addLocalScopeAndDtors(S->getBody());
+
// Populate a new block to contain the loop body and loop variable.
addStmt(S->getBody());
+
if (badCFG)
return nullptr;
CFGBlock *LoopVarStmtBlock = addStmt(S->getLoopVarStmt());
diff --git a/clang/lib/Analysis/CalledOnceCheck.cpp b/clang/lib/Analysis/CalledOnceCheck.cpp
index 883629a300dc..661f7b999f2b 100644
--- a/clang/lib/Analysis/CalledOnceCheck.cpp
+++ b/clang/lib/Analysis/CalledOnceCheck.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/CalledOnceCheck.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
@@ -22,6 +23,7 @@
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowWorklist.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/BitVector.h"
@@ -47,12 +49,29 @@ static constexpr unsigned EXPECTED_NUMBER_OF_BASIC_BLOCKS = 8;
template <class T>
using CFGSizedVector = llvm::SmallVector<T, EXPECTED_NUMBER_OF_BASIC_BLOCKS>;
constexpr llvm::StringLiteral CONVENTIONAL_NAMES[] = {
- "completionHandler", "completion", "withCompletionHandler"};
+ "completionHandler", "completion", "withCompletionHandler",
+ "withCompletion", "completionBlock", "withCompletionBlock",
+ "replyTo", "reply", "withReplyTo"};
constexpr llvm::StringLiteral CONVENTIONAL_SUFFIXES[] = {
- "WithCompletionHandler", "WithCompletion"};
+ "WithCompletionHandler", "WithCompletion", "WithCompletionBlock",
+ "WithReplyTo", "WithReply"};
constexpr llvm::StringLiteral CONVENTIONAL_CONDITIONS[] = {
"error", "cancel", "shouldCall", "done", "OK", "success"};
+struct KnownCalledOnceParameter {
+ llvm::StringLiteral FunctionName;
+ unsigned ParamIndex;
+};
+constexpr KnownCalledOnceParameter KNOWN_CALLED_ONCE_PARAMETERS[] = {
+ {llvm::StringLiteral{"dispatch_async"}, 1},
+ {llvm::StringLiteral{"dispatch_async_and_wait"}, 1},
+ {llvm::StringLiteral{"dispatch_after"}, 2},
+ {llvm::StringLiteral{"dispatch_sync"}, 1},
+ {llvm::StringLiteral{"dispatch_once"}, 1},
+ {llvm::StringLiteral{"dispatch_barrier_async"}, 1},
+ {llvm::StringLiteral{"dispatch_barrier_async_and_wait"}, 1},
+ {llvm::StringLiteral{"dispatch_barrier_sync"}, 1}};
+
class ParameterStatus {
public:
// Status kind is basically the main part of parameter's status.
@@ -330,6 +349,29 @@ public:
return Visit(OVE->getSourceExpr());
}
+ const DeclRefExpr *VisitCallExpr(const CallExpr *CE) {
+ if (!ShouldRetrieveFromComparisons)
+ return nullptr;
+
+ // We want to see through some of the boolean builtin functions
+ // that we are likely to see in conditions.
+ switch (CE->getBuiltinCallee()) {
+ case Builtin::BI__builtin_expect:
+ case Builtin::BI__builtin_expect_with_probability: {
+ assert(CE->getNumArgs() >= 2);
+
+ const DeclRefExpr *Candidate = Visit(CE->getArg(0));
+ return Candidate != nullptr ? Candidate : Visit(CE->getArg(1));
+ }
+
+ case Builtin::BI__builtin_unpredictable:
+ return Visit(CE->getArg(0));
+
+ default:
+ return nullptr;
+ }
+ }
+
const DeclRefExpr *VisitExpr(const Expr *E) {
// It is a fallback method that gets called whenever the actual type
// of the given expression is not covered.
@@ -436,7 +478,7 @@ bool mentionsAnyOfConventionalNames(const Expr *E) {
return llvm::any_of(
CONVENTIONAL_CONDITIONS,
[ConditionName](const llvm::StringLiteral &Conventional) {
- return ConditionName.contains_lower(Conventional);
+ return ConditionName.contains_insensitive(Conventional);
});
});
}
@@ -770,8 +812,12 @@ private:
}
}
- // Early exit if we don't have parameters for extra analysis.
- if (NotCalledOnEveryPath.none() && NotUsedOnEveryPath.none())
+ // Early exit if we don't have parameters for extra analysis...
+ if (NotCalledOnEveryPath.none() && NotUsedOnEveryPath.none() &&
+ // ... or if we've seen variables with cleanup functions.
+ // We can't reason that we've seen every path in this case,
+ // and thus abandon reporting any warnings that imply that.
+ !FunctionHasCleanupVars)
return;
// We are looking for a pair of blocks A, B so that the following is true:
@@ -840,16 +886,14 @@ private:
// Let's check if any of the call arguments is a point of interest.
for (const auto &Argument : llvm::enumerate(Arguments)) {
if (auto Index = getIndexOfExpression(Argument.value())) {
- ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(*Index);
-
if (shouldBeCalledOnce(CallOrMessage, Argument.index())) {
// If the corresponding parameter is marked as 'called_once' we should
// consider it as a call.
processCallFor(*Index, CallOrMessage);
- } else if (CurrentParamStatus.getKind() == ParameterStatus::NotCalled) {
+ } else {
// Otherwise, we mark this parameter as escaped, which can be
// interpreted both as called or not called depending on the context.
- CurrentParamStatus = ParameterStatus::Escaped;
+ processEscapeFor(*Index);
}
// Otherwise, let's keep the state as it is.
}
@@ -883,6 +927,16 @@ private:
}
}
+ /// Process escape of the parameter with the given index
+ void processEscapeFor(unsigned Index) {
+ ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(Index);
+
+ // Escape overrides whatever error we think happened.
+ if (CurrentParamStatus.isErrorStatus()) {
+ CurrentParamStatus = ParameterStatus::Escaped;
+ }
+ }
+
void findAndReportNotCalledBranches(const CFGBlock *Parent, unsigned Index,
bool IsEscape = false) {
for (const CFGBlock *Succ : Parent->succs()) {
@@ -894,9 +948,9 @@ private:
"Block should have at least two successors at this point");
if (auto Clarification = NotCalledClarifier::clarify(Parent, Succ)) {
const ParmVarDecl *Parameter = getParameter(Index);
- Handler.handleNeverCalled(Parameter, Clarification->Location,
- Clarification->Reason, !IsEscape,
- !isExplicitlyMarked(Parameter));
+ Handler.handleNeverCalled(
+ Parameter, AC.getDecl(), Clarification->Location,
+ Clarification->Reason, !IsEscape, !isExplicitlyMarked(Parameter));
}
}
}
@@ -929,9 +983,9 @@ private:
return false;
}
- QualType BlockType = Ty->getAs<BlockPointerType>()->getPointeeType();
+ QualType BlockType = Ty->castAs<BlockPointerType>()->getPointeeType();
// Completion handlers should have a block type with void return type.
- return BlockType->getAs<FunctionType>()->getReturnType()->isVoidType();
+ return BlockType->castAs<FunctionType>()->getReturnType()->isVoidType();
}
/// Return true if the only parameter of the function is conventional.
@@ -957,11 +1011,16 @@ private:
return llvm::None;
}
+ /// Return true if the specified selector represents init method.
+ static bool isInitMethod(Selector MethodSelector) {
+ return MethodSelector.getMethodFamily() == OMF_init;
+ }
+
/// Return true if the specified selector piece matches conventions.
static bool isConventionalSelectorPiece(Selector MethodSelector,
unsigned PieceIndex,
QualType PieceType) {
- if (!isConventional(PieceType)) {
+ if (!isConventional(PieceType) || isInitMethod(MethodSelector)) {
return false;
}
@@ -970,13 +1029,15 @@ private:
return hasConventionalSuffix(MethodSelector.getNameForSlot(0));
}
- return isConventional(MethodSelector.getNameForSlot(PieceIndex));
+ llvm::StringRef PieceName = MethodSelector.getNameForSlot(PieceIndex);
+ return isConventional(PieceName) || hasConventionalSuffix(PieceName);
}
bool shouldBeCalledOnce(const ParmVarDecl *Parameter) const {
return isExplicitlyMarked(Parameter) ||
(CheckConventionalParameters &&
- isConventional(Parameter->getName()) &&
+ (isConventional(Parameter->getName()) ||
+ hasConventionalSuffix(Parameter->getName())) &&
isConventional(Parameter->getType()));
}
@@ -1054,6 +1115,91 @@ private:
return false;
}
+ // Return a call site where the block is called exactly once or null otherwise
+ const Expr *getBlockGuaraneedCallSite(const BlockExpr *Block) const {
+ ParentMap &PM = AC.getParentMap();
+
+ // We don't want to track the block through assignments and so on, instead
+ // we simply see how the block used and if it's used directly in a call,
+ // we decide based on call to what it is.
+ //
+    // In order to do this, we go up the parents of the block looking for
+    // a call or a message expression. These might not be immediate parents
+ // of the actual block expression due to casts and parens, so we skip them.
+ for (const Stmt *Prev = Block, *Current = PM.getParent(Block);
+ Current != nullptr; Prev = Current, Current = PM.getParent(Current)) {
+ // Skip no-op (for our case) operations.
+ if (isa<CastExpr>(Current) || isa<ParenExpr>(Current))
+ continue;
+
+ // At this point, Prev represents our block as an immediate child of the
+ // call.
+ if (const auto *Call = dyn_cast<CallExpr>(Current)) {
+ // It might be the call of the Block itself...
+ if (Call->getCallee() == Prev)
+ return Call;
+
+ // ...or it can be an indirect call of the block.
+ return shouldBlockArgumentBeCalledOnce(Call, Prev) ? Call : nullptr;
+ }
+ if (const auto *Message = dyn_cast<ObjCMessageExpr>(Current)) {
+ return shouldBlockArgumentBeCalledOnce(Message, Prev) ? Message
+ : nullptr;
+ }
+
+ break;
+ }
+
+ return nullptr;
+ }
+
+ template <class CallLikeExpr>
+ bool shouldBlockArgumentBeCalledOnce(const CallLikeExpr *CallOrMessage,
+ const Stmt *BlockArgument) const {
+ // CallExpr::arguments does not interact nicely with llvm::enumerate.
+ llvm::ArrayRef<const Expr *> Arguments = llvm::makeArrayRef(
+ CallOrMessage->getArgs(), CallOrMessage->getNumArgs());
+
+ for (const auto &Argument : llvm::enumerate(Arguments)) {
+ if (Argument.value() == BlockArgument) {
+ return shouldBlockArgumentBeCalledOnce(CallOrMessage, Argument.index());
+ }
+ }
+
+ return false;
+ }
+
+ bool shouldBlockArgumentBeCalledOnce(const CallExpr *Call,
+ unsigned ParamIndex) const {
+ const FunctionDecl *Function = Call->getDirectCallee();
+ return shouldBlockArgumentBeCalledOnce(Function, ParamIndex) ||
+ shouldBeCalledOnce(Call, ParamIndex);
+ }
+
+ bool shouldBlockArgumentBeCalledOnce(const ObjCMessageExpr *Message,
+ unsigned ParamIndex) const {
+ // At the moment, we don't have any Obj-C methods we want to specifically
+ // check in here.
+ return shouldBeCalledOnce(Message, ParamIndex);
+ }
+
+ static bool shouldBlockArgumentBeCalledOnce(const FunctionDecl *Function,
+ unsigned ParamIndex) {
+ // There is a list of important API functions that while not following
+ // conventions nor being directly annotated, still guarantee that the
+ // callback parameter will be called exactly once.
+ //
+ // Here we check if this is the case.
+ return Function &&
+ llvm::any_of(KNOWN_CALLED_ONCE_PARAMETERS,
+ [Function, ParamIndex](
+ const KnownCalledOnceParameter &Reference) {
+ return Reference.FunctionName ==
+ Function->getName() &&
+ Reference.ParamIndex == ParamIndex;
+ });
+ }
+
/// Return true if the analyzed function is actually a default implementation
+  /// of the method that has to be overridden.
///
@@ -1336,11 +1482,7 @@ private:
/// Check given parameter that was discovered to escape.
void checkEscapee(const ParmVarDecl &Parameter) {
if (auto Index = getIndex(Parameter)) {
- ParameterStatus &CurrentParamStatus = CurrentState.getStatusFor(*Index);
-
- if (CurrentParamStatus.getKind() == ParameterStatus::NotCalled) {
- CurrentParamStatus = ParameterStatus::Escaped;
- }
+ processEscapeFor(*Index);
}
}
@@ -1404,17 +1546,44 @@ public:
}
void VisitBlockExpr(const BlockExpr *Block) {
+ // Block expressions are tricky. It is a very common practice to capture
+ // completion handlers by blocks and use them there.
+ // For this reason, it is important to analyze blocks and report warnings
+ // for completion handler misuse in blocks.
+ //
+ // However, it can be quite difficult to track how the block itself is being
+    // used. The full precise analysis of that will be similar to alias analysis
+ // for completion handlers and can be too heavyweight for a compile-time
+ // diagnostic. Instead, we judge about the immediate use of the block.
+ //
+ // Here, we try to find a call expression where we know due to conventions,
+ // annotations, or other reasons that the block is called once and only
+ // once.
+ const Expr *CalledOnceCallSite = getBlockGuaraneedCallSite(Block);
+
+ // We need to report this information to the handler because in the
+ // situation when we know that the block is called exactly once, we can be
+ // stricter in terms of reported diagnostics.
+ if (CalledOnceCallSite) {
+ Handler.handleBlockThatIsGuaranteedToBeCalledOnce(Block->getBlockDecl());
+ } else {
+ Handler.handleBlockWithNoGuarantees(Block->getBlockDecl());
+ }
+
for (const auto &Capture : Block->getBlockDecl()->captures()) {
- // If a block captures a tracked parameter, it should be
- // considered escaped.
- // On one hand, blocks that do that should definitely call it on
- // every path. However, it is not guaranteed that the block
- // itself gets called whenever it gets created.
- //
- // Because we don't want to track blocks and whether they get called,
- // we consider such parameters simply escaped.
if (const auto *Param = dyn_cast<ParmVarDecl>(Capture.getVariable())) {
- checkEscapee(*Param);
+ if (auto Index = getIndex(*Param)) {
+ if (CalledOnceCallSite) {
+ // The call site of a block can be considered a call site of the
+ // captured parameter we track.
+ processCallFor(*Index, CalledOnceCallSite);
+ } else {
+ // We still should consider this block as an escape for parameter,
+          // if we don't know about its call site or the number of times it
+ // can be invoked.
+ processEscapeFor(*Index);
+ }
+ }
}
}
}
@@ -1441,6 +1610,10 @@ public:
if (Var->getInit()) {
checkEscapee(Var->getInit());
}
+
+ if (Var->hasAttr<CleanupAttr>()) {
+ FunctionHasCleanupVars = true;
+ }
}
}
}
@@ -1509,6 +1682,13 @@ private:
// around.
bool SuppressOnConventionalErrorPaths = false;
+ // The user can annotate variable declarations with cleanup functions, which
+ // essentially imposes a custom destructor logic on that variable.
+ // It is possible to use it, however, to call tracked parameters on all exits
+ // from the function. For this reason, we track the fact that the function
+ // actually has these.
+ bool FunctionHasCleanupVars = false;
+
State CurrentState;
ParamSizedVector<const ParmVarDecl *> TrackedParams;
CFGSizedVector<State> States;
diff --git a/clang/lib/Analysis/LiveVariables.cpp b/clang/lib/Analysis/LiveVariables.cpp
index 8cdc4cc5bd61..6c601c290c92 100644
--- a/clang/lib/Analysis/LiveVariables.cpp
+++ b/clang/lib/Analysis/LiveVariables.cpp
@@ -325,6 +325,11 @@ static bool writeShouldKill(const VarDecl *VD) {
}
void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
+ if (LV.killAtAssign && B->getOpcode() == BO_Assign) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParens())) {
+ LV.inAssignment[DR] = 1;
+ }
+ }
if (B->isAssignmentOp()) {
if (!LV.killAtAssign)
return;
@@ -513,29 +518,8 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC, bool killAtAssign) {
llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
// FIXME: we should enqueue using post order.
- for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
- const CFGBlock *block = *it;
- worklist.enqueueBlock(block);
-
- // FIXME: Scan for DeclRefExprs using in the LHS of an assignment.
- // We need to do this because we lack context in the reverse analysis
- // to determine if a DeclRefExpr appears in such a context, and thus
- // doesn't constitute a "use".
- if (killAtAssign)
- for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
- bi != be; ++bi) {
- if (Optional<CFGStmt> cs = bi->getAs<CFGStmt>()) {
- const Stmt* stmt = cs->getStmt();
- if (const auto *BO = dyn_cast<BinaryOperator>(stmt)) {
- if (BO->getOpcode() == BO_Assign) {
- if (const auto *DR =
- dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
- LV->inAssignment[DR] = 1;
- }
- }
- }
- }
- }
+ for (const CFGBlock *B : cfg->nodes()) {
+ worklist.enqueueBlock(B);
}
while (const CFGBlock *block = worklist.dequeue()) {
diff --git a/clang/lib/Analysis/MacroExpansionContext.cpp b/clang/lib/Analysis/MacroExpansionContext.cpp
new file mode 100644
index 000000000000..290510691891
--- /dev/null
+++ b/clang/lib/Analysis/MacroExpansionContext.cpp
@@ -0,0 +1,231 @@
+//===- MacroExpansionContext.cpp - Macro expansion information --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/MacroExpansionContext.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "macro-expansion-context"
+
+static void dumpTokenInto(const clang::Preprocessor &PP, clang::raw_ostream &OS,
+ clang::Token Tok);
+
+namespace clang {
+namespace detail {
+class MacroExpansionRangeRecorder : public PPCallbacks {
+ const Preprocessor &PP;
+ SourceManager &SM;
+ MacroExpansionContext::ExpansionRangeMap &ExpansionRanges;
+
+public:
+ explicit MacroExpansionRangeRecorder(
+ const Preprocessor &PP, SourceManager &SM,
+ MacroExpansionContext::ExpansionRangeMap &ExpansionRanges)
+ : PP(PP), SM(SM), ExpansionRanges(ExpansionRanges) {}
+
+ void MacroExpands(const Token &MacroName, const MacroDefinition &MD,
+ SourceRange Range, const MacroArgs *Args) override {
+ // Ignore annotation tokens like: _Pragma("pack(push, 1)")
+ if (MacroName.getIdentifierInfo()->getName() == "_Pragma")
+ return;
+
+ SourceLocation MacroNameBegin = SM.getExpansionLoc(MacroName.getLocation());
+ assert(MacroNameBegin == SM.getExpansionLoc(Range.getBegin()));
+
+ const SourceLocation ExpansionEnd = [Range, &SM = SM, &MacroName] {
+ // If the range is empty, use the length of the macro.
+ if (Range.getBegin() == Range.getEnd())
+ return SM.getExpansionLoc(
+ MacroName.getLocation().getLocWithOffset(MacroName.getLength()));
+
+ // Include the last character.
+ return SM.getExpansionLoc(Range.getEnd()).getLocWithOffset(1);
+ }();
+
+ (void)PP;
+ LLVM_DEBUG(llvm::dbgs() << "MacroExpands event: '";
+ dumpTokenInto(PP, llvm::dbgs(), MacroName);
+ llvm::dbgs()
+ << "' with length " << MacroName.getLength() << " at ";
+ MacroNameBegin.print(llvm::dbgs(), SM);
+ llvm::dbgs() << ", expansion end at ";
+ ExpansionEnd.print(llvm::dbgs(), SM); llvm::dbgs() << '\n';);
+
+ // If the expansion range is empty, use the identifier of the macro as a
+ // range.
+ MacroExpansionContext::ExpansionRangeMap::iterator It;
+ bool Inserted;
+ std::tie(It, Inserted) =
+ ExpansionRanges.try_emplace(MacroNameBegin, ExpansionEnd);
+ if (Inserted) {
+ LLVM_DEBUG(llvm::dbgs() << "maps ";
+ It->getFirst().print(llvm::dbgs(), SM); llvm::dbgs() << " to ";
+ It->getSecond().print(llvm::dbgs(), SM);
+ llvm::dbgs() << '\n';);
+ } else {
+ if (SM.isBeforeInTranslationUnit(It->getSecond(), ExpansionEnd)) {
+ It->getSecond() = ExpansionEnd;
+ LLVM_DEBUG(
+ llvm::dbgs() << "remaps "; It->getFirst().print(llvm::dbgs(), SM);
+ llvm::dbgs() << " to "; It->getSecond().print(llvm::dbgs(), SM);
+ llvm::dbgs() << '\n';);
+ }
+ }
+ }
+};
+} // namespace detail
+} // namespace clang
+
+using namespace clang;
+
+MacroExpansionContext::MacroExpansionContext(const LangOptions &LangOpts)
+ : LangOpts(LangOpts) {}
+
+void MacroExpansionContext::registerForPreprocessor(Preprocessor &NewPP) {
+ PP = &NewPP;
+ SM = &NewPP.getSourceManager();
+
+ // Make sure that the Preprocessor does not outlive the MacroExpansionContext.
+ PP->addPPCallbacks(std::make_unique<detail::MacroExpansionRangeRecorder>(
+ *PP, *SM, ExpansionRanges));
+ // Same applies here.
+ PP->setTokenWatcher([this](const Token &Tok) { onTokenLexed(Tok); });
+}
+
+Optional<StringRef>
+MacroExpansionContext::getExpandedText(SourceLocation MacroExpansionLoc) const {
+ if (MacroExpansionLoc.isMacroID())
+ return llvm::None;
+
+ // If there was no macro expansion at that location, return None.
+ if (ExpansionRanges.find_as(MacroExpansionLoc) == ExpansionRanges.end())
+ return llvm::None;
+
+  // There was a macro expansion, but it produced no tokens; return an empty string.
+ const auto It = ExpandedTokens.find_as(MacroExpansionLoc);
+ if (It == ExpandedTokens.end())
+ return StringRef{""};
+
+ // Otherwise we have the actual token sequence as string.
+ return It->getSecond().str();
+}
+
+Optional<StringRef>
+MacroExpansionContext::getOriginalText(SourceLocation MacroExpansionLoc) const {
+ if (MacroExpansionLoc.isMacroID())
+ return llvm::None;
+
+ const auto It = ExpansionRanges.find_as(MacroExpansionLoc);
+ if (It == ExpansionRanges.end())
+ return llvm::None;
+
+ assert(It->getFirst() != It->getSecond() &&
+ "Every macro expansion must cover a non-empty range.");
+
+ return Lexer::getSourceText(
+ CharSourceRange::getCharRange(It->getFirst(), It->getSecond()), *SM,
+ LangOpts);
+}
+
+void MacroExpansionContext::dumpExpansionRanges() const {
+ dumpExpansionRangesToStream(llvm::dbgs());
+}
+void MacroExpansionContext::dumpExpandedTexts() const {
+ dumpExpandedTextsToStream(llvm::dbgs());
+}
+
+void MacroExpansionContext::dumpExpansionRangesToStream(raw_ostream &OS) const {
+ std::vector<std::pair<SourceLocation, SourceLocation>> LocalExpansionRanges;
+ LocalExpansionRanges.reserve(ExpansionRanges.size());
+ for (const auto &Record : ExpansionRanges)
+ LocalExpansionRanges.emplace_back(
+ std::make_pair(Record.getFirst(), Record.getSecond()));
+ llvm::sort(LocalExpansionRanges);
+
+ OS << "\n=============== ExpansionRanges ===============\n";
+ for (const auto &Record : LocalExpansionRanges) {
+ OS << "> ";
+ Record.first.print(OS, *SM);
+ OS << ", ";
+ Record.second.print(OS, *SM);
+ OS << '\n';
+ }
+}
+
+void MacroExpansionContext::dumpExpandedTextsToStream(raw_ostream &OS) const {
+ std::vector<std::pair<SourceLocation, MacroExpansionText>>
+ LocalExpandedTokens;
+ LocalExpandedTokens.reserve(ExpandedTokens.size());
+ for (const auto &Record : ExpandedTokens)
+ LocalExpandedTokens.emplace_back(
+ std::make_pair(Record.getFirst(), Record.getSecond()));
+ llvm::sort(LocalExpandedTokens);
+
+ OS << "\n=============== ExpandedTokens ===============\n";
+ for (const auto &Record : LocalExpandedTokens) {
+ OS << "> ";
+ Record.first.print(OS, *SM);
+ OS << " -> '" << Record.second << "'\n";
+ }
+}
+
+static void dumpTokenInto(const Preprocessor &PP, raw_ostream &OS, Token Tok) {
+ assert(Tok.isNot(tok::raw_identifier));
+
+ // Ignore annotation tokens like: _Pragma("pack(push, 1)")
+ if (Tok.isAnnotation())
+ return;
+
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // FIXME: For now, we don't respect whitespaces between macro expanded
+ // tokens. We just emit a space after every identifier to produce a valid
+ // code for `int a ;` like expansions.
+ // ^-^-- Space after the 'int' and 'a' identifiers.
+ OS << II->getName() << ' ';
+ } else if (Tok.isLiteral() && !Tok.needsCleaning() && Tok.getLiteralData()) {
+ OS << StringRef(Tok.getLiteralData(), Tok.getLength());
+ } else {
+ char Tmp[256];
+ if (Tok.getLength() < sizeof(Tmp)) {
+ const char *TokPtr = Tmp;
+ // FIXME: Might use a different overload for cleaner callsite.
+ unsigned Len = PP.getSpelling(Tok, TokPtr);
+ OS.write(TokPtr, Len);
+ } else {
+ OS << "<too long token>";
+ }
+ }
+}
+
+void MacroExpansionContext::onTokenLexed(const Token &Tok) {
+ SourceLocation SLoc = Tok.getLocation();
+ if (SLoc.isFileID())
+ return;
+
+ LLVM_DEBUG(llvm::dbgs() << "lexed macro expansion token '";
+ dumpTokenInto(*PP, llvm::dbgs(), Tok); llvm::dbgs() << "' at ";
+ SLoc.print(llvm::dbgs(), *SM); llvm::dbgs() << '\n';);
+
+ // Remove spelling location.
+ SourceLocation CurrExpansionLoc = SM->getExpansionLoc(SLoc);
+
+ MacroExpansionText TokenAsString;
+ llvm::raw_svector_ostream OS(TokenAsString);
+
+ // FIXME: Prepend newlines and space to produce the exact same output as the
+ // preprocessor would for this token.
+
+ dumpTokenInto(*PP, OS, Tok);
+
+ ExpansionMap::iterator It;
+ bool Inserted;
+ std::tie(It, Inserted) =
+ ExpandedTokens.try_emplace(CurrExpansionLoc, std::move(TokenAsString));
+ if (!Inserted)
+ It->getSecond().append(TokenAsString);
+}
+
diff --git a/clang/lib/Analysis/PathDiagnostic.cpp b/clang/lib/Analysis/PathDiagnostic.cpp
index b42f47fb68c5..ee8185c2147c 100644
--- a/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/clang/lib/Analysis/PathDiagnostic.cpp
@@ -898,7 +898,7 @@ static void describeTemplateParameter(raw_ostream &Out,
if (TArg.getKind() == TemplateArgument::ArgKind::Pack) {
describeTemplateParameters(Out, TArg.getPackAsArray(), LO);
} else {
- TArg.print(PrintingPolicy(LO), Out);
+ TArg.print(PrintingPolicy(LO), Out, /*IncludeType*/ true);
}
}
diff --git a/clang/lib/Analysis/RetainSummaryManager.cpp b/clang/lib/Analysis/RetainSummaryManager.cpp
index 9f45a8efe546..7ed1e40333f4 100644
--- a/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -145,16 +145,20 @@ static bool isSubclass(const Decl *D,
return !(match(SubclassM, *D, D->getASTContext()).empty());
}
-static bool isOSObjectSubclass(const Decl *D) {
- // OSSymbols are particular OSObjects that are allocated globally
- // and therefore aren't really refcounted, so we ignore them.
- return D && isSubclass(D, "OSMetaClassBase") && !isSubclass(D, "OSSymbol");
+static bool isExactClass(const Decl *D, StringRef ClassName) {
+ using namespace ast_matchers;
+ DeclarationMatcher sameClassM =
+ cxxRecordDecl(hasName(std::string(ClassName)));
+ return !(match(sameClassM, *D, D->getASTContext()).empty());
}
-static bool isOSObjectDynamicCast(StringRef S) {
- return S == "safeMetaCast";
+static bool isOSObjectSubclass(const Decl *D) {
+ return D && isSubclass(D, "OSMetaClassBase") &&
+ !isExactClass(D, "OSMetaClass");
}
+static bool isOSObjectDynamicCast(StringRef S) { return S == "safeMetaCast"; }
+
static bool isOSObjectRequiredCast(StringRef S) {
return S == "requiredMetaCast";
}
@@ -185,20 +189,22 @@ static bool hasRCAnnotation(const Decl *D, StringRef rcAnnotation) {
}
static bool isRetain(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_lower("retain") || FName.endswith_lower("retain");
+ return FName.startswith_insensitive("retain") ||
+ FName.endswith_insensitive("retain");
}
static bool isRelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_lower("release") || FName.endswith_lower("release");
+ return FName.startswith_insensitive("release") ||
+ FName.endswith_insensitive("release");
}
static bool isAutorelease(const FunctionDecl *FD, StringRef FName) {
- return FName.startswith_lower("autorelease") ||
- FName.endswith_lower("autorelease");
+ return FName.startswith_insensitive("autorelease") ||
+ FName.endswith_insensitive("autorelease");
}
static bool isMakeCollectable(StringRef FName) {
- return FName.contains_lower("MakeCollectable");
+ return FName.contains_insensitive("MakeCollectable");
}
/// A function is OSObject related if it is declared on a subclass
@@ -883,8 +889,8 @@ RetainSummaryManager::getRetEffectFromAnnotations(QualType RetTy,
return None;
}
-/// \return Whether the chain of typedefs starting from {@code QT}
-/// has a typedef with a given name {@code Name}.
+/// \return Whether the chain of typedefs starting from @c QT
+/// has a typedef with a given name @c Name.
static bool hasTypedefNamed(QualType QT,
StringRef Name) {
while (auto *T = dyn_cast<TypedefType>(QT)) {
@@ -1096,7 +1102,7 @@ RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
if (S.isKeywordSelector()) {
for (unsigned i = 0, e = S.getNumArgs(); i != e; ++i) {
StringRef Slot = S.getNameForSlot(i);
- if (Slot.substr(Slot.size() - 8).equals_lower("delegate")) {
+ if (Slot.substr(Slot.size() - 8).equals_insensitive("delegate")) {
if (ResultEff == ObjCInitRetE)
ResultEff = RetEffect::MakeNoRetHard();
else
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 21583e92c72d..5b2c882c4235 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -105,32 +105,37 @@ class FactSet;
///
/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
+public:
+ /// Where a fact comes from.
+ enum SourceKind {
+ Acquired, ///< The fact has been directly acquired.
+ Asserted, ///< The fact has been asserted to be held.
+ Declared, ///< The fact is assumed to be held by callers.
+ Managed, ///< The fact has been acquired through a scoped capability.
+ };
+
private:
/// Exclusive or shared.
- LockKind LKind;
+ LockKind LKind : 8;
+
+ // How it was acquired.
+ SourceKind Source : 8;
/// Where it was acquired.
SourceLocation AcquireLoc;
- /// True if the lock was asserted.
- bool Asserted;
-
- /// True if the lock was declared.
- bool Declared;
-
public:
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
- bool Asrt, bool Declrd = false)
- : CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
- Declared(Declrd) {}
+ SourceKind Src)
+ : CapabilityExpr(CE), LKind(LK), Source(Src), AcquireLoc(Loc) {}
virtual ~FactEntry() = default;
LockKind kind() const { return LKind; }
SourceLocation loc() const { return AcquireLoc; }
- bool asserted() const { return Asserted; }
- bool declared() const { return Declared; }
- void setDeclared(bool D) { Declared = D; }
+ bool asserted() const { return Source == Asserted; }
+ bool declared() const { return Source == Declared; }
+ bool managed() const { return Source == Managed; }
virtual void
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
@@ -851,20 +856,16 @@ static void findBlockLocations(CFG *CFGraph,
namespace {
class LockableFactEntry : public FactEntry {
-private:
- /// managed by ScopedLockable object
- bool Managed;
-
public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
- bool Mng = false, bool Asrt = false)
- : FactEntry(CE, LK, Loc, Asrt), Managed(Mng) {}
+ SourceKind Src = Acquired)
+ : FactEntry(CE, LK, Loc, Src) {}
void
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
- if (!Managed && !asserted() && !negative() && !isUniversal()) {
+ if (!asserted() && !negative() && !isUniversal()) {
Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
LEK);
}
@@ -903,7 +904,7 @@ private:
public:
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
- : FactEntry(CE, LK_Exclusive, Loc, false) {}
+ : FactEntry(CE, LK_Exclusive, Loc, Acquired) {}
void addLock(const CapabilityExpr &M) {
UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
@@ -983,7 +984,7 @@ private:
} else {
FSet.removeLock(FactMan, !Cp);
FSet.addLock(FactMan,
- std::make_unique<LockableFactEntry>(Cp, kind, loc));
+ std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
}
}
@@ -1049,15 +1050,15 @@ public:
const CFGBlock* PredBlock,
const CFGBlock *CurrBlock);
- void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
- SourceLocation JoinLoc,
- LockErrorKind LEK1, LockErrorKind LEK2,
- bool Modify=true);
+ bool join(const FactEntry &a, const FactEntry &b);
+
+ void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
+ SourceLocation JoinLoc, LockErrorKind EntryLEK,
+ LockErrorKind ExitLEK);
- void intersectAndWarn(FactSet &FSet1, const FactSet &FSet2,
- SourceLocation JoinLoc, LockErrorKind LEK1,
- bool Modify=true) {
- intersectAndWarn(FSet1, FSet2, JoinLoc, LEK1, LEK1, Modify);
+ void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
+ SourceLocation JoinLoc, LockErrorKind LEK) {
+ intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
}
void runAnalysis(AnalysisDeclContext &AC);
@@ -1854,10 +1855,11 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet,
- std::make_unique<LockableFactEntry>(
- AssertLock, LK_Exclusive, Loc, false, true),
- ClassifyDiagnostic(A));
+ Analyzer->addLock(
+ FSet,
+ std::make_unique<LockableFactEntry>(AssertLock, LK_Exclusive, Loc,
+ FactEntry::Asserted),
+ ClassifyDiagnostic(A));
break;
}
case attr::AssertSharedLock: {
@@ -1866,10 +1868,11 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet,
- std::make_unique<LockableFactEntry>(
- AssertLock, LK_Shared, Loc, false, true),
- ClassifyDiagnostic(A));
+ Analyzer->addLock(
+ FSet,
+ std::make_unique<LockableFactEntry>(AssertLock, LK_Shared, Loc,
+ FactEntry::Asserted),
+ ClassifyDiagnostic(A));
break;
}
@@ -1882,7 +1885,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
std::make_unique<LockableFactEntry>(
AssertLock,
A->isShared() ? LK_Shared : LK_Exclusive, Loc,
- false, true),
+ FactEntry::Asserted),
ClassifyDiagnostic(A));
break;
}
@@ -1943,14 +1946,16 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
// Add locks.
+ FactEntry::SourceKind Source =
+ isScopedVar ? FactEntry::Managed : FactEntry::Acquired;
for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
- M, LK_Exclusive, Loc, isScopedVar),
- CapDiagKind);
+ Analyzer->addLock(
+ FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, Loc, Source),
+ CapDiagKind);
for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
- M, LK_Shared, Loc, isScopedVar),
- CapDiagKind);
+ Analyzer->addLock(
+ FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source),
+ CapDiagKind);
if (isScopedVar) {
// Add the managing object as a dummy mutex, mapped to the underlying mutex.
@@ -2051,15 +2056,11 @@ void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
if (ME && MD) {
if (ME->isArrow()) {
- if (MD->isConst())
- checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
- else // FIXME -- should be AK_Written
- checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
+ // Should perhaps be AK_Written if !MD->isConst().
+ checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
} else {
- if (MD->isConst())
- checkAccess(CE->getImplicitObjectArgument(), AK_Read);
- else // FIXME -- should be AK_Written
- checkAccess(CE->getImplicitObjectArgument(), AK_Read);
+ // Should perhaps be AK_Written if !MD->isConst().
+ checkAccess(CE->getImplicitObjectArgument(), AK_Read);
}
}
@@ -2169,7 +2170,7 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
if (!CtorD || !CtorD->hasAttrs())
continue;
handleCall(E, CtorD, VD);
- } else if (isa<CallExpr>(E) && E->isRValue()) {
+ } else if (isa<CallExpr>(E) && E->isPRValue()) {
// If the object is initialized by a function call that returns a
// scoped lockable by value, use the attributes on the copy or move
// constructor to figure out what effect that should have on the
@@ -2187,6 +2188,28 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
}
}
+/// Given two facts merging on a join point, decide whether to warn and which
+/// one to keep.
+///
+/// \return false if we should keep \p A, true if we should keep \p B.
+bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B) {
+ if (A.kind() != B.kind()) {
+ // For managed capabilities, the destructor should unlock in the right mode
+ // anyway. For asserted capabilities no unlocking is needed.
+ if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
+ // The shared capability subsumes the exclusive capability.
+ return B.kind() == LK_Shared;
+ } else {
+ Handler.handleExclusiveAndShared("mutex", B.toString(), B.loc(), A.loc());
+ // Take the exclusive capability to reduce further warnings.
+ return B.kind() == LK_Exclusive;
+ }
+ } else {
+ // The non-asserted capability is the one we want to track.
+ return A.asserted() && !B.asserted();
+ }
+}
+
/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
@@ -2196,55 +2219,44 @@ void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
/// are the same. In the event of a difference, we use the intersection of these
/// two locksets at the start of D.
///
-/// \param FSet1 The first lockset.
-/// \param FSet2 The second lockset.
+/// \param EntrySet A lockset for entry into a (possibly new) block.
+/// \param ExitSet The lockset on exiting a preceding block.
/// \param JoinLoc The location of the join point for error reporting
-/// \param LEK1 The error message to report if a mutex is missing from LSet1
-/// \param LEK2 The error message to report if a mutex is missing from Lset2
-void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
- const FactSet &FSet2,
+/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
+/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
+void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
+ const FactSet &ExitSet,
SourceLocation JoinLoc,
- LockErrorKind LEK1,
- LockErrorKind LEK2,
- bool Modify) {
- FactSet FSet1Orig = FSet1;
-
- // Find locks in FSet2 that conflict or are not in FSet1, and warn.
- for (const auto &Fact : FSet2) {
- const FactEntry *LDat1 = nullptr;
- const FactEntry *LDat2 = &FactMan[Fact];
- FactSet::iterator Iter1 = FSet1.findLockIter(FactMan, *LDat2);
- if (Iter1 != FSet1.end()) LDat1 = &FactMan[*Iter1];
-
- if (LDat1) {
- if (LDat1->kind() != LDat2->kind()) {
- Handler.handleExclusiveAndShared("mutex", LDat2->toString(),
- LDat2->loc(), LDat1->loc());
- if (Modify && LDat1->kind() != LK_Exclusive) {
- // Take the exclusive lock, which is the one in FSet2.
- *Iter1 = Fact;
- }
- }
- else if (Modify && LDat1->asserted() && !LDat2->asserted()) {
- // The non-asserted lock in FSet2 is the one we want to track.
- *Iter1 = Fact;
- }
- } else {
- LDat2->handleRemovalFromIntersection(FSet2, FactMan, JoinLoc, LEK1,
- Handler);
+ LockErrorKind EntryLEK,
+ LockErrorKind ExitLEK) {
+ FactSet EntrySetOrig = EntrySet;
+
+ // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
+ for (const auto &Fact : ExitSet) {
+ const FactEntry &ExitFact = FactMan[Fact];
+
+ FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
+ if (EntryIt != EntrySet.end()) {
+ if (join(FactMan[*EntryIt], ExitFact) &&
+ EntryLEK == LEK_LockedSomePredecessors)
+ *EntryIt = Fact;
+ } else if (!ExitFact.managed()) {
+ ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
+ EntryLEK, Handler);
}
}
- // Find locks in FSet1 that are not in FSet2, and remove them.
- for (const auto &Fact : FSet1Orig) {
- const FactEntry *LDat1 = &FactMan[Fact];
- const FactEntry *LDat2 = FSet2.findLock(FactMan, *LDat1);
+ // Find locks in EntrySet that are not in ExitSet, and remove them.
+ for (const auto &Fact : EntrySetOrig) {
+ const FactEntry *EntryFact = &FactMan[Fact];
+ const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
- if (!LDat2) {
- LDat1->handleRemovalFromIntersection(FSet1Orig, FactMan, JoinLoc, LEK2,
- Handler);
- if (Modify)
- FSet1.removeLock(FactMan, *LDat1);
+ if (!ExitFact) {
+ if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations)
+ EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
+ ExitLEK, Handler);
+ if (ExitLEK == LEK_LockedSomePredecessors)
+ EntrySet.removeLock(FactMan, *EntryFact);
}
}
}
@@ -2368,13 +2380,13 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// FIXME -- Loc can be wrong here.
for (const auto &Mu : ExclusiveLocksToAdd) {
- auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc);
- Entry->setDeclared(true);
+ auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
+ FactEntry::Declared);
addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
}
for (const auto &Mu : SharedLocksToAdd) {
- auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc);
- Entry->setDeclared(true);
+ auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
+ FactEntry::Declared);
addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
}
}
@@ -2468,11 +2480,9 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
PrevBlock, CurrBlock);
// Do not update EntrySet.
- intersectAndWarn(CurrBlockInfo->EntrySet, PrevLockset,
- PrevBlockInfo->ExitLoc,
- IsLoop ? LEK_LockedSomeLoopIterations
- : LEK_LockedSomePredecessors,
- false);
+ intersectAndWarn(
+ CurrBlockInfo->EntrySet, PrevLockset, PrevBlockInfo->ExitLoc,
+ IsLoop ? LEK_LockedSomeLoopIterations : LEK_LockedSomePredecessors);
}
}
@@ -2520,10 +2530,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFGBlock *FirstLoopBlock = *SI;
CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
- intersectAndWarn(LoopEnd->ExitSet, PreLoop->EntrySet,
- PreLoop->EntryLoc,
- LEK_LockedSomeLoopIterations,
- false);
+ intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
+ LEK_LockedSomeLoopIterations);
}
}
@@ -2551,11 +2559,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
ExpectedExitSet.removeLock(FactMan, Lock);
// FIXME: Should we call this function for all blocks which exit the function?
- intersectAndWarn(ExpectedExitSet, Final->ExitSet,
- Final->ExitLoc,
- LEK_LockedAtEndOfFunction,
- LEK_NotLockedAtEndOfFunction,
- false);
+ intersectAndWarn(ExpectedExitSet, Final->ExitSet, Final->ExitLoc,
+ LEK_LockedAtEndOfFunction, LEK_NotLockedAtEndOfFunction);
Handler.leaveFunction(CurrentFunction);
}
diff --git a/clang/lib/Analysis/ThreadSafetyCommon.cpp b/clang/lib/Analysis/ThreadSafetyCommon.cpp
index 0c5d1857cc2b..e6b4a05501e2 100644
--- a/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -26,6 +26,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Casting.h"
#include <algorithm>
@@ -40,7 +41,7 @@ using namespace threadSafety;
std::string threadSafety::getSourceLiteralString(const Expr *CE) {
switch (CE->getStmtClass()) {
case Stmt::IntegerLiteralClass:
- return cast<IntegerLiteral>(CE)->getValue().toString(10, true);
+ return toString(cast<IntegerLiteral>(CE)->getValue(), 10, true);
case Stmt::StringLiteralClass: {
std::string ret("\"");
ret += cast<StringLiteral>(CE)->getString();
diff --git a/clang/lib/Basic/Attributes.cpp b/clang/lib/Basic/Attributes.cpp
index ff6dbf870fcf..62eea9c59082 100644
--- a/clang/lib/Basic/Attributes.cpp
+++ b/clang/lib/Basic/Attributes.cpp
@@ -20,6 +20,14 @@ int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
else if (ScopeName == "_Clang")
ScopeName = "clang";
+ // As a special case, look for the omp::sequence and omp::directive
+ // attributes. We support those, but not through the typical attribute
+ // machinery that goes through TableGen. We support this in all OpenMP modes
+ // so long as double square brackets are enabled.
+ if (LangOpts.OpenMP && LangOpts.DoubleSquareBracketAttributes &&
+ ScopeName == "omp")
+ return (Name == "directive" || Name == "sequence") ? 1 : 0;
+
#include "clang/Basic/AttrHasAttributeImpl.inc"
return 0;
diff --git a/clang/lib/Basic/Builtins.cpp b/clang/lib/Basic/Builtins.cpp
index 0cd89df41b67..7118aa9dc210 100644
--- a/clang/lib/Basic/Builtins.cpp
+++ b/clang/lib/Basic/Builtins.cpp
@@ -60,6 +60,8 @@ bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
bool BuiltinsUnsupported =
(LangOpts.NoBuiltin || LangOpts.isNoBuiltinFunc(BuiltinInfo.Name)) &&
strchr(BuiltinInfo.Attributes, 'f');
+ bool CorBuiltinsUnsupported =
+ !LangOpts.Coroutines && (BuiltinInfo.Langs & COR_LANG);
bool MathBuiltinsUnsupported =
LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
llvm::StringRef(BuiltinInfo.HeaderName).equals("math.h");
@@ -75,12 +77,14 @@ bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
bool OclCUnsupported = !LangOpts.OpenCL &&
(BuiltinInfo.Langs & ALL_OCLC_LANGUAGES);
bool OpenMPUnsupported = !LangOpts.OpenMP && BuiltinInfo.Langs == OMP_LANG;
+ bool CUDAUnsupported = !LangOpts.CUDA && BuiltinInfo.Langs == CUDA_LANG;
bool CPlusPlusUnsupported =
!LangOpts.CPlusPlus && BuiltinInfo.Langs == CXX_LANG;
- return !BuiltinsUnsupported && !MathBuiltinsUnsupported && !OclCUnsupported &&
- !OclC1Unsupported && !OclC2Unsupported && !OpenMPUnsupported &&
- !GnuModeUnsupported && !MSModeUnsupported && !ObjCUnsupported &&
- !CPlusPlusUnsupported;
+ return !BuiltinsUnsupported && !CorBuiltinsUnsupported &&
+ !MathBuiltinsUnsupported && !OclCUnsupported && !OclC1Unsupported &&
+ !OclC2Unsupported && !OpenMPUnsupported && !GnuModeUnsupported &&
+ !MSModeUnsupported && !ObjCUnsupported && !CPlusPlusUnsupported &&
+ !CUDAUnsupported;
}
/// initializeBuiltins - Mark the identifiers for all the builtins with their
@@ -105,10 +109,6 @@ void Builtin::Context::initializeBuiltins(IdentifierTable &Table,
.setBuiltinID(i + Builtin::FirstTSBuiltin + TSRecords.size());
}
-void Builtin::Context::forgetBuiltin(unsigned ID, IdentifierTable &Table) {
- Table.get(getRecord(ID).Name).setBuiltinID(0);
-}
-
unsigned Builtin::Context::getRequiredVectorWidth(unsigned ID) const {
const char *WidthPos = ::strchr(getRecord(ID).Attributes, 'V');
if (!WidthPos)
diff --git a/clang/lib/Basic/CodeGenOptions.cpp b/clang/lib/Basic/CodeGenOptions.cpp
index 4fc7a535c9eb..0c609cfa61de 100644
--- a/clang/lib/Basic/CodeGenOptions.cpp
+++ b/clang/lib/Basic/CodeGenOptions.cpp
@@ -20,12 +20,4 @@ CodeGenOptions::CodeGenOptions() {
memcpy(CoverageVersion, "408*", 4);
}
-bool CodeGenOptions::isNoBuiltinFunc(const char *Name) const {
- StringRef FuncName(Name);
- for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
- if (FuncName.equals(NoBuiltinFuncs[i]))
- return true;
- return false;
-}
-
} // end namespace clang
diff --git a/clang/lib/Basic/Cuda.cpp b/clang/lib/Basic/Cuda.cpp
index 144113f2d2e7..766135bcb376 100644
--- a/clang/lib/Basic/Cuda.cpp
+++ b/clang/lib/Basic/Cuda.cpp
@@ -32,6 +32,10 @@ const char *CudaVersionToString(CudaVersion V) {
return "10.2";
case CudaVersion::CUDA_110:
return "11.0";
+ case CudaVersion::CUDA_111:
+ return "11.1";
+ case CudaVersion::CUDA_112:
+ return "11.2";
}
llvm_unreachable("invalid enum");
}
@@ -48,21 +52,25 @@ CudaVersion CudaStringToVersion(const llvm::Twine &S) {
.Case("10.1", CudaVersion::CUDA_101)
.Case("10.2", CudaVersion::CUDA_102)
.Case("11.0", CudaVersion::CUDA_110)
+ .Case("11.1", CudaVersion::CUDA_111)
+ .Case("11.2", CudaVersion::CUDA_112)
.Default(CudaVersion::UNKNOWN);
}
+namespace {
struct CudaArchToStringMap {
CudaArch arch;
const char *arch_name;
const char *virtual_arch_name;
};
+} // namespace
#define SM2(sm, ca) \
{ CudaArch::SM_##sm, "sm_" #sm, ca }
#define SM(sm) SM2(sm, "compute_" #sm)
#define GFX(gpu) \
{ CudaArch::GFX##gpu, "gfx" #gpu, "compute_amdgcn" }
-CudaArchToStringMap arch_names[] = {
+static const CudaArchToStringMap arch_names[] = {
// clang-format off
{CudaArch::UNUSED, "", ""},
SM2(20, "compute_20"), SM2(21, "compute_20"), // Fermi
@@ -71,7 +79,7 @@ CudaArchToStringMap arch_names[] = {
SM(60), SM(61), SM(62), // Pascal
SM(70), SM(72), // Volta
SM(75), // Turing
- SM(80), // Ampere
+ SM(80), SM(86), // Ampere
GFX(600), // gfx600
GFX(601), // gfx601
GFX(602), // gfx602
@@ -92,14 +100,18 @@ CudaArchToStringMap arch_names[] = {
GFX(906), // gfx906
GFX(908), // gfx908
GFX(909), // gfx909
+ GFX(90a), // gfx90a
GFX(90c), // gfx90c
GFX(1010), // gfx1010
GFX(1011), // gfx1011
GFX(1012), // gfx1012
+ GFX(1013), // gfx1013
GFX(1030), // gfx1030
GFX(1031), // gfx1031
GFX(1032), // gfx1032
GFX(1033), // gfx1033
+ GFX(1034), // gfx1034
+ GFX(1035), // gfx1035
// clang-format on
};
#undef SM
@@ -164,6 +176,8 @@ CudaVersion MinVersionForCudaArch(CudaArch A) {
return CudaVersion::CUDA_100;
case CudaArch::SM_80:
return CudaVersion::CUDA_110;
+ case CudaArch::SM_86:
+ return CudaVersion::CUDA_111;
default:
llvm_unreachable("invalid enum");
}
@@ -209,6 +223,10 @@ CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
return CudaVersion::CUDA_102;
case 110:
return CudaVersion::CUDA_110;
+ case 111:
+ return CudaVersion::CUDA_111;
+ case 112:
+ return CudaVersion::CUDA_112;
default:
return CudaVersion::UNKNOWN;
}
diff --git a/clang/lib/Basic/DarwinSDKInfo.cpp b/clang/lib/Basic/DarwinSDKInfo.cpp
new file mode 100644
index 000000000000..fe35f77782c9
--- /dev/null
+++ b/clang/lib/Basic/DarwinSDKInfo.cpp
@@ -0,0 +1,131 @@
+//===--- DarwinSDKInfo.cpp - SDK Information parser for darwin - ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/DarwinSDKInfo.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang;
+
+Optional<VersionTuple> DarwinSDKInfo::RelatedTargetVersionMapping::map(
+ const VersionTuple &Key, const VersionTuple &MinimumValue,
+ Optional<VersionTuple> MaximumValue) const {
+ if (Key < MinimumKeyVersion)
+ return MinimumValue;
+ if (Key > MaximumKeyVersion)
+ return MaximumValue;
+ auto KV = Mapping.find(Key.normalize());
+ if (KV != Mapping.end())
+ return KV->getSecond();
+ // If no exact entry found, try just the major key version. Only do so when
+ // a minor version number is present, to avoid recursing indefinitely into
+ // the major-only check.
+ if (Key.getMinor())
+ return map(VersionTuple(Key.getMajor()), MinimumValue, MaximumValue);
+  // If this is a major-only key, return None for a missing entry.
+ return None;
+}
+
+Optional<DarwinSDKInfo::RelatedTargetVersionMapping>
+DarwinSDKInfo::RelatedTargetVersionMapping::parseJSON(
+ const llvm::json::Object &Obj, VersionTuple MaximumDeploymentTarget) {
+ VersionTuple Min = VersionTuple(std::numeric_limits<unsigned>::max());
+ VersionTuple Max = VersionTuple(0);
+ VersionTuple MinValue = Min;
+ llvm::DenseMap<VersionTuple, VersionTuple> Mapping;
+ for (const auto &KV : Obj) {
+ if (auto Val = KV.getSecond().getAsString()) {
+ llvm::VersionTuple KeyVersion;
+ llvm::VersionTuple ValueVersion;
+ if (KeyVersion.tryParse(KV.getFirst()) || ValueVersion.tryParse(*Val))
+ return None;
+ Mapping[KeyVersion.normalize()] = ValueVersion;
+ if (KeyVersion < Min)
+ Min = KeyVersion;
+ if (KeyVersion > Max)
+ Max = KeyVersion;
+ if (ValueVersion < MinValue)
+ MinValue = ValueVersion;
+ }
+ }
+ if (Mapping.empty())
+ return None;
+ return RelatedTargetVersionMapping(
+ Min, Max, MinValue, MaximumDeploymentTarget, std::move(Mapping));
+}
+
+static Optional<VersionTuple> getVersionKey(const llvm::json::Object &Obj,
+ StringRef Key) {
+ auto Value = Obj.getString(Key);
+ if (!Value)
+ return None;
+ VersionTuple Version;
+ if (Version.tryParse(*Value))
+ return None;
+ return Version;
+}
+
+Optional<DarwinSDKInfo>
+DarwinSDKInfo::parseDarwinSDKSettingsJSON(const llvm::json::Object *Obj) {
+ auto Version = getVersionKey(*Obj, "Version");
+ if (!Version)
+ return None;
+ auto MaximumDeploymentVersion =
+ getVersionKey(*Obj, "MaximumDeploymentTarget");
+ if (!MaximumDeploymentVersion)
+ return None;
+ llvm::DenseMap<OSEnvPair::StorageType, Optional<RelatedTargetVersionMapping>>
+ VersionMappings;
+ if (const auto *VM = Obj->getObject("VersionMap")) {
+ if (const auto *Mapping = VM->getObject("macOS_iOSMac")) {
+ auto VersionMap = RelatedTargetVersionMapping::parseJSON(
+ *Mapping, *MaximumDeploymentVersion);
+ if (!VersionMap)
+ return None;
+ VersionMappings[OSEnvPair::macOStoMacCatalystPair().Value] =
+ std::move(VersionMap);
+ }
+ if (const auto *Mapping = VM->getObject("iOSMac_macOS")) {
+ auto VersionMap = RelatedTargetVersionMapping::parseJSON(
+ *Mapping, *MaximumDeploymentVersion);
+ if (!VersionMap)
+ return None;
+ VersionMappings[OSEnvPair::macCatalystToMacOSPair().Value] =
+ std::move(VersionMap);
+ }
+ }
+
+ return DarwinSDKInfo(std::move(*Version),
+ std::move(*MaximumDeploymentVersion),
+ std::move(VersionMappings));
+}
+
+Expected<Optional<DarwinSDKInfo>>
+clang::parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS, StringRef SDKRootPath) {
+ llvm::SmallString<256> Filepath = SDKRootPath;
+ llvm::sys::path::append(Filepath, "SDKSettings.json");
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
+ VFS.getBufferForFile(Filepath);
+ if (!File) {
+ // If the file couldn't be read, assume it just doesn't exist.
+ return None;
+ }
+ Expected<llvm::json::Value> Result =
+ llvm::json::parse(File.get()->getBuffer());
+ if (!Result)
+ return Result.takeError();
+
+ if (const auto *Obj = Result->getAsObject()) {
+ if (auto SDKInfo = DarwinSDKInfo::parseDarwinSDKSettingsJSON(Obj))
+ return std::move(SDKInfo);
+ }
+ return llvm::make_error<llvm::StringError>("invalid SDKSettings.json",
+ llvm::inconvertibleErrorCode());
+}
diff --git a/clang/lib/Basic/DiagnosticIDs.cpp b/clang/lib/Basic/DiagnosticIDs.cpp
index 06a8e2ed5ebd..c333076d2efc 100644
--- a/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/clang/lib/Basic/DiagnosticIDs.cpp
@@ -109,15 +109,15 @@ enum {
struct StaticDiagInfoRec {
uint16_t DiagID;
- unsigned DefaultSeverity : 3;
- unsigned Class : 3;
- unsigned SFINAE : 2;
- unsigned WarnNoWerror : 1;
- unsigned WarnShowInSystemHeader : 1;
- unsigned Deferrable : 1;
- unsigned Category : 6;
+ uint8_t DefaultSeverity : 3;
+ uint8_t Class : 3;
+ uint8_t SFINAE : 2;
+ uint8_t Category : 6;
+ uint8_t WarnNoWerror : 1;
+ uint8_t WarnShowInSystemHeader : 1;
- uint16_t OptionGroupIndex;
+ uint16_t OptionGroupIndex : 15;
+ uint16_t Deferrable : 1;
uint16_t DescriptionLen;
@@ -168,20 +168,20 @@ VALIDATE_DIAG_SIZE(REFACTORING)
#undef STRINGIFY_NAME
const StaticDiagInfoRec StaticDiagInfo[] = {
+// clang-format off
#define DIAG(ENUM, CLASS, DEFAULT_SEVERITY, DESC, GROUP, SFINAE, NOWERROR, \
- SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
+ SHOWINSYSHEADER, DEFERRABLE, CATEGORY) \
{ \
diag::ENUM, \
DEFAULT_SEVERITY, \
CLASS, \
DiagnosticIDs::SFINAE, \
+ CATEGORY, \
NOWERROR, \
SHOWINSYSHEADER, \
- DEFERRABLE, \
- CATEGORY, \
GROUP, \
+ DEFERRABLE, \
STR_SIZE(DESC, uint16_t)},
-// clang-format off
#include "clang/Basic/DiagnosticCommonKinds.inc"
#include "clang/Basic/DiagnosticDriverKinds.inc"
#include "clang/Basic/DiagnosticFrontendKinds.inc"
@@ -194,7 +194,7 @@ const StaticDiagInfoRec StaticDiagInfo[] = {
#include "clang/Basic/DiagnosticSemaKinds.inc"
#include "clang/Basic/DiagnosticAnalysisKinds.inc"
#include "clang/Basic/DiagnosticRefactoringKinds.inc"
- // clang-format on
+// clang-format on
#undef DIAG
};
diff --git a/clang/lib/Basic/FileManager.cpp b/clang/lib/Basic/FileManager.cpp
index 6e9d5d7fb422..74cd2f295be6 100644
--- a/clang/lib/Basic/FileManager.cpp
+++ b/clang/lib/Basic/FileManager.cpp
@@ -128,7 +128,7 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
// Stat("C:") does not recognize "C:" as a valid directory
std::string DirNameStr;
if (DirName.size() > 1 && DirName.back() == ':' &&
- DirName.equals_lower(llvm::sys::path::root_name(DirName))) {
+ DirName.equals_insensitive(llvm::sys::path::root_name(DirName))) {
DirNameStr = DirName.str() + '.';
DirName = DirNameStr;
}
@@ -384,9 +384,12 @@ FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
// Now that all ancestors of Filename are in the cache, the
// following call is guaranteed to find the DirectoryEntry from the
- // cache.
- auto DirInfo = expectedToOptional(
- getDirectoryFromFile(*this, Filename, /*CacheFailure=*/true));
+  // cache. A virtual file can also have an empty filename, which could come,
+  // for example, from a source location preprocessor directive with an empty
+  // filename, so we need to pretend it has a name to ensure a valid directory
+  // entry can be returned.
+ auto DirInfo = expectedToOptional(getDirectoryFromFile(
+ *this, Filename.empty() ? "." : Filename, /*CacheFailure=*/true));
assert(DirInfo &&
"The directory of a virtual file should already be in the cache.");
@@ -608,7 +611,7 @@ StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
SmallString<4096> CanonicalNameBuf;
if (!FS->getRealPath(Dir->getName(), CanonicalNameBuf))
- CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
+ CanonicalName = CanonicalNameBuf.str().copy(CanonicalNameStorage);
CanonicalNames.insert({Dir, CanonicalName});
return CanonicalName;
@@ -624,7 +627,7 @@ StringRef FileManager::getCanonicalName(const FileEntry *File) {
SmallString<4096> CanonicalNameBuf;
if (!FS->getRealPath(File->getName(), CanonicalNameBuf))
- CanonicalName = StringRef(CanonicalNameBuf).copy(CanonicalNameStorage);
+ CanonicalName = CanonicalNameBuf.str().copy(CanonicalNameStorage);
CanonicalNames.insert({File, CanonicalName});
return CanonicalName;
diff --git a/clang/lib/Basic/IdentifierTable.cpp b/clang/lib/Basic/IdentifierTable.cpp
index 51c6e02e2e2e..d811aeec84a0 100644
--- a/clang/lib/Basic/IdentifierTable.cpp
+++ b/clang/lib/Basic/IdentifierTable.cpp
@@ -107,8 +107,9 @@ namespace {
KEYCXX20 = 0x200000,
KEYOPENCLCXX = 0x400000,
KEYMSCOMPAT = 0x800000,
+ KEYSYCL = 0x1000000,
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
- KEYALL = (0xffffff & ~KEYNOMS18 &
+ KEYALL = (0x1ffffff & ~KEYNOMS18 &
~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
@@ -155,6 +156,8 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
if (LangOpts.CPlusPlus && (Flags & KEYALLCXX)) return KS_Future;
if (LangOpts.CPlusPlus && !LangOpts.CPlusPlus20 && (Flags & CHAR8SUPPORT))
return KS_Future;
+ if (LangOpts.isSYCL() && (Flags & KEYSYCL))
+ return KS_Enabled;
return KS_Disabled;
}
@@ -227,6 +230,9 @@ void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
if (LangOpts.DeclSpecKeyword)
AddKeyword("__declspec", tok::kw___declspec, KEYALL, LangOpts, *this);
+ if (LangOpts.IEEE128)
+ AddKeyword("__ieee128", tok::kw___float128, KEYALL, LangOpts, *this);
+
// Add the 'import' contextual keyword.
get("import").setModulesImport(true);
}
@@ -270,6 +276,39 @@ bool IdentifierInfo::isCPlusPlusKeyword(const LangOptions &LangOpts) const {
return !isKeyword(LangOptsNoCPP);
}
+ReservedIdentifierStatus
+IdentifierInfo::isReserved(const LangOptions &LangOpts) const {
+ StringRef Name = getName();
+
+ // '_' is a reserved identifier, but its use is so common (e.g. to store
+ // ignored values) that we don't warn on it.
+ if (Name.size() <= 1)
+ return ReservedIdentifierStatus::NotReserved;
+
+ // [lex.name] p3
+ if (Name[0] == '_') {
+
+ // Each name that begins with an underscore followed by an uppercase letter
+ // or another underscore is reserved.
+ if (Name[1] == '_')
+ return ReservedIdentifierStatus::StartsWithDoubleUnderscore;
+
+ if ('A' <= Name[1] && Name[1] <= 'Z')
+ return ReservedIdentifierStatus::
+ StartsWithUnderscoreFollowedByCapitalLetter;
+
+ // This is a bit misleading: it actually means it's only reserved if we're
+ // at global scope because it starts with an underscore.
+ return ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope;
+ }
+
+ // Each name that contains a double underscore (__) is reserved.
+ if (LangOpts.CPlusPlus && Name.contains("__"))
+ return ReservedIdentifierStatus::ContainsDoubleUnderscore;
+
+ return ReservedIdentifierStatus::NotReserved;
+}
+
tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
// We use a perfect hash function here involving the length of the keyword,
// the first and third character. For preprocessor ID's there are no
@@ -305,9 +344,11 @@ tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
CASE( 6, 'p', 'a', pragma);
CASE( 7, 'd', 'f', defined);
+ CASE( 7, 'e', 'i', elifdef);
CASE( 7, 'i', 'c', include);
CASE( 7, 'w', 'r', warning);
+ CASE( 8, 'e', 'i', elifndef);
CASE( 8, 'u', 'a', unassert);
CASE(12, 'i', 'c', include_next);
diff --git a/clang/lib/Basic/LangOptions.cpp b/clang/lib/Basic/LangOptions.cpp
index ed275ade4001..dc392d5352aa 100644
--- a/clang/lib/Basic/LangOptions.cpp
+++ b/clang/lib/Basic/LangOptions.cpp
@@ -28,7 +28,7 @@ void LangOptions::resetNonModularOptions() {
#include "clang/Basic/LangOptions.def"
// These options do not affect AST generation.
- SanitizerBlacklistFiles.clear();
+ NoSanitizeFiles.clear();
XRayAlwaysInstrumentFiles.clear();
XRayNeverInstrumentFiles.clear();
diff --git a/clang/lib/Basic/Module.cpp b/clang/lib/Basic/Module.cpp
index 2dd53b05d442..b6cf1624ef01 100644
--- a/clang/lib/Basic/Module.cpp
+++ b/clang/lib/Basic/Module.cpp
@@ -245,12 +245,10 @@ bool Module::fullModuleNameIs(ArrayRef<StringRef> nameParts) const {
Module::DirectoryName Module::getUmbrellaDir() const {
if (Header U = getUmbrellaHeader())
- return {"", U.Entry->getDir()};
+ return {"", "", U.Entry->getDir()};
- if (auto *ME = Umbrella.dyn_cast<const DirectoryEntryRef::MapEntry *>())
- return {UmbrellaAsWritten, DirectoryEntryRef(*ME)};
-
- return {"", None};
+ return {UmbrellaAsWritten, UmbrellaRelativeToRootModuleDirectory,
+ Umbrella.dyn_cast<const DirectoryEntry *>()};
}
void Module::addTopHeader(const FileEntry *File) {
@@ -432,7 +430,7 @@ void Module::buildVisibleModulesCache() const {
}
}
-void Module::print(raw_ostream &OS, unsigned Indent) const {
+void Module::print(raw_ostream &OS, unsigned Indent, bool Dump) const {
OS.indent(Indent);
if (IsFramework)
OS << "framework ";
@@ -538,7 +536,7 @@ void Module::print(raw_ostream &OS, unsigned Indent) const {
// the module. Regular inferred submodules are OK, as we need to look at all
// those header files anyway.
if (!(*MI)->IsInferred || (*MI)->IsFramework)
- (*MI)->print(OS, Indent + 2);
+ (*MI)->print(OS, Indent + 2, Dump);
for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
OS.indent(Indent + 2);
@@ -562,6 +560,13 @@ void Module::print(raw_ostream &OS, unsigned Indent) const {
OS << "\n";
}
+ if (Dump) {
+ for (Module *M : Imports) {
+ OS.indent(Indent + 2);
+ llvm::errs() << "import " << M->getFullModuleName() << "\n";
+ }
+ }
+
for (unsigned I = 0, N = DirectUses.size(); I != N; ++I) {
OS.indent(Indent + 2);
OS << "use ";
@@ -622,7 +627,7 @@ void Module::print(raw_ostream &OS, unsigned Indent) const {
}
LLVM_DUMP_METHOD void Module::dump() const {
- print(llvm::errs());
+ print(llvm::errs(), 0, true);
}
void VisibleModuleSet::setVisible(Module *M, SourceLocation Loc,
diff --git a/clang/lib/Basic/NoSanitizeList.cpp b/clang/lib/Basic/NoSanitizeList.cpp
new file mode 100644
index 000000000000..3efd613b0d33
--- /dev/null
+++ b/clang/lib/Basic/NoSanitizeList.cpp
@@ -0,0 +1,54 @@
+//===--- NoSanitizeList.cpp - Ignored list for sanitizers ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// User-provided ignore-list used to disable/alter instrumentation done in
+// sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/NoSanitizeList.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SanitizerSpecialCaseList.h"
+#include "clang/Basic/Sanitizers.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+
+NoSanitizeList::NoSanitizeList(const std::vector<std::string> &NoSanitizePaths,
+ SourceManager &SM)
+ : SSCL(SanitizerSpecialCaseList::createOrDie(
+ NoSanitizePaths, SM.getFileManager().getVirtualFileSystem())),
+ SM(SM) {}
+
+NoSanitizeList::~NoSanitizeList() = default;
+
+bool NoSanitizeList::containsGlobal(SanitizerMask Mask, StringRef GlobalName,
+ StringRef Category) const {
+ return SSCL->inSection(Mask, "global", GlobalName, Category);
+}
+
+bool NoSanitizeList::containsType(SanitizerMask Mask, StringRef MangledTypeName,
+ StringRef Category) const {
+ return SSCL->inSection(Mask, "type", MangledTypeName, Category);
+}
+
+bool NoSanitizeList::containsFunction(SanitizerMask Mask,
+ StringRef FunctionName) const {
+ return SSCL->inSection(Mask, "fun", FunctionName);
+}
+
+bool NoSanitizeList::containsFile(SanitizerMask Mask, StringRef FileName,
+ StringRef Category) const {
+ return SSCL->inSection(Mask, "src", FileName, Category);
+}
+
+bool NoSanitizeList::containsLocation(SanitizerMask Mask, SourceLocation Loc,
+ StringRef Category) const {
+ return Loc.isValid() &&
+ containsFile(Mask, SM.getFilename(SM.getFileLoc(Loc)), Category);
+}
diff --git a/clang/lib/Basic/OpenCLOptions.cpp b/clang/lib/Basic/OpenCLOptions.cpp
index 266acc5fe477..2e215b185f66 100644
--- a/clang/lib/Basic/OpenCLOptions.cpp
+++ b/clang/lib/Basic/OpenCLOptions.cpp
@@ -7,6 +7,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/OpenCLOptions.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
namespace clang {
@@ -14,39 +16,47 @@ bool OpenCLOptions::isKnown(llvm::StringRef Ext) const {
return OptMap.find(Ext) != OptMap.end();
}
+bool OpenCLOptions::isAvailableOption(llvm::StringRef Ext,
+ const LangOptions &LO) const {
+ if (!isKnown(Ext))
+ return false;
+
+ auto &OptInfo = OptMap.find(Ext)->getValue();
+ if (OptInfo.isCoreIn(LO) || OptInfo.isOptionalCoreIn(LO))
+ return isSupported(Ext, LO);
+
+ return isEnabled(Ext);
+}
+
bool OpenCLOptions::isEnabled(llvm::StringRef Ext) const {
+ auto I = OptMap.find(Ext);
+ return I != OptMap.end() && I->getValue().Enabled;
+}
+
+bool OpenCLOptions::isWithPragma(llvm::StringRef Ext) const {
auto E = OptMap.find(Ext);
- return E != OptMap.end() && E->second.Enabled;
+ return E != OptMap.end() && E->second.WithPragma;
}
bool OpenCLOptions::isSupported(llvm::StringRef Ext,
const LangOptions &LO) const {
- auto E = OptMap.find(Ext);
- if (E == OptMap.end()) {
- return false;
- }
- auto I = OptMap.find(Ext)->getValue();
- return I.Supported && I.isAvailableIn(LO);
+ auto I = OptMap.find(Ext);
+ return I != OptMap.end() && I->getValue().Supported &&
+ I->getValue().isAvailableIn(LO);
}
bool OpenCLOptions::isSupportedCore(llvm::StringRef Ext,
const LangOptions &LO) const {
- auto E = OptMap.find(Ext);
- if (E == OptMap.end()) {
- return false;
- }
- auto I = OptMap.find(Ext)->getValue();
- return I.Supported && I.isCoreIn(LO);
+ auto I = OptMap.find(Ext);
+ return I != OptMap.end() && I->getValue().Supported &&
+ I->getValue().isCoreIn(LO);
}
bool OpenCLOptions::isSupportedOptionalCore(llvm::StringRef Ext,
const LangOptions &LO) const {
- auto E = OptMap.find(Ext);
- if (E == OptMap.end()) {
- return false;
- }
- auto I = OptMap.find(Ext)->getValue();
- return I.Supported && I.isOptionalCoreIn(LO);
+ auto I = OptMap.find(Ext);
+ return I != OptMap.end() && I->getValue().Supported &&
+ I->getValue().isOptionalCoreIn(LO);
}
bool OpenCLOptions::isSupportedCoreOrOptionalCore(llvm::StringRef Ext,
@@ -56,12 +66,9 @@ bool OpenCLOptions::isSupportedCoreOrOptionalCore(llvm::StringRef Ext,
bool OpenCLOptions::isSupportedExtension(llvm::StringRef Ext,
const LangOptions &LO) const {
- auto E = OptMap.find(Ext);
- if (E == OptMap.end()) {
- return false;
- }
- auto I = OptMap.find(Ext)->getValue();
- return I.Supported && I.isAvailableIn(LO) &&
+ auto I = OptMap.find(Ext);
+ return I != OptMap.end() && I->getValue().Supported &&
+ I->getValue().isAvailableIn(LO) &&
!isSupportedCoreOrOptionalCore(Ext, LO);
}
@@ -69,6 +76,10 @@ void OpenCLOptions::enable(llvm::StringRef Ext, bool V) {
OptMap[Ext].Enabled = V;
}
+void OpenCLOptions::acceptsPragma(llvm::StringRef Ext, bool V) {
+ OptMap[Ext].WithPragma = V;
+}
+
void OpenCLOptions::support(llvm::StringRef Ext, bool V) {
assert(!Ext.empty() && "Extension is empty.");
assert(Ext[0] != '+' && Ext[0] != '-');
@@ -76,10 +87,8 @@ void OpenCLOptions::support(llvm::StringRef Ext, bool V) {
}
OpenCLOptions::OpenCLOptions() {
-#define OPENCL_GENERIC_EXTENSION(Ext, AvailVer, CoreVer, OptVer) \
- OptMap[#Ext].Avail = AvailVer; \
- OptMap[#Ext].Core = CoreVer; \
- OptMap[#Ext].Opt = OptVer;
+#define OPENCL_GENERIC_EXTENSION(Ext, ...) \
+ OptMap.insert_or_assign(#Ext, OpenCLOptionInfo{__VA_ARGS__});
#include "clang/Basic/OpenCLExtensions.def"
}
@@ -97,10 +106,43 @@ void OpenCLOptions::disableAll() {
Opt.getValue().Enabled = false;
}
-void OpenCLOptions::enableSupportedCore(const LangOptions &LO) {
- for (auto &Opt : OptMap)
- if (isSupportedCoreOrOptionalCore(Opt.getKey(), LO))
- Opt.getValue().Enabled = true;
+bool OpenCLOptions::diagnoseUnsupportedFeatureDependencies(
+ const TargetInfo &TI, DiagnosticsEngine &Diags) {
+ // Feature pairs. First feature in a pair requires the second one to be
+ // supported.
+ static const llvm::StringMap<llvm::StringRef> DependentFeaturesMap = {
+ {"__opencl_c_read_write_images", "__opencl_c_images"}};
+
+ auto OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
+
+ bool IsValid = true;
+ for (auto &FeaturePair : DependentFeaturesMap)
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, FeaturePair.getKey()) &&
+ !TI.hasFeatureEnabled(OpenCLFeaturesMap, FeaturePair.getValue())) {
+ IsValid = false;
+ Diags.Report(diag::err_opencl_feature_requires)
+ << FeaturePair.getKey() << FeaturePair.getValue();
+ }
+ return IsValid;
+}
+
+bool OpenCLOptions::diagnoseFeatureExtensionDifferences(
+ const TargetInfo &TI, DiagnosticsEngine &Diags) {
+ // Extensions and equivalent feature pairs.
+ static const llvm::StringMap<llvm::StringRef> FeatureExtensionMap = {
+ {"cl_khr_fp64", "__opencl_c_fp64"}};
+
+ auto OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
+
+ bool IsValid = true;
+ for (auto &ExtAndFeat : FeatureExtensionMap)
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.getKey()) !=
+ TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.getValue())) {
+ IsValid = false;
+ Diags.Report(diag::err_opencl_extension_and_feature_differs)
+ << ExtAndFeat.getKey() << ExtAndFeat.getValue();
+ }
+ return IsValid;
}
} // end namespace clang
diff --git a/clang/lib/Basic/OpenMPKinds.cpp b/clang/lib/Basic/OpenMPKinds.cpp
index 5c19d60cbd6e..cfdba09eb1ec 100644
--- a/clang/lib/Basic/OpenMPKinds.cpp
+++ b/clang/lib/Basic/OpenMPKinds.cpp
@@ -130,6 +130,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_allocate:
case OMPC_collapse:
@@ -175,6 +176,8 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_match:
case OMPC_nontemporal:
case OMPC_destroy:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_detach:
case OMPC_inclusive:
case OMPC_exclusive:
@@ -370,6 +373,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_allocate:
case OMPC_collapse:
@@ -416,6 +420,8 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_nontemporal:
case OMPC_destroy:
case OMPC_detach:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_inclusive:
case OMPC_exclusive:
case OMPC_uses_allocators:
@@ -446,7 +452,8 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams_distribute ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd;
+ DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
+ DKind == OMPD_unroll;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -573,6 +580,10 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_target_teams_distribute_parallel_for_simd;
}
+bool clang::isOpenMPLoopTransformationDirective(OpenMPDirectiveKind DKind) {
+ return DKind == OMPD_tile || DKind == OMPD_unroll;
+}
+
void clang::getOpenMPCaptureRegions(
SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
OpenMPDirectiveKind DKind) {
@@ -654,8 +665,13 @@ void clang::getOpenMPCaptureRegions(
case OMPD_atomic:
case OMPD_target_data:
case OMPD_distribute_simd:
+ case OMPD_dispatch:
CaptureRegions.push_back(OMPD_unknown);
break;
+ case OMPD_tile:
+ case OMPD_unroll:
+    // Loop transformations do not introduce captures.
+ break;
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
diff --git a/clang/lib/Basic/ProfileList.cpp b/clang/lib/Basic/ProfileList.cpp
index 56bc37a79301..2cb05c1c3c07 100644
--- a/clang/lib/Basic/ProfileList.cpp
+++ b/clang/lib/Basic/ProfileList.cpp
@@ -82,6 +82,7 @@ static StringRef getSectionName(CodeGenOptions::ProfileInstrKind Kind) {
case CodeGenOptions::ProfileCSIRInstr:
return "csllvm";
}
+ llvm_unreachable("Unhandled CodeGenOptions::ProfileInstrKind enum");
}
llvm::Optional<bool>
diff --git a/clang/lib/Basic/SanitizerBlacklist.cpp b/clang/lib/Basic/SanitizerBlacklist.cpp
deleted file mode 100644
index feb7cbda39b7..000000000000
--- a/clang/lib/Basic/SanitizerBlacklist.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-//===--- SanitizerBlacklist.cpp - Blacklist for sanitizers ----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// User-provided blacklist used to disable/alter instrumentation done in
-// sanitizers.
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Basic/SanitizerBlacklist.h"
-#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SanitizerSpecialCaseList.h"
-#include "clang/Basic/Sanitizers.h"
-#include "clang/Basic/SourceManager.h"
-
-using namespace clang;
-
-SanitizerBlacklist::SanitizerBlacklist(
- const std::vector<std::string> &BlacklistPaths, SourceManager &SM)
- : SSCL(SanitizerSpecialCaseList::createOrDie(
- BlacklistPaths, SM.getFileManager().getVirtualFileSystem())),
- SM(SM) {}
-
-SanitizerBlacklist::~SanitizerBlacklist() = default;
-
-bool SanitizerBlacklist::isBlacklistedGlobal(SanitizerMask Mask,
- StringRef GlobalName,
- StringRef Category) const {
- return SSCL->inSection(Mask, "global", GlobalName, Category);
-}
-
-bool SanitizerBlacklist::isBlacklistedType(SanitizerMask Mask,
- StringRef MangledTypeName,
- StringRef Category) const {
- return SSCL->inSection(Mask, "type", MangledTypeName, Category);
-}
-
-bool SanitizerBlacklist::isBlacklistedFunction(SanitizerMask Mask,
- StringRef FunctionName) const {
- return SSCL->inSection(Mask, "fun", FunctionName);
-}
-
-bool SanitizerBlacklist::isBlacklistedFile(SanitizerMask Mask,
- StringRef FileName,
- StringRef Category) const {
- return SSCL->inSection(Mask, "src", FileName, Category);
-}
-
-bool SanitizerBlacklist::isBlacklistedLocation(SanitizerMask Mask,
- SourceLocation Loc,
- StringRef Category) const {
- return Loc.isValid() &&
- isBlacklistedFile(Mask, SM.getFilename(SM.getFileLoc(Loc)), Category);
-}
-
diff --git a/clang/lib/Basic/Sanitizers.cpp b/clang/lib/Basic/Sanitizers.cpp
index f5f81b5fb3e5..7d903c8fdf5e 100644
--- a/clang/lib/Basic/Sanitizers.cpp
+++ b/clang/lib/Basic/Sanitizers.cpp
@@ -12,7 +12,9 @@
#include "clang/Basic/Sanitizers.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/MathExtras.h"
using namespace clang;
@@ -34,6 +36,14 @@ SanitizerMask clang::parseSanitizerValue(StringRef Value, bool AllowGroups) {
return ParsedKind;
}
+void clang::serializeSanitizerSet(SanitizerSet Set,
+ SmallVectorImpl<StringRef> &Values) {
+#define SANITIZER(NAME, ID) \
+ if (Set.has(SanitizerKind::ID)) \
+ Values.push_back(NAME);
+#include "clang/Basic/Sanitizers.def"
+}
+
SanitizerMask clang::expandSanitizerGroups(SanitizerMask Kinds) {
#define SANITIZER(NAME, ID)
#define SANITIZER_GROUP(NAME, ID, ALIAS) \
@@ -48,7 +58,58 @@ llvm::hash_code SanitizerMask::hash_value() const {
}
namespace clang {
+unsigned SanitizerMask::countPopulation() const {
+ unsigned total = 0;
+ for (const auto &Val : maskLoToHigh)
+ total += llvm::countPopulation(Val);
+ return total;
+}
+
llvm::hash_code hash_value(const clang::SanitizerMask &Arg) {
return Arg.hash_value();
}
+
+StringRef AsanDtorKindToString(llvm::AsanDtorKind kind) {
+ switch (kind) {
+ case llvm::AsanDtorKind::None:
+ return "none";
+ case llvm::AsanDtorKind::Global:
+ return "global";
+ case llvm::AsanDtorKind::Invalid:
+ return "invalid";
+ }
+ return "invalid";
+}
+
+llvm::AsanDtorKind AsanDtorKindFromString(StringRef kindStr) {
+ return llvm::StringSwitch<llvm::AsanDtorKind>(kindStr)
+ .Case("none", llvm::AsanDtorKind::None)
+ .Case("global", llvm::AsanDtorKind::Global)
+ .Default(llvm::AsanDtorKind::Invalid);
+}
+
+StringRef AsanDetectStackUseAfterReturnModeToString(
+ llvm::AsanDetectStackUseAfterReturnMode mode) {
+ switch (mode) {
+ case llvm::AsanDetectStackUseAfterReturnMode::Always:
+ return "always";
+ case llvm::AsanDetectStackUseAfterReturnMode::Runtime:
+ return "runtime";
+ case llvm::AsanDetectStackUseAfterReturnMode::Never:
+ return "never";
+ case llvm::AsanDetectStackUseAfterReturnMode::Invalid:
+ return "invalid";
+ }
+ return "invalid";
+}
+
+llvm::AsanDetectStackUseAfterReturnMode
+AsanDetectStackUseAfterReturnModeFromString(StringRef modeStr) {
+ return llvm::StringSwitch<llvm::AsanDetectStackUseAfterReturnMode>(modeStr)
+ .Case("always", llvm::AsanDetectStackUseAfterReturnMode::Always)
+ .Case("runtime", llvm::AsanDetectStackUseAfterReturnMode::Runtime)
+ .Case("never", llvm::AsanDetectStackUseAfterReturnMode::Never)
+ .Default(llvm::AsanDetectStackUseAfterReturnMode::Invalid);
+}
+
} // namespace clang
diff --git a/clang/lib/Basic/SourceLocation.cpp b/clang/lib/Basic/SourceLocation.cpp
index 6f6412028d77..6986fcd322f2 100644
--- a/clang/lib/Basic/SourceLocation.cpp
+++ b/clang/lib/Basic/SourceLocation.cpp
@@ -51,7 +51,7 @@ static_assert(std::is_trivially_destructible<SourceRange>::value,
"used in unions");
unsigned SourceLocation::getHashValue() const {
- return llvm::DenseMapInfo<unsigned>::getHashValue(ID);
+ return llvm::DenseMapInfo<UIntTy>::getHashValue(ID);
}
void llvm::FoldingSetTrait<SourceLocation>::Profile(
diff --git a/clang/lib/Basic/SourceManager.cpp b/clang/lib/Basic/SourceManager.cpp
index c0b22837693b..8cba379aa0f8 100644
--- a/clang/lib/Basic/SourceManager.cpp
+++ b/clang/lib/Basic/SourceManager.cpp
@@ -26,6 +26,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Capacity.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MathExtras.h"
@@ -449,9 +450,9 @@ const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
return LoadedSLocEntryTable[Index];
}
-std::pair<int, unsigned>
+std::pair<int, SourceLocation::UIntTy>
SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
- unsigned TotalSize) {
+ SourceLocation::UIntTy TotalSize) {
assert(ExternalSLocEntries && "Don't have an external sloc source");
// Make sure we're not about to run out of source locations.
if (CurrentLoadedOffset - TotalSize < NextLocalOffset)
@@ -531,7 +532,8 @@ FileID SourceManager::getNextFileID(FileID FID) const {
FileID SourceManager::createFileID(const FileEntry *SourceFile,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID, unsigned LoadedOffset) {
+ int LoadedID,
+ SourceLocation::UIntTy LoadedOffset) {
return createFileID(SourceFile->getLastRef(), IncludePos, FileCharacter,
LoadedID, LoadedOffset);
}
@@ -539,7 +541,8 @@ FileID SourceManager::createFileID(const FileEntry *SourceFile,
FileID SourceManager::createFileID(FileEntryRef SourceFile,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID, unsigned LoadedOffset) {
+ int LoadedID,
+ SourceLocation::UIntTy LoadedOffset) {
SrcMgr::ContentCache &IR = getOrCreateContentCache(SourceFile,
isSystem(FileCharacter));
@@ -558,7 +561,8 @@ FileID SourceManager::createFileID(FileEntryRef SourceFile,
/// MemoryBuffer, so only pass a MemoryBuffer to this once.
FileID SourceManager::createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID, unsigned LoadedOffset,
+ int LoadedID,
+ SourceLocation::UIntTy LoadedOffset,
SourceLocation IncludeLoc) {
StringRef Name = Buffer->getBufferIdentifier();
return createFileIDImpl(createMemBufferContentCache(std::move(Buffer)), Name,
@@ -571,7 +575,8 @@ FileID SourceManager::createFileID(std::unique_ptr<llvm::MemoryBuffer> Buffer,
/// outlive the SourceManager.
FileID SourceManager::createFileID(const llvm::MemoryBufferRef &Buffer,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID, unsigned LoadedOffset,
+ int LoadedID,
+ SourceLocation::UIntTy LoadedOffset,
SourceLocation IncludeLoc) {
return createFileID(llvm::MemoryBuffer::getMemBuffer(Buffer), FileCharacter,
LoadedID, LoadedOffset, IncludeLoc);
@@ -593,7 +598,8 @@ SourceManager::getOrCreateFileID(const FileEntry *SourceFile,
FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
SourceLocation IncludePos,
SrcMgr::CharacteristicKind FileCharacter,
- int LoadedID, unsigned LoadedOffset) {
+ int LoadedID,
+ SourceLocation::UIntTy LoadedOffset) {
if (LoadedID < 0) {
assert(LoadedID != -1 && "Loading sentinel FileID");
unsigned Index = unsigned(-LoadedID) - 2;
@@ -632,14 +638,11 @@ SourceManager::createMacroArgExpansionLoc(SourceLocation SpellingLoc,
return createExpansionLocImpl(Info, TokLength);
}
-SourceLocation
-SourceManager::createExpansionLoc(SourceLocation SpellingLoc,
- SourceLocation ExpansionLocStart,
- SourceLocation ExpansionLocEnd,
- unsigned TokLength,
- bool ExpansionIsTokenRange,
- int LoadedID,
- unsigned LoadedOffset) {
+SourceLocation SourceManager::createExpansionLoc(
+ SourceLocation SpellingLoc, SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd, unsigned TokLength,
+ bool ExpansionIsTokenRange, int LoadedID,
+ SourceLocation::UIntTy LoadedOffset) {
ExpansionInfo Info = ExpansionInfo::create(
SpellingLoc, ExpansionLocStart, ExpansionLocEnd, ExpansionIsTokenRange);
return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
@@ -657,9 +660,8 @@ SourceLocation SourceManager::createTokenSplitLoc(SourceLocation Spelling,
SourceLocation
SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
- unsigned TokLength,
- int LoadedID,
- unsigned LoadedOffset) {
+ unsigned TokLength, int LoadedID,
+ SourceLocation::UIntTy LoadedOffset) {
if (LoadedID < 0) {
assert(LoadedID != -1 && "Loading sentinel FileID");
unsigned Index = unsigned(-LoadedID) - 2;
@@ -761,7 +763,7 @@ llvm::Optional<StringRef> SourceManager::getBufferDataOrNone(FileID FID) const {
/// This is the cache-miss path of getFileID. Not as hot as that function, but
/// still very important. It is responsible for finding the entry in the
/// SLocEntry tables that contains the specified location.
-FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
+FileID SourceManager::getFileIDSlow(SourceLocation::UIntTy SLocOffset) const {
if (!SLocOffset)
return FileID::get(0);
@@ -776,7 +778,7 @@ FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
///
/// This function knows that the SourceLocation is in a local buffer, not a
/// loaded one.
-FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
+FileID SourceManager::getFileIDLocal(SourceLocation::UIntTy SLocOffset) const {
assert(SLocOffset < NextLocalOffset && "Bad function choice");
// After the first and second level caches, I see two common sorts of
@@ -827,7 +829,8 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
NumProbes = 0;
while (true) {
unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
- unsigned MidOffset = getLocalSLocEntry(MiddleIndex).getOffset();
+ SourceLocation::UIntTy MidOffset =
+ getLocalSLocEntry(MiddleIndex).getOffset();
++NumProbes;
@@ -858,7 +861,7 @@ FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
///
/// This function knows that the SourceLocation is in a loaded buffer, not a
/// local one.
-FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
+FileID SourceManager::getFileIDLoaded(SourceLocation::UIntTy SLocOffset) const {
// Sanity checking, otherwise a bug may lead to hanging in release build.
if (SLocOffset < CurrentLoadedOffset) {
assert(0 && "Invalid SLocOffset or bad function choice");
@@ -1252,12 +1255,22 @@ unsigned SourceManager::getPresumedColumnNumber(SourceLocation Loc,
return PLoc.getColumn();
}
-#ifdef __SSE2__
-#include <emmintrin.h>
-#endif
+// Check if mutli-byte word x has bytes between m and n, included. This may also
+// catch bytes equal to n + 1.
+// The returned value holds a 0x80 at each byte position that holds a match.
+// see http://graphics.stanford.edu/~seander/bithacks.html#HasBetweenInWord
+template <class T>
+static constexpr inline T likelyhasbetween(T x, unsigned char m,
+ unsigned char n) {
+ return ((x - ~static_cast<T>(0) / 255 * (n + 1)) & ~x &
+ ((x & ~static_cast<T>(0) / 255 * 127) +
+ (~static_cast<T>(0) / 255 * (127 - (m - 1))))) &
+ ~static_cast<T>(0) / 255 * 128;
+}
LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
llvm::BumpPtrAllocator &Alloc) {
+
// Find the file offsets of all of the *physical* source lines. This does
// not look at trigraphs, escaped newlines, or anything else tricky.
SmallVector<unsigned, 256> LineOffsets;
@@ -1268,7 +1281,43 @@ LineOffsetMapping LineOffsetMapping::get(llvm::MemoryBufferRef Buffer,
const unsigned char *Buf = (const unsigned char *)Buffer.getBufferStart();
const unsigned char *End = (const unsigned char *)Buffer.getBufferEnd();
const std::size_t BufLen = End - Buf;
+
unsigned I = 0;
+ uint64_t Word;
+
+ // scan sizeof(Word) bytes at a time for new lines.
+ // This is much faster than scanning each byte independently.
+ if (BufLen > sizeof(Word)) {
+ do {
+ Word = llvm::support::endian::read64(Buf + I, llvm::support::little);
+ // no new line => jump over sizeof(Word) bytes.
+ auto Mask = likelyhasbetween(Word, '\n', '\r');
+ if (!Mask) {
+ I += sizeof(Word);
+ continue;
+ }
+
+ // At that point, Mask contains 0x80 set at each byte that holds a value
+ // in [\n, \r + 1 [
+
+ // Scan for the next newline - it's very likely there's one.
+ unsigned N =
+ llvm::countTrailingZeros(Mask) - 7; // -7 because 0x80 is the marker
+ Word >>= N;
+ I += N / 8 + 1;
+ unsigned char Byte = Word;
+ if (Byte == '\n') {
+ LineOffsets.push_back(I);
+ } else if (Byte == '\r') {
+ // If this is \r\n, skip both characters.
+ if (Buf[I] == '\n')
+ ++I;
+ LineOffsets.push_back(I);
+ }
+ } while (I < BufLen - sizeof(Word) - 1);
+ }
+
+ // Handle tail using a regular check.
while (I < BufLen) {
if (Buf[I] == '\n') {
LineOffsets.push_back(I + 1);
@@ -1572,7 +1621,7 @@ unsigned SourceManager::getFileIDSize(FileID FID) const {
return 0;
int ID = FID.ID;
- unsigned NextOffset;
+ SourceLocation::UIntTy NextOffset;
if ((ID > 0 && unsigned(ID+1) == local_sloc_entry_size()))
NextOffset = getNextLocalOffset();
else if (ID+1 == -1)
@@ -1780,8 +1829,8 @@ void SourceManager::associateFileChunkWithMacroArgExp(
SourceLocation ExpansionLoc,
unsigned ExpansionLength) const {
if (!SpellLoc.isFileID()) {
- unsigned SpellBeginOffs = SpellLoc.getOffset();
- unsigned SpellEndOffs = SpellBeginOffs + ExpansionLength;
+ SourceLocation::UIntTy SpellBeginOffs = SpellLoc.getOffset();
+ SourceLocation::UIntTy SpellEndOffs = SpellBeginOffs + ExpansionLength;
// The spelling range for this macro argument expansion can span multiple
// consecutive FileID entries. Go through each entry contained in the
@@ -1793,9 +1842,9 @@ void SourceManager::associateFileChunkWithMacroArgExp(
std::tie(SpellFID, SpellRelativeOffs) = getDecomposedLoc(SpellLoc);
while (true) {
const SLocEntry &Entry = getSLocEntry(SpellFID);
- unsigned SpellFIDBeginOffs = Entry.getOffset();
+ SourceLocation::UIntTy SpellFIDBeginOffs = Entry.getOffset();
unsigned SpellFIDSize = getFileIDSize(SpellFID);
- unsigned SpellFIDEndOffs = SpellFIDBeginOffs + SpellFIDSize;
+ SourceLocation::UIntTy SpellFIDEndOffs = SpellFIDBeginOffs + SpellFIDSize;
const ExpansionInfo &Info = Entry.getExpansion();
if (Info.isMacroArgExpansion()) {
unsigned CurrSpellLength;
@@ -1887,7 +1936,7 @@ SourceManager::getMacroArgExpandedLocation(SourceLocation Loc) const {
--I;
- unsigned MacroArgBeginOffs = I->first;
+ SourceLocation::UIntTy MacroArgBeginOffs = I->first;
SourceLocation MacroArgExpandedLoc = I->second;
if (MacroArgExpandedLoc.isValid())
return MacroArgExpandedLoc.getLocWithOffset(Offset - MacroArgBeginOffs);
@@ -2107,7 +2156,7 @@ LLVM_DUMP_METHOD void SourceManager::dump() const {
llvm::raw_ostream &out = llvm::errs();
auto DumpSLocEntry = [&](int ID, const SrcMgr::SLocEntry &Entry,
- llvm::Optional<unsigned> NextStart) {
+ llvm::Optional<SourceLocation::UIntTy> NextStart) {
out << "SLocEntry <FileID " << ID << "> " << (Entry.isFile() ? "file" : "expansion")
<< " <SourceLocation " << Entry.getOffset() << ":";
if (NextStart)
@@ -2147,7 +2196,7 @@ LLVM_DUMP_METHOD void SourceManager::dump() const {
: LocalSLocEntryTable[ID + 1].getOffset());
}
// Dump loaded SLocEntries.
- llvm::Optional<unsigned> NextStart;
+ llvm::Optional<SourceLocation::UIntTy> NextStart;
for (unsigned Index = 0; Index != LoadedSLocEntryTable.size(); ++Index) {
int ID = -(int)Index - 2;
if (SLocEntryLoaded[Index]) {
diff --git a/clang/lib/Basic/TargetInfo.cpp b/clang/lib/Basic/TargetInfo.cpp
index 642ee753d224..b647a2fb8a67 100644
--- a/clang/lib/Basic/TargetInfo.cpp
+++ b/clang/lib/Basic/TargetInfo.cpp
@@ -17,7 +17,6 @@
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetParser.h"
#include <cstdlib>
@@ -67,9 +66,12 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// From the glibc documentation, on GNU systems, malloc guarantees 16-byte
// alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See
// https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html.
- // This alignment guarantee also applies to Windows and Android.
+ // This alignment guarantee also applies to Windows and Android. On Darwin,
+ // the alignment is 16 bytes on both 64-bit and 32-bit systems.
if (T.isGNUEnvironment() || T.isWindowsMSVCEnvironment() || T.isAndroid())
NewAlign = Triple.isArch64Bit() ? 128 : Triple.isArch32Bit() ? 64 : 0;
+ else if (T.isOSDarwin())
+ NewAlign = 128;
else
NewAlign = 0; // Infer from basic type alignment.
HalfWidth = 16;
@@ -96,25 +98,30 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
Char16Type = UnsignedShort;
Char32Type = UnsignedInt;
Int64Type = SignedLongLong;
+ Int16Type = SignedShort;
SigAtomicType = SignedInt;
ProcessIDType = SignedInt;
UseSignedCharForObjCBool = true;
UseBitFieldTypeAlignment = true;
UseZeroLengthBitfieldAlignment = false;
+ UseLeadingZeroLengthBitfield = true;
UseExplicitBitFieldAlignment = true;
ZeroLengthBitfieldBoundary = 0;
+ MaxAlignedAttribute = 0;
HalfFormat = &llvm::APFloat::IEEEhalf();
FloatFormat = &llvm::APFloat::IEEEsingle();
DoubleFormat = &llvm::APFloat::IEEEdouble();
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
Float128Format = &llvm::APFloat::IEEEquad();
MCountName = "mcount";
+ UserLabelPrefix = "_";
RegParmMax = 0;
SSERegParmMax = 0;
HasAlignMac68kSupport = false;
HasBuiltinMSVaList = false;
IsRenderScriptTarget = false;
HasAArch64SVETypes = false;
+ HasRISCVVTypes = false;
AllowAMDGPUUnsafeFPAtomics = false;
ARMCDECoprocMask = 0;
@@ -143,8 +150,9 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : TargetOpts(), Triple(T) {
// Out of line virtual dtor for TargetInfo.
TargetInfo::~TargetInfo() {}
-void TargetInfo::resetDataLayout(StringRef DL) {
- DataLayout.reset(new llvm::DataLayout(DL));
+void TargetInfo::resetDataLayout(StringRef DL, const char *ULP) {
+ DataLayoutString = DL.str();
+ UserLabelPrefix = ULP;
}
bool
@@ -338,7 +346,7 @@ bool TargetInfo::isTypeSigned(IntType T) {
/// Apply changes to the target information with respect to certain
/// language options which change the target configuration and adjust
/// the language based on the target options where applicable.
-void TargetInfo::adjust(LangOptions &Opts) {
+void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
if (Opts.NoBitFieldTypeAlign)
UseBitFieldTypeAlignment = false;
@@ -388,6 +396,19 @@ void TargetInfo::adjust(LangOptions &Opts) {
HalfFormat = &llvm::APFloat::IEEEhalf();
FloatFormat = &llvm::APFloat::IEEEsingle();
LongDoubleFormat = &llvm::APFloat::IEEEquad();
+
+ // OpenCL C v3.0 s6.7.5 - The generic address space requires support for
+ // OpenCL C 2.0 or OpenCL C 3.0 with the __opencl_c_generic_address_space
+ // feature
+ // FIXME: OpenCLGenericAddressSpace is also defined in setLangDefaults()
+ // for OpenCL C 2.0 but with no access to target capabilities. Target
+ // should be immutable once created and thus this language option needs
+ // to be defined only once.
+ if (Opts.OpenCLVersion >= 300) {
+ const auto &OpenCLFeaturesMap = getSupportedOpenCLOpts();
+ Opts.OpenCLGenericAddressSpace = hasFeatureEnabled(
+ OpenCLFeaturesMap, "__opencl_c_generic_address_space");
+ }
}
if (Opts.DoubleSize) {
@@ -422,6 +443,11 @@ void TargetInfo::adjust(LangOptions &Opts) {
// its corresponding signed type.
PaddingOnUnsignedFixedPoint |= Opts.PaddingOnUnsignedFixedPoint;
CheckFixedPointBits();
+
+ if (Opts.ProtectParens && !checkArithmeticFenceSupported()) {
+ Diags.Report(diag::err_opt_not_valid_on_target) << "-fprotect-parens";
+ Opts.ProtectParens = false;
+ }
}
bool TargetInfo::initFeatureMap(
@@ -472,8 +498,8 @@ static StringRef removeGCCRegisterPrefix(StringRef Name) {
/// a valid clobber in an inline asm statement. This is used by
/// Sema.
bool TargetInfo::isValidClobber(StringRef Name) const {
- return (isValidGCCRegisterName(Name) ||
- Name == "memory" || Name == "cc");
+ return (isValidGCCRegisterName(Name) || Name == "memory" || Name == "cc" ||
+ Name == "unwind");
}
/// isValidGCCRegisterName - Returns whether the passed in string
diff --git a/clang/lib/Basic/Targets.cpp b/clang/lib/Basic/Targets.cpp
index 90a67d03b7b2..ba91d0439968 100644
--- a/clang/lib/Basic/Targets.cpp
+++ b/clang/lib/Basic/Targets.cpp
@@ -22,6 +22,7 @@
#include "Targets/Hexagon.h"
#include "Targets/Lanai.h"
#include "Targets/Le64.h"
+#include "Targets/M68k.h"
#include "Targets/MSP430.h"
#include "Targets/Mips.h"
#include "Targets/NVPTX.h"
@@ -303,6 +304,16 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new MipsTargetInfo(Triple, Opts);
}
+ case llvm::Triple::m68k:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<M68kTargetInfo>(Triple, Opts);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<M68kTargetInfo>(Triple, Opts);
+ default:
+ return new M68kTargetInfo(Triple, Opts);
+ }
+
case llvm::Triple::le32:
switch (os) {
case llvm::Triple::NaCl:
@@ -584,13 +595,13 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
}
case llvm::Triple::spir: {
- if (Triple.getOS() != llvm::Triple::UnknownOS ||
+ if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
return new SPIR32TargetInfo(Triple, Opts);
}
case llvm::Triple::spir64: {
- if (Triple.getOS() != llvm::Triple::UnknownOS ||
+ if (os != llvm::Triple::UnknownOS ||
Triple.getEnvironment() != llvm::Triple::UnknownEnvironment)
return nullptr;
return new SPIR64TargetInfo(Triple, Opts);
@@ -600,7 +611,7 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
Triple.getVendor() != llvm::Triple::UnknownVendor ||
!Triple.isOSBinFormatWasm())
return nullptr;
- switch (Triple.getOS()) {
+ switch (os) {
case llvm::Triple::WASI:
return new WASITargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
case llvm::Triple::Emscripten:
@@ -615,7 +626,7 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
Triple.getVendor() != llvm::Triple::UnknownVendor ||
!Triple.isOSBinFormatWasm())
return nullptr;
- switch (Triple.getOS()) {
+ switch (os) {
case llvm::Triple::WASI:
return new WASITargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
case llvm::Triple::Emscripten:
@@ -715,29 +726,28 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
return Target.release();
}
-
-/// getOpenCLFeatureDefines - Define OpenCL macros based on target settings
-/// and language version
-void TargetInfo::getOpenCLFeatureDefines(const LangOptions &Opts,
- MacroBuilder &Builder) const {
-
- auto defineOpenCLExtMacro = [&](llvm::StringRef Name, unsigned AvailVer,
- unsigned CoreVersions,
- unsigned OptionalVersions) {
- // Check if extension is supported by target and is available in this
- // OpenCL version
- auto It = getTargetOpts().OpenCLFeaturesMap.find(Name);
- if ((It != getTargetOpts().OpenCLFeaturesMap.end()) && It->getValue() &&
- OpenCLOptions::OpenCLOptionInfo(AvailVer, CoreVersions,
- OptionalVersions)
- .isAvailableIn(Opts))
- Builder.defineMacro(Name);
+/// validateOpenCLTarget - Check that OpenCL target has valid
+/// options setting based on OpenCL version.
+bool TargetInfo::validateOpenCLTarget(const LangOptions &Opts,
+ DiagnosticsEngine &Diags) const {
+ const llvm::StringMap<bool> &OpenCLFeaturesMap = getSupportedOpenCLOpts();
+
+ auto diagnoseNotSupportedCore = [&](llvm::StringRef Name, auto... OptArgs) {
+ if (OpenCLOptions::isOpenCLOptionCoreIn(Opts, OptArgs...) &&
+ !hasFeatureEnabled(OpenCLFeaturesMap, Name))
+ Diags.Report(diag::warn_opencl_unsupported_core_feature)
+ << Name << Opts.OpenCLCPlusPlus
+ << Opts.getOpenCLVersionTuple().getAsString();
};
-#define OPENCL_GENERIC_EXTENSION(Ext, Avail, Core, Opt) \
- defineOpenCLExtMacro(#Ext, Avail, Core, Opt);
+#define OPENCL_GENERIC_EXTENSION(Ext, ...) \
+ diagnoseNotSupportedCore(#Ext, __VA_ARGS__);
#include "clang/Basic/OpenCLExtensions.def"
- // FIXME: OpenCL options which affect language semantics/syntax
- // should be moved into LangOptions, thus macro definitions of
- // such options is better to be done in clang::InitializePreprocessor
+ // Validate that feature macros are set properly for OpenCL C 3.0.
+ // In other cases assume that target is always valid.
+ if (Opts.OpenCLCPlusPlus || Opts.OpenCLVersion < 300)
+ return true;
+
+ return OpenCLOptions::diagnoseUnsupportedFeatureDependencies(*this, Diags) &&
+ OpenCLOptions::diagnoseFeatureExtensionDifferences(*this, Diags);
}
diff --git a/clang/lib/Basic/Targets/AArch64.cpp b/clang/lib/Basic/Targets/AArch64.cpp
index f17134623b8b..4070ac727d16 100644
--- a/clang/lib/Basic/Targets/AArch64.cpp
+++ b/clang/lib/Basic/Targets/AArch64.cpp
@@ -182,6 +182,7 @@ void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ Builder.defineMacro("__ARM_FEATURE_FRINT", "1");
// Also include the Armv8.4 defines
getTargetDefinesARMV84A(Opts, Builder);
}
@@ -286,9 +287,27 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
- if (HasCrypto)
+ // The __ARM_FEATURE_CRYPTO is deprecated in favor of finer grained feature
+ // macros for AES, SHA2, SHA3 and SM4
+ if (HasAES && HasSHA2)
Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
+ if (HasAES)
+ Builder.defineMacro("__ARM_FEATURE_AES", "1");
+
+ if (HasSHA2)
+ Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
+
+ if (HasSHA3) {
+ Builder.defineMacro("__ARM_FEATURE_SHA3", "1");
+ Builder.defineMacro("__ARM_FEATURE_SHA512", "1");
+ }
+
+ if (HasSM4) {
+ Builder.defineMacro("__ARM_FEATURE_SM3", "1");
+ Builder.defineMacro("__ARM_FEATURE_SM4", "1");
+ }
+
if (HasUnaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
@@ -333,7 +352,7 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_FEATURE_SVE_MATMUL_INT8", "1");
if ((FPU & NeonMode) && HasFP16FML)
- Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
+ Builder.defineMacro("__ARM_FEATURE_FP16_FML", "1");
if (Opts.hasSignReturnAddress()) {
// Bitmask:
@@ -359,6 +378,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasLS64)
Builder.defineMacro("__ARM_FEATURE_LS64", "1");
+ if (HasRandGen)
+ Builder.defineMacro("__ARM_FEATURE_RNG", "1");
+
switch (ArchKind) {
default:
break;
@@ -417,6 +439,10 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
FPU = FPUMode;
HasCRC = false;
HasCrypto = false;
+ HasAES = false;
+ HasSHA2 = false;
+ HasSHA3 = false;
+ HasSM4 = false;
HasUnaligned = true;
HasFullFP16 = false;
HasDotProd = false;
@@ -424,6 +450,7 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMTE = false;
HasTME = false;
HasLS64 = false;
+ HasRandGen = false;
HasMatMul = false;
HasBFloat16 = false;
HasSVE2 = false;
@@ -485,6 +512,16 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasCRC = true;
if (Feature == "+crypto")
HasCrypto = true;
+ if (Feature == "+aes")
+ HasAES = true;
+ if (Feature == "+sha2")
+ HasSHA2 = true;
+ if (Feature == "+sha3") {
+ HasSHA2 = true;
+ HasSHA3 = true;
+ }
+ if (Feature == "+sm4")
+ HasSM4 = true;
if (Feature == "+strict-align")
HasUnaligned = false;
if (Feature == "+v8.1a")
@@ -523,6 +560,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasLSE = true;
if (Feature == "+ls64")
HasLS64 = true;
+ if (Feature == "+rand")
+ HasRandGen = true;
if (Feature == "+flagm")
HasFlagM = true;
}
@@ -537,6 +576,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
switch (CC) {
case CC_C:
case CC_Swift:
+ case CC_SwiftAsync:
case CC_PreserveMost:
case CC_PreserveAll:
case CC_OpenCLKernel:
@@ -738,9 +778,9 @@ AArch64leTargetInfo::AArch64leTargetInfo(const llvm::Triple &Triple,
void AArch64leTargetInfo::setDataLayout() {
if (getTriple().isOSBinFormatMachO()) {
if(getTriple().isArch32Bit())
- resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128");
+ resetDataLayout("e-m:o-p:32:32-i64:64-i128:128-n32:64-S128", "_");
else
- resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128");
+ resetDataLayout("e-m:o-i64:64-i128:128-n32:64-S128", "_");
} else
resetDataLayout("e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128");
}
@@ -789,7 +829,8 @@ WindowsARM64TargetInfo::WindowsARM64TargetInfo(const llvm::Triple &Triple,
void WindowsARM64TargetInfo::setDataLayout() {
resetDataLayout(Triple.isOSBinFormatMachO()
? "e-m:o-i64:64-i128:128-n32:64-S128"
- : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128");
+ : "e-m:w-p:64:64-i32:32-i64:64-i128:128-n32:64-S128",
+ Triple.isOSBinFormatMachO() ? "_" : "");
}
TargetInfo::BuiltinVaListKind
@@ -810,6 +851,7 @@ WindowsARM64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveMost:
case CC_PreserveAll:
case CC_Swift:
+ case CC_SwiftAsync:
case CC_Win64:
return CCCR_OK;
default:
diff --git a/clang/lib/Basic/Targets/AArch64.h b/clang/lib/Basic/Targets/AArch64.h
index 2809fbce9c88..46882a808336 100644
--- a/clang/lib/Basic/Targets/AArch64.h
+++ b/clang/lib/Basic/Targets/AArch64.h
@@ -30,6 +30,10 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
unsigned FPU;
bool HasCRC;
bool HasCrypto;
+ bool HasAES;
+ bool HasSHA2;
+ bool HasSHA3;
+ bool HasSM4;
bool HasUnaligned;
bool HasFullFP16;
bool HasDotProd;
@@ -38,6 +42,7 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasTME;
bool HasPAuth;
bool HasLS64;
+ bool HasRandGen;
bool HasMatMul;
bool HasSVE2;
bool HasSVE2AES;
diff --git a/clang/lib/Basic/Targets/AMDGPU.cpp b/clang/lib/Basic/Targets/AMDGPU.cpp
index 91c1e83f61cb..fac786dbcf9e 100644
--- a/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -18,7 +18,6 @@
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Frontend/OpenMP/OMPGridValues.h"
-#include "llvm/IR/DataLayout.h"
using namespace clang;
using namespace clang::targets;
@@ -51,6 +50,11 @@ const LangASMap AMDGPUTargetInfo::AMDGPUDefIsGenMap = {
Global, // cuda_device
Constant, // cuda_constant
Local, // cuda_shared
+ Global, // sycl_global
+ Global, // sycl_global_device
+ Global, // sycl_global_host
+ Local, // sycl_local
+ Private, // sycl_private
Generic, // ptr32_sptr
Generic, // ptr32_uptr
Generic // ptr64
@@ -68,6 +72,12 @@ const LangASMap AMDGPUTargetInfo::AMDGPUDefIsPrivMap = {
Global, // cuda_device
Constant, // cuda_constant
Local, // cuda_shared
+ // SYCL address space values for this map are dummy
+ Generic, // sycl_global
+ Generic, // sycl_global_device
+ Generic, // sycl_global_host
+ Generic, // sycl_local
+ Generic, // sycl_private
Generic, // ptr32_sptr
Generic, // ptr32_uptr
Generic // ptr64
@@ -174,6 +184,8 @@ bool AMDGPUTargetInfo::initFeatureMap(
// XXX - What does the member GPU mean if device name string passed here?
if (isAMDGCN(getTriple())) {
switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
+ case GK_GFX1035:
+ case GK_GFX1034:
case GK_GFX1033:
case GK_GFX1032:
case GK_GFX1031:
@@ -183,6 +195,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dot2-insts"] = true;
Features["dot5-insts"] = true;
Features["dot6-insts"] = true;
+ Features["dot7-insts"] = true;
Features["dl-insts"] = true;
Features["flat-address-space"] = true;
Features["16-bit-insts"] = true;
@@ -192,6 +205,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["gfx10-insts"] = true;
Features["gfx10-3-insts"] = true;
Features["s-memrealtime"] = true;
+ Features["s-memtime-inst"] = true;
break;
case GK_GFX1012:
case GK_GFX1011:
@@ -199,7 +213,9 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dot2-insts"] = true;
Features["dot5-insts"] = true;
Features["dot6-insts"] = true;
+ Features["dot7-insts"] = true;
LLVM_FALLTHROUGH;
+ case GK_GFX1013:
case GK_GFX1010:
Features["dl-insts"] = true;
Features["ci-insts"] = true;
@@ -210,7 +226,11 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["gfx9-insts"] = true;
Features["gfx10-insts"] = true;
Features["s-memrealtime"] = true;
+ Features["s-memtime-inst"] = true;
break;
+ case GK_GFX90A:
+ Features["gfx90a-insts"] = true;
+ LLVM_FALLTHROUGH;
case GK_GFX908:
Features["dot3-insts"] = true;
Features["dot4-insts"] = true;
@@ -222,6 +242,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["dl-insts"] = true;
Features["dot1-insts"] = true;
Features["dot2-insts"] = true;
+ Features["dot7-insts"] = true;
LLVM_FALLTHROUGH;
case GK_GFX90C:
case GK_GFX909:
@@ -252,6 +273,7 @@ bool AMDGPUTargetInfo::initFeatureMap(
case GK_GFX602:
case GK_GFX601:
case GK_GFX600:
+ Features["s-memtime-inst"] = true;
break;
case GK_NONE:
break;
@@ -313,7 +335,6 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
llvm::AMDGPU::getArchAttrR600(GPUKind)) {
resetDataLayout(isAMDGCN(getTriple()) ? DataLayoutStringAMDGCN
: DataLayoutStringR600);
- assert(DataLayout->getAllocaAddrSpace() == Private);
GridValues = llvm::omp::AMDGPUGpuGridValues;
setAddressSpaceMap(Triple.getOS() == llvm::Triple::Mesa3D ||
@@ -326,7 +347,7 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
AllowAMDGPUUnsafeFPAtomics = Opts.AllowAMDGPUUnsafeFPAtomics;
// Set pointer width and alignment for target address space 0.
- PointerWidth = PointerAlign = DataLayout->getPointerSizeInBits();
+ PointerWidth = PointerAlign = getPointerWidthV(Generic);
if (getMaxPointerWidth() == 64) {
LongWidth = LongAlign = 64;
SizeType = UnsignedLong;
@@ -337,8 +358,8 @@ AMDGPUTargetInfo::AMDGPUTargetInfo(const llvm::Triple &Triple,
MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
}
-void AMDGPUTargetInfo::adjust(LangOptions &Opts) {
- TargetInfo::adjust(Opts);
+void AMDGPUTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
+ TargetInfo::adjust(Diags, Opts);
// ToDo: There are still a few places using default address space as private
// address space in OpenCL, which needs to be cleaned up, then Opts.OpenCL
// can be removed from the following line.
diff --git a/clang/lib/Basic/Targets/AMDGPU.h b/clang/lib/Basic/Targets/AMDGPU.h
index 8ee0ca30d305..244a6e044690 100644
--- a/clang/lib/Basic/Targets/AMDGPU.h
+++ b/clang/lib/Basic/Targets/AMDGPU.h
@@ -93,7 +93,7 @@ public:
void setAddressSpaceMap(bool DefaultIsPrivate);
- void adjust(LangOptions &Opts) override;
+ void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;
uint64_t getPointerWidthV(unsigned AddrSpace) const override {
if (isR600(getTriple()))
@@ -287,10 +287,13 @@ public:
Opts["cl_clang_storage_class_specifiers"] = true;
Opts["__cl_clang_variadic_functions"] = true;
Opts["__cl_clang_function_pointers"] = true;
+ Opts["__cl_clang_non_portable_kernel_param_types"] = true;
+ Opts["__cl_clang_bitfields"] = true;
bool IsAMDGCN = isAMDGCN(getTriple());
Opts["cl_khr_fp64"] = hasFP64();
+ Opts["__opencl_c_fp64"] = hasFP64();
if (IsAMDGCN || GPUKind >= llvm::AMDGPU::GK_CEDAR) {
Opts["cl_khr_byte_addressable_store"] = true;
diff --git a/clang/lib/Basic/Targets/ARM.cpp b/clang/lib/Basic/Targets/ARM.cpp
index a2c96ad12a76..0e4048f8d5ff 100644
--- a/clang/lib/Basic/Targets/ARM.cpp
+++ b/clang/lib/Basic/Targets/ARM.cpp
@@ -44,7 +44,8 @@ void ARMTargetInfo::setABIAAPCS() {
if (T.isOSBinFormatMachO()) {
resetDataLayout(BigEndian
? "E-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
- : "e-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64");
+ : "e-m:o-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64",
+ "_");
} else if (T.isOSWindows()) {
assert(!BigEndian && "Windows on ARM does not support big endian");
resetDataLayout("e"
@@ -93,12 +94,13 @@ void ARMTargetInfo::setABIAPCS(bool IsAAPCS16) {
if (T.isOSBinFormatMachO() && IsAAPCS16) {
assert(!BigEndian && "AAPCS16 does not support big-endian");
- resetDataLayout("e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128");
+ resetDataLayout("e-m:o-p:32:32-Fi8-i64:64-a:0:32-n32-S128", "_");
} else if (T.isOSBinFormatMachO())
resetDataLayout(
BigEndian
? "E-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32"
- : "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32");
+ : "e-m:o-p:32:32-Fi8-f64:32:64-v64:32:64-v128:32:128-a:0:32-n32-S32",
+ "_");
else
resetDataLayout(
BigEndian
@@ -426,6 +428,8 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
MVE = 0;
CRC = 0;
Crypto = 0;
+ SHA2 = 0;
+ AES = 0;
DSP = 0;
Unaligned = 1;
SoftFloat = false;
@@ -476,6 +480,10 @@ bool ARMTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
CRC = 1;
} else if (Feature == "+crypto") {
Crypto = 1;
+ } else if (Feature == "+sha2") {
+ SHA2 = 1;
+ } else if (Feature == "+aes") {
+ AES = 1;
} else if (Feature == "+dsp") {
DSP = 1;
} else if (Feature == "+fp64") {
@@ -639,8 +647,14 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ArchVersion >= 8) {
// ACLE 6.5.7 Crypto Extension
- if (Crypto)
+ // The __ARM_FEATURE_CRYPTO macro is deprecated in favor of the finer-grained
+ // feature macros for AES and SHA2
+ if (SHA2 && AES)
Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
+ if (SHA2)
+ Builder.defineMacro("__ARM_FEATURE_SHA2", "1");
+ if (AES)
+ Builder.defineMacro("__ARM_FEATURE_AES", "1");
// ACLE 6.5.8 CRC32 Extension
if (CRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
@@ -755,8 +769,12 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
// Note, this is always on in gcc, even though it doesn't make sense.
Builder.defineMacro("__APCS_32__");
+ // __VFP_FP__ means that the floating-point format is VFP, not that a hardware
+ // FPU is present. Moreover, the VFP format is the only one supported by
+ // clang. For these reasons, this macro is always defined.
+ Builder.defineMacro("__VFP_FP__");
+
if (FPUModeIsVFP((FPUMode)FPU)) {
- Builder.defineMacro("__VFP_FP__");
if (FPU & VFP2FPU)
Builder.defineMacro("__ARM_VFPV2__");
if (FPU & VFP3FPU)
@@ -1120,6 +1138,7 @@ ARMTargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_Swift:
+ case CC_SwiftAsync:
case CC_OpenCLKernel:
return CCCR_OK;
default:
@@ -1199,6 +1218,7 @@ WindowsARMTargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveMost:
case CC_PreserveAll:
case CC_Swift:
+ case CC_SwiftAsync:
return CCCR_OK;
default:
return CCCR_Warning;
diff --git a/clang/lib/Basic/Targets/ARM.h b/clang/lib/Basic/Targets/ARM.h
index 1e80f74d0766..0910064a033b 100644
--- a/clang/lib/Basic/Targets/ARM.h
+++ b/clang/lib/Basic/Targets/ARM.h
@@ -72,6 +72,8 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
unsigned CRC : 1;
unsigned Crypto : 1;
+ unsigned SHA2 : 1;
+ unsigned AES : 1;
unsigned DSP : 1;
unsigned Unaligned : 1;
unsigned DotProd : 1;
diff --git a/clang/lib/Basic/Targets/AVR.cpp b/clang/lib/Basic/Targets/AVR.cpp
index 664eea0de841..e87b7338c4d6 100644
--- a/clang/lib/Basic/Targets/AVR.cpp
+++ b/clang/lib/Basic/Targets/AVR.cpp
@@ -308,6 +308,7 @@ void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__flash", "__attribute__((address_space(1)))");
if (!this->CPU.empty()) {
auto It = llvm::find_if(
diff --git a/clang/lib/Basic/Targets/AVR.h b/clang/lib/Basic/Targets/AVR.h
index 94f006ee1b8a..89a80ca6a39a 100644
--- a/clang/lib/Basic/Targets/AVR.h
+++ b/clang/lib/Basic/Targets/AVR.h
@@ -52,6 +52,7 @@ public:
IntPtrType = SignedInt;
Char16Type = UnsignedInt;
WIntType = SignedInt;
+ Int16Type = SignedInt;
Char32Type = UnsignedLong;
SigAtomicType = SignedChar;
resetDataLayout("e-P1-p:16:8-i8:8-i16:8-i32:8-i64:8-f32:8-f64:8-n8-a:8");
diff --git a/clang/lib/Basic/Targets/BPF.cpp b/clang/lib/Basic/Targets/BPF.cpp
index 2fe2450b9a65..0b0298df30a5 100644
--- a/clang/lib/Basic/Targets/BPF.cpp
+++ b/clang/lib/Basic/Targets/BPF.cpp
@@ -46,3 +46,14 @@ ArrayRef<Builtin::Info> BPFTargetInfo::getTargetBuiltins() const {
return llvm::makeArrayRef(BuiltinInfo, clang::BPF::LastTSBuiltin -
Builtin::FirstTSBuiltin);
}
+
+bool BPFTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature == "+alu32") {
+ HasAlu32 = true;
+ }
+ }
+
+ return true;
+}
diff --git a/clang/lib/Basic/Targets/BPF.h b/clang/lib/Basic/Targets/BPF.h
index 43e55dfbfb2b..393a91ff53a5 100644
--- a/clang/lib/Basic/Targets/BPF.h
+++ b/clang/lib/Basic/Targets/BPF.h
@@ -23,6 +23,7 @@ namespace targets {
class LLVM_LIBRARY_VISIBILITY BPFTargetInfo : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
+ bool HasAlu32 = false;
public:
BPFTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
@@ -55,6 +56,8 @@ public:
bool Enabled) const override {
Features[Name] = Enabled;
}
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
ArrayRef<Builtin::Info> getTargetBuiltins() const override;
@@ -68,7 +71,16 @@ public:
ArrayRef<const char *> getGCCRegNames() const override { return None; }
bool validateAsmConstraint(const char *&Name,
- TargetInfo::ConstraintInfo &info) const override {
+ TargetInfo::ConstraintInfo &Info) const override {
+ switch (*Name) {
+ default:
+ break;
+ case 'w':
+ if (HasAlu32) {
+ Info.setAllowsRegister();
+ }
+ break;
+ }
return true;
}
@@ -76,7 +88,7 @@ public:
return None;
}
- bool allowDebugInfoForExternalVar() const override { return true; }
+ bool allowDebugInfoForExternalRef() const override { return true; }
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
switch (CC) {
@@ -93,6 +105,10 @@ public:
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;
bool setCPU(const std::string &Name) override {
+ if (Name == "v3") {
+ HasAlu32 = true;
+ }
+
StringRef CPUName(Name);
return isValidCPUName(CPUName);
}
diff --git a/clang/lib/Basic/Targets/Hexagon.cpp b/clang/lib/Basic/Targets/Hexagon.cpp
index a8b4380b6a87..9c37dee7e89a 100644
--- a/clang/lib/Basic/Targets/Hexagon.cpp
+++ b/clang/lib/Basic/Targets/Hexagon.cpp
@@ -65,6 +65,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
} else if (CPU == "hexagonv67t") {
Builder.defineMacro("__HEXAGON_V67T__");
Builder.defineMacro("__HEXAGON_ARCH__", "67");
+ } else if (CPU == "hexagonv68") {
+ Builder.defineMacro("__HEXAGON_V68__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "68");
}
if (hasFeature("hvx-length64b")) {
@@ -129,14 +132,37 @@ bool HexagonTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
}
const char *const HexagonTargetInfo::GCCRegNames[] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8",
- "r9", "r10", "r11", "r12", "r13", "r14", "r15", "r16", "r17",
- "r18", "r19", "r20", "r21", "r22", "r23", "r24", "r25", "r26",
- "r27", "r28", "r29", "r30", "r31", "p0", "p1", "p2", "p3",
- "sa0", "lc0", "sa1", "lc1", "m0", "m1", "usr", "ugp",
+ // Scalar registers:
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
+ "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
"r1:0", "r3:2", "r5:4", "r7:6", "r9:8", "r11:10", "r13:12", "r15:14",
"r17:16", "r19:18", "r21:20", "r23:22", "r25:24", "r27:26", "r29:28",
- "r31:30"
+ "r31:30",
+ // Predicate registers:
+ "p0", "p1", "p2", "p3",
+ // Control registers:
+ "c0", "c1", "c2", "c3", "c4", "c5", "c6", "c7", "c8", "c9", "c10", "c11",
+ "c12", "c13", "c14", "c15", "c16", "c17", "c18", "c19", "c20", "c21",
+ "c22", "c23", "c24", "c25", "c26", "c27", "c28", "c29", "c30", "c31",
+ "c1:0", "c3:2", "c5:4", "c7:6", "c9:8", "c11:10", "c13:12", "c15:14",
+ "c17:16", "c19:18", "c21:20", "c23:22", "c25:24", "c27:26", "c29:28",
+ "c31:30",
+ // Control register aliases:
+ "sa0", "lc0", "sa1", "lc1", "p3:0", "m0", "m1", "usr", "pc", "ugp",
+ "gp", "cs0", "cs1", "upcyclelo", "upcyclehi", "framelimit", "framekey",
+ "pktcountlo", "pktcounthi", "utimerlo", "utimerhi",
+ "upcycle", "pktcount", "utimer",
+ // HVX vector registers:
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11",
+ "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+ "v1:0", "v3:2", "v5:4", "v7:6", "v9:8", "v11:10", "v13:12", "v15:14",
+ "v17:16", "v19:18", "v21:20", "v23:22", "v25:24", "v27:26", "v29:28",
+ "v31:30",
+ "v3:0", "v7:4", "v11:8", "v15:12", "v19:16", "v23:20", "v27:24", "v31:28",
+ // HVX vector predicates:
+ "q0", "q1", "q2", "q3",
};
ArrayRef<const char *> HexagonTargetInfo::getGCCRegNames() const {
@@ -188,6 +214,7 @@ static constexpr CPUSuffix Suffixes[] = {
{{"hexagonv60"}, {"60"}}, {{"hexagonv62"}, {"62"}},
{{"hexagonv65"}, {"65"}}, {{"hexagonv66"}, {"66"}},
{{"hexagonv67"}, {"67"}}, {{"hexagonv67t"}, {"67t"}},
+ {{"hexagonv68"}, {"68"}},
};
const char *HexagonTargetInfo::getHexagonCPUSuffix(StringRef Name) {
diff --git a/clang/lib/Basic/Targets/Le64.cpp b/clang/lib/Basic/Targets/Le64.cpp
index cacd10dc8936..5c961ff81e05 100644
--- a/clang/lib/Basic/Targets/Le64.cpp
+++ b/clang/lib/Basic/Targets/Le64.cpp
@@ -19,15 +19,8 @@
using namespace clang;
using namespace clang::targets;
-const Builtin::Info Le64TargetInfo::BuiltinInfo[] = {
-#define BUILTIN(ID, TYPE, ATTRS) \
- {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
-#include "clang/Basic/BuiltinsLe64.def"
-};
-
ArrayRef<Builtin::Info> Le64TargetInfo::getTargetBuiltins() const {
- return llvm::makeArrayRef(BuiltinInfo, clang::Le64::LastTSBuiltin -
- Builtin::FirstTSBuiltin);
+ return {};
}
void Le64TargetInfo::getTargetDefines(const LangOptions &Opts,
diff --git a/clang/lib/Basic/Targets/Le64.h b/clang/lib/Basic/Targets/Le64.h
index 253d5681abc2..13a0b04d9f09 100644
--- a/clang/lib/Basic/Targets/Le64.h
+++ b/clang/lib/Basic/Targets/Le64.h
@@ -22,7 +22,6 @@ namespace clang {
namespace targets {
class LLVM_LIBRARY_VISIBILITY Le64TargetInfo : public TargetInfo {
- static const Builtin::Info BuiltinInfo[];
public:
Le64TargetInfo(const llvm::Triple &Triple, const TargetOptions &)
diff --git a/clang/lib/Basic/Targets/M68k.cpp b/clang/lib/Basic/Targets/M68k.cpp
new file mode 100644
index 000000000000..31cb36d37636
--- /dev/null
+++ b/clang/lib/Basic/Targets/M68k.cpp
@@ -0,0 +1,236 @@
+//===--- M68k.cpp - Implement M68k targets feature support-------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements M68k TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "M68k.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/TargetParser.h"
+#include <cstdint>
+#include <cstring>
+#include <limits>
+
+namespace clang {
+namespace targets {
+
+M68kTargetInfo::M68kTargetInfo(const llvm::Triple &Triple,
+ const TargetOptions &)
+ : TargetInfo(Triple) {
+
+ std::string Layout = "";
+
+ // M68k is Big Endian
+ Layout += "E";
+
+ // FIXME: how should this be wired up with the object format in use?
+ Layout += "-m:e";
+
+ // M68k pointers are always 32 bit wide even for 16 bit cpus
+ Layout += "-p:32:32";
+
+ // M68k integer data types
+ Layout += "-i8:8:8-i16:16:16-i32:16:32";
+
+ // FIXME no floats at the moment
+
+ // The registers can hold 8, 16, 32 bits
+ Layout += "-n8:16:32";
+
+ // 16 bit alignment for both stack and aggregate
+ // in order to conform to ABI used by GCC
+ Layout += "-a:0:16-S16";
+
+ resetDataLayout(Layout);
+
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+}
+
+bool M68kTargetInfo::setCPU(const std::string &Name) {
+ StringRef N = Name;
+ CPU = llvm::StringSwitch<CPUKind>(N)
+ .Case("generic", CK_68000)
+ .Case("M68000", CK_68000)
+ .Case("M68010", CK_68010)
+ .Case("M68020", CK_68020)
+ .Case("M68030", CK_68030)
+ .Case("M68040", CK_68040)
+ .Case("M68060", CK_68060)
+ .Default(CK_Unknown);
+ return CPU != CK_Unknown;
+}
+
+void M68kTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ using llvm::Twine;
+
+ Builder.defineMacro("__m68k__");
+
+ Builder.defineMacro("mc68000");
+ Builder.defineMacro("__mc68000");
+ Builder.defineMacro("__mc68000__");
+
+ // For sub-architecture
+ switch (CPU) {
+ case CK_68010:
+ Builder.defineMacro("mc68010");
+ Builder.defineMacro("__mc68010");
+ Builder.defineMacro("__mc68010__");
+ break;
+ case CK_68020:
+ Builder.defineMacro("mc68020");
+ Builder.defineMacro("__mc68020");
+ Builder.defineMacro("__mc68020__");
+ break;
+ case CK_68030:
+ Builder.defineMacro("mc68030");
+ Builder.defineMacro("__mc68030");
+ Builder.defineMacro("__mc68030__");
+ break;
+ case CK_68040:
+ Builder.defineMacro("mc68040");
+ Builder.defineMacro("__mc68040");
+ Builder.defineMacro("__mc68040__");
+ break;
+ case CK_68060:
+ Builder.defineMacro("mc68060");
+ Builder.defineMacro("__mc68060");
+ Builder.defineMacro("__mc68060__");
+ break;
+ default:
+ break;
+ }
+}
+
+ArrayRef<Builtin::Info> M68kTargetInfo::getTargetBuiltins() const {
+ // FIXME: Implement.
+ return None;
+}
+
+bool M68kTargetInfo::hasFeature(StringRef Feature) const {
+ // FIXME: elaborate further
+ return Feature == "M68000";
+}
+
+const char *const M68kTargetInfo::GCCRegNames[] = {
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+ "pc"};
+
+ArrayRef<const char *> M68kTargetInfo::getGCCRegNames() const {
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+ArrayRef<TargetInfo::GCCRegAlias> M68kTargetInfo::getGCCRegAliases() const {
+ // No aliases.
+ return None;
+}
+
+bool M68kTargetInfo::validateAsmConstraint(
+ const char *&Name, TargetInfo::ConstraintInfo &info) const {
+ switch (*Name) {
+ case 'a': // address register
+ case 'd': // data register
+ info.setAllowsRegister();
+ return true;
+ case 'I': // constant integer in the range [1,8]
+ info.setRequiresImmediate(1, 8);
+ return true;
+ case 'J': // constant signed 16-bit integer
+ info.setRequiresImmediate(std::numeric_limits<int16_t>::min(),
+ std::numeric_limits<int16_t>::max());
+ return true;
+ case 'K': // constant that is NOT in the range of [-0x80, 0x80)
+ info.setRequiresImmediate();
+ return true;
+ case 'L': // constant integer in the range [-8,-1]
+ info.setRequiresImmediate(-8, -1);
+ return true;
+ case 'M': // constant that is NOT in the range of [-0x100, 0x100]
+ info.setRequiresImmediate();
+ return true;
+ case 'N': // constant integer in the range [24,31]
+ info.setRequiresImmediate(24, 31);
+ return true;
+ case 'O': // constant integer 16
+ info.setRequiresImmediate(16);
+ return true;
+ case 'P': // constant integer in the range [8,15]
+ info.setRequiresImmediate(8, 15);
+ return true;
+ case 'C':
+ ++Name;
+ switch (*Name) {
+ case '0': // constant integer 0
+ info.setRequiresImmediate(0);
+ return true;
+ case 'i': // constant integer
+ case 'j': // integer constant that doesn't fit in 16 bits
+ info.setRequiresImmediate();
+ return true;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return false;
+}
+
+llvm::Optional<std::string>
+M68kTargetInfo::handleAsmEscapedChar(char EscChar) const {
+ char C;
+ switch (EscChar) {
+ case '.':
+ case '#':
+ C = EscChar;
+ break;
+ case '/':
+ C = '%';
+ break;
+ case '$':
+ C = 's';
+ break;
+ case '&':
+ C = 'd';
+ break;
+ default:
+ return llvm::None;
+ }
+
+ return std::string(1, C);
+}
+
+std::string M68kTargetInfo::convertConstraint(const char *&Constraint) const {
+ if (*Constraint == 'C')
+ // Two-character constraint; add "^" hint for later parsing
+ return std::string("^") + std::string(Constraint++, 2);
+
+ return std::string(1, *Constraint);
+}
+
+const char *M68kTargetInfo::getClobbers() const {
+ // FIXME: Is this really right?
+ return "";
+}
+
+TargetInfo::BuiltinVaListKind M68kTargetInfo::getBuiltinVaListKind() const {
+ return TargetInfo::VoidPtrBuiltinVaList;
+}
+
+} // namespace targets
+} // namespace clang
diff --git a/clang/lib/Basic/Targets/M68k.h b/clang/lib/Basic/Targets/M68k.h
new file mode 100644
index 000000000000..a42ca674ef9c
--- /dev/null
+++ b/clang/lib/Basic/Targets/M68k.h
@@ -0,0 +1,59 @@
+//===--- M68k.h - Declare M68k target feature support -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares M68k TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_M68K_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_M68K_H
+
+#include "OSTargets.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY M68kTargetInfo : public TargetInfo {
+ static const char *const GCCRegNames[];
+
+ enum CPUKind {
+ CK_Unknown,
+ CK_68000,
+ CK_68010,
+ CK_68020,
+ CK_68030,
+ CK_68040,
+ CK_68060
+ } CPU = CK_Unknown;
+
+public:
+ M68kTargetInfo(const llvm::Triple &Triple, const TargetOptions &);
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+ bool hasFeature(StringRef Feature) const override;
+ ArrayRef<const char *> getGCCRegNames() const override;
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override;
+ std::string convertConstraint(const char *&Constraint) const override;
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override;
+ llvm::Optional<std::string> handleAsmEscapedChar(char EscChar) const override;
+ const char *getClobbers() const override;
+ BuiltinVaListKind getBuiltinVaListKind() const override;
+ bool setCPU(const std::string &Name) override;
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif
diff --git a/clang/lib/Basic/Targets/NVPTX.cpp b/clang/lib/Basic/Targets/NVPTX.cpp
index b7f0dce33d2b..56f8a179db3c 100644
--- a/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/clang/lib/Basic/Targets/NVPTX.cpp
@@ -45,6 +45,8 @@ NVPTXTargetInfo::NVPTXTargetInfo(const llvm::Triple &Triple,
if (!Feature.startswith("+ptx"))
continue;
PTXVersion = llvm::StringSwitch<unsigned>(Feature)
+ .Case("+ptx72", 72)
+ .Case("+ptx71", 71)
.Case("+ptx70", 70)
.Case("+ptx65", 65)
.Case("+ptx64", 64)
@@ -200,14 +202,18 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX906:
case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX90a:
case CudaArch::GFX90c:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1013:
case CudaArch::GFX1030:
case CudaArch::GFX1031:
case CudaArch::GFX1032:
case CudaArch::GFX1033:
+ case CudaArch::GFX1034:
+ case CudaArch::GFX1035:
case CudaArch::LAST:
break;
case CudaArch::UNUSED:
@@ -246,6 +252,8 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
return "750";
case CudaArch::SM_80:
return "800";
+ case CudaArch::SM_86:
+ return "860";
}
llvm_unreachable("unhandled CudaArch");
}();
diff --git a/clang/lib/Basic/Targets/NVPTX.h b/clang/lib/Basic/Targets/NVPTX.h
index 038dec4a28bd..c7db3cdaaf10 100644
--- a/clang/lib/Basic/Targets/NVPTX.h
+++ b/clang/lib/Basic/Targets/NVPTX.h
@@ -35,6 +35,11 @@ static const unsigned NVPTXAddrSpaceMap[] = {
1, // cuda_device
4, // cuda_constant
3, // cuda_shared
+ 1, // sycl_global
+ 1, // sycl_global_device
+ 1, // sycl_global_host
+ 3, // sycl_local
+ 0, // sycl_private
0, // ptr32_sptr
0, // ptr32_uptr
0 // ptr64
@@ -130,8 +135,11 @@ public:
Opts["cl_clang_storage_class_specifiers"] = true;
Opts["__cl_clang_function_pointers"] = true;
Opts["__cl_clang_variadic_functions"] = true;
+ Opts["__cl_clang_non_portable_kernel_param_types"] = true;
+ Opts["__cl_clang_bitfields"] = true;
Opts["cl_khr_fp64"] = true;
+ Opts["__opencl_c_fp64"] = true;
Opts["cl_khr_byte_addressable_store"] = true;
Opts["cl_khr_global_int32_base_atomics"] = true;
Opts["cl_khr_global_int32_extended_atomics"] = true;
diff --git a/clang/lib/Basic/Targets/OSTargets.cpp b/clang/lib/Basic/Targets/OSTargets.cpp
index 15e475a31d64..7cd4a5190120 100644
--- a/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/clang/lib/Basic/Targets/OSTargets.cpp
@@ -55,6 +55,8 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
} else {
Triple.getOSVersion(Maj, Min, Rev);
PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
+ if (PlatformName == "ios" && Triple.isMacCatalystEnvironment())
+ PlatformName = "maccatalyst";
}
// If -target arch-pc-win32-macho option specified, we're
diff --git a/clang/lib/Basic/Targets/OSTargets.h b/clang/lib/Basic/Targets/OSTargets.h
index 67fa1a537fea..e24fb5cf082d 100644
--- a/clang/lib/Basic/Targets/OSTargets.h
+++ b/clang/lib/Basic/Targets/OSTargets.h
@@ -13,7 +13,6 @@
#define LLVM_CLANG_LIB_BASIC_TARGETS_OSTARGETS_H
#include "Targets.h"
-#include "llvm/MC/MCSectionMachO.h"
namespace clang {
namespace targets {
@@ -114,15 +113,6 @@ public:
this->MCountName = "\01mcount";
}
- std::string isValidSectionSpecifier(StringRef SR) const override {
- // Let MCSectionMachO validate this.
- StringRef Segment, Section;
- unsigned TAA, StubSize;
- bool HasTAA;
- return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section,
- TAA, HasTAA, StubSize);
- }
-
const char *getStaticInitSectionSpecifier() const override {
// FIXME: We should return 0 when building kexts.
return "__TEXT,__StaticInit,regular,pure_instructions";
@@ -261,6 +251,9 @@ public:
case llvm::Triple::arm:
this->MCountName = "__mcount";
break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ break;
}
}
};
@@ -491,6 +484,9 @@ public:
case llvm::Triple::sparcv9:
this->MCountName = "_mcount";
break;
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ break;
}
}
};
@@ -679,6 +675,12 @@ protected:
Builder.defineMacro("_POWER");
Builder.defineMacro("_AIX");
+ Builder.defineMacro("__TOS_AIX__");
+
+ if (Opts.C11) {
+ Builder.defineMacro("__STDC_NO_ATOMICS__");
+ Builder.defineMacro("__STDC_NO_THREADS__");
+ }
if (Opts.EnableAIXExtendedAltivecABI)
Builder.defineMacro("__EXTABI__");
@@ -699,6 +701,7 @@ protected:
if (OsVersion >= std::make_pair(6, 1)) Builder.defineMacro("_AIX61");
if (OsVersion >= std::make_pair(7, 1)) Builder.defineMacro("_AIX71");
if (OsVersion >= std::make_pair(7, 2)) Builder.defineMacro("_AIX72");
+ if (OsVersion >= std::make_pair(7, 3)) Builder.defineMacro("_AIX73");
// FIXME: Do not define _LONG_LONG when -fno-long-long is specified.
Builder.defineMacro("_LONG_LONG");
@@ -788,10 +791,11 @@ public:
ZOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
this->WCharType = TargetInfo::UnsignedInt;
+ this->MaxAlignedAttribute = 128;
this->UseBitFieldTypeAlignment = false;
this->UseZeroLengthBitfieldAlignment = true;
+ this->UseLeadingZeroLengthBitfield = false;
this->ZeroLengthBitfieldBoundary = 32;
- this->MinGlobalAlign = 0;
this->DefaultAlignForAttributeAligned = 128;
}
};
@@ -940,11 +944,21 @@ class LLVM_LIBRARY_VISIBILITY EmscriptenTargetInfo
MacroBuilder &Builder) const final {
WebAssemblyOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
Builder.defineMacro("__EMSCRIPTEN__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("__EMSCRIPTEN_PTHREADS__");
}
public:
- explicit EmscriptenTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
- : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {}
+ explicit EmscriptenTargetInfo(const llvm::Triple &Triple,
+ const TargetOptions &Opts)
+ : WebAssemblyOSTargetInfo<Target>(Triple, Opts) {
+ // Keeping the alignment of long double at 8 bytes, even though its size is
+ // 16 bytes, allows emscripten to have an 8-byte-aligned max_align_t, which
+ // in turn gives us an 8-byte-aligned malloc.
+ // Emscripten's ABI is unstable and we may change this back to 128 to match
+ // the WebAssembly default in the future.
+ this->LongDoubleAlign = 64;
+ }
};
} // namespace targets
diff --git a/clang/lib/Basic/Targets/PPC.cpp b/clang/lib/Basic/Targets/PPC.cpp
index cfede6e6e756..59656888e25f 100644
--- a/clang/lib/Basic/Targets/PPC.cpp
+++ b/clang/lib/Basic/Targets/PPC.cpp
@@ -56,7 +56,10 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasP10Vector = true;
} else if (Feature == "+pcrelative-memops") {
HasPCRelativeMemops = true;
+ } else if (Feature == "+prefix-instrs") {
+ HasPrefixInstrs = true;
} else if (Feature == "+spe" || Feature == "+efpu2") {
+ HasStrictFP = false;
HasSPE = true;
LongDoubleWidth = LongDoubleAlign = 64;
LongDoubleFormat = &llvm::APFloat::IEEEdouble();
@@ -66,6 +69,16 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
PairedVectorMemops = true;
} else if (Feature == "+mma") {
HasMMA = true;
+ } else if (Feature == "+rop-protect") {
+ HasROPProtect = true;
+ } else if (Feature == "+privileged") {
+ HasPrivileged = true;
+ } else if (Feature == "+isa-v207-instructions") {
+ IsISA2_07 = true;
+ } else if (Feature == "+isa-v30-instructions") {
+ IsISA3_0 = true;
+ } else if (Feature == "+isa-v31-instructions") {
+ IsISA3_1 = true;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -74,10 +87,164 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
return true;
}
+static void defineXLCompatMacros(MacroBuilder &Builder) {
+ Builder.defineMacro("__popcntb", "__builtin_ppc_popcntb");
+ Builder.defineMacro("__poppar4", "__builtin_ppc_poppar4");
+ Builder.defineMacro("__poppar8", "__builtin_ppc_poppar8");
+ Builder.defineMacro("__eieio", "__builtin_ppc_eieio");
+ Builder.defineMacro("__iospace_eieio", "__builtin_ppc_iospace_eieio");
+ Builder.defineMacro("__isync", "__builtin_ppc_isync");
+ Builder.defineMacro("__lwsync", "__builtin_ppc_lwsync");
+ Builder.defineMacro("__iospace_lwsync", "__builtin_ppc_iospace_lwsync");
+ Builder.defineMacro("__sync", "__builtin_ppc_sync");
+ Builder.defineMacro("__iospace_sync", "__builtin_ppc_iospace_sync");
+ Builder.defineMacro("__dcbfl", "__builtin_ppc_dcbfl");
+ Builder.defineMacro("__dcbflp", "__builtin_ppc_dcbflp");
+ Builder.defineMacro("__dcbst", "__builtin_ppc_dcbst");
+ Builder.defineMacro("__dcbt", "__builtin_ppc_dcbt");
+ Builder.defineMacro("__dcbtst", "__builtin_ppc_dcbtst");
+ Builder.defineMacro("__dcbz", "__builtin_ppc_dcbz");
+ Builder.defineMacro("__icbt", "__builtin_ppc_icbt");
+ Builder.defineMacro("__compare_and_swap", "__builtin_ppc_compare_and_swap");
+ Builder.defineMacro("__compare_and_swaplp",
+ "__builtin_ppc_compare_and_swaplp");
+ Builder.defineMacro("__fetch_and_add", "__builtin_ppc_fetch_and_add");
+ Builder.defineMacro("__fetch_and_addlp", "__builtin_ppc_fetch_and_addlp");
+ Builder.defineMacro("__fetch_and_and", "__builtin_ppc_fetch_and_and");
+ Builder.defineMacro("__fetch_and_andlp", "__builtin_ppc_fetch_and_andlp");
+ Builder.defineMacro("__fetch_and_or", "__builtin_ppc_fetch_and_or");
+ Builder.defineMacro("__fetch_and_orlp", "__builtin_ppc_fetch_and_orlp");
+ Builder.defineMacro("__fetch_and_swap", "__builtin_ppc_fetch_and_swap");
+ Builder.defineMacro("__fetch_and_swaplp", "__builtin_ppc_fetch_and_swaplp");
+ Builder.defineMacro("__ldarx", "__builtin_ppc_ldarx");
+ Builder.defineMacro("__lwarx", "__builtin_ppc_lwarx");
+ Builder.defineMacro("__lharx", "__builtin_ppc_lharx");
+ Builder.defineMacro("__lbarx", "__builtin_ppc_lbarx");
+ Builder.defineMacro("__stfiw", "__builtin_ppc_stfiw");
+ Builder.defineMacro("__stdcx", "__builtin_ppc_stdcx");
+ Builder.defineMacro("__stwcx", "__builtin_ppc_stwcx");
+ Builder.defineMacro("__sthcx", "__builtin_ppc_sthcx");
+ Builder.defineMacro("__stbcx", "__builtin_ppc_stbcx");
+ Builder.defineMacro("__tdw", "__builtin_ppc_tdw");
+ Builder.defineMacro("__tw", "__builtin_ppc_tw");
+ Builder.defineMacro("__trap", "__builtin_ppc_trap");
+ Builder.defineMacro("__trapd", "__builtin_ppc_trapd");
+ Builder.defineMacro("__fcfid", "__builtin_ppc_fcfid");
+ Builder.defineMacro("__fcfud", "__builtin_ppc_fcfud");
+ Builder.defineMacro("__fctid", "__builtin_ppc_fctid");
+ Builder.defineMacro("__fctidz", "__builtin_ppc_fctidz");
+ Builder.defineMacro("__fctiw", "__builtin_ppc_fctiw");
+ Builder.defineMacro("__fctiwz", "__builtin_ppc_fctiwz");
+ Builder.defineMacro("__fctudz", "__builtin_ppc_fctudz");
+ Builder.defineMacro("__fctuwz", "__builtin_ppc_fctuwz");
+ Builder.defineMacro("__cmpeqb", "__builtin_ppc_cmpeqb");
+ Builder.defineMacro("__cmprb", "__builtin_ppc_cmprb");
+ Builder.defineMacro("__setb", "__builtin_ppc_setb");
+ Builder.defineMacro("__cmpb", "__builtin_ppc_cmpb");
+ Builder.defineMacro("__mulhd", "__builtin_ppc_mulhd");
+ Builder.defineMacro("__mulhdu", "__builtin_ppc_mulhdu");
+ Builder.defineMacro("__mulhw", "__builtin_ppc_mulhw");
+ Builder.defineMacro("__mulhwu", "__builtin_ppc_mulhwu");
+ Builder.defineMacro("__maddhd", "__builtin_ppc_maddhd");
+ Builder.defineMacro("__maddhdu", "__builtin_ppc_maddhdu");
+ Builder.defineMacro("__maddld", "__builtin_ppc_maddld");
+ Builder.defineMacro("__rlwnm", "__builtin_ppc_rlwnm");
+ Builder.defineMacro("__rlwimi", "__builtin_ppc_rlwimi");
+ Builder.defineMacro("__rldimi", "__builtin_ppc_rldimi");
+ Builder.defineMacro("__load2r", "__builtin_ppc_load2r");
+ Builder.defineMacro("__load4r", "__builtin_ppc_load4r");
+ Builder.defineMacro("__load8r", "__builtin_ppc_load8r");
+ Builder.defineMacro("__store2r", "__builtin_ppc_store2r");
+ Builder.defineMacro("__store4r", "__builtin_ppc_store4r");
+ Builder.defineMacro("__store8r", "__builtin_ppc_store8r");
+ Builder.defineMacro("__extract_exp", "__builtin_ppc_extract_exp");
+ Builder.defineMacro("__extract_sig", "__builtin_ppc_extract_sig");
+ Builder.defineMacro("__mtfsb0", "__builtin_ppc_mtfsb0");
+ Builder.defineMacro("__mtfsb1", "__builtin_ppc_mtfsb1");
+ Builder.defineMacro("__mtfsf", "__builtin_ppc_mtfsf");
+ Builder.defineMacro("__mtfsfi", "__builtin_ppc_mtfsfi");
+ Builder.defineMacro("__insert_exp", "__builtin_ppc_insert_exp");
+ Builder.defineMacro("__fmsub", "__builtin_ppc_fmsub");
+ Builder.defineMacro("__fmsubs", "__builtin_ppc_fmsubs");
+ Builder.defineMacro("__fnmadd", "__builtin_ppc_fnmadd");
+ Builder.defineMacro("__fnmadds", "__builtin_ppc_fnmadds");
+ Builder.defineMacro("__fnmsub", "__builtin_ppc_fnmsub");
+ Builder.defineMacro("__fnmsubs", "__builtin_ppc_fnmsubs");
+ Builder.defineMacro("__fre", "__builtin_ppc_fre");
+ Builder.defineMacro("__fres", "__builtin_ppc_fres");
+ Builder.defineMacro("__swdiv_nochk", "__builtin_ppc_swdiv_nochk");
+ Builder.defineMacro("__swdivs_nochk", "__builtin_ppc_swdivs_nochk");
+ Builder.defineMacro("__alloca", "__builtin_alloca");
+ Builder.defineMacro("__vcipher", "__builtin_altivec_crypto_vcipher");
+ Builder.defineMacro("__vcipherlast", "__builtin_altivec_crypto_vcipherlast");
+ Builder.defineMacro("__vncipher", "__builtin_altivec_crypto_vncipher");
+ Builder.defineMacro("__vncipherlast",
+ "__builtin_altivec_crypto_vncipherlast");
+ Builder.defineMacro("__vpermxor", "__builtin_altivec_crypto_vpermxor");
+ Builder.defineMacro("__vpmsumb", "__builtin_altivec_crypto_vpmsumb");
+ Builder.defineMacro("__vpmsumd", "__builtin_altivec_crypto_vpmsumd");
+ Builder.defineMacro("__vpmsumh", "__builtin_altivec_crypto_vpmsumh");
+ Builder.defineMacro("__vpmsumw", "__builtin_altivec_crypto_vpmsumw");
+ Builder.defineMacro("__divde", "__builtin_divde");
+ Builder.defineMacro("__divwe", "__builtin_divwe");
+ Builder.defineMacro("__divdeu", "__builtin_divdeu");
+ Builder.defineMacro("__divweu", "__builtin_divweu");
+ Builder.defineMacro("__alignx", "__builtin_ppc_alignx");
+ Builder.defineMacro("__bcopy", "bcopy");
+ Builder.defineMacro("__bpermd", "__builtin_bpermd");
+ Builder.defineMacro("__cntlz4", "__builtin_clz");
+ Builder.defineMacro("__cntlz8", "__builtin_clzll");
+ Builder.defineMacro("__cmplx", "__builtin_complex");
+ Builder.defineMacro("__cmplxf", "__builtin_complex");
+ Builder.defineMacro("__cnttz4", "__builtin_ctz");
+ Builder.defineMacro("__cnttz8", "__builtin_ctzll");
+ Builder.defineMacro("__darn", "__builtin_darn");
+ Builder.defineMacro("__darn_32", "__builtin_darn_32");
+ Builder.defineMacro("__darn_raw", "__builtin_darn_raw");
+ Builder.defineMacro("__dcbf", "__builtin_dcbf");
+ Builder.defineMacro("__fmadd", "__builtin_fma");
+ Builder.defineMacro("__fmadds", "__builtin_fmaf");
+ Builder.defineMacro("__labs", "__builtin_labs");
+ Builder.defineMacro("__llabs", "__builtin_llabs");
+ Builder.defineMacro("__popcnt4", "__builtin_popcount");
+ Builder.defineMacro("__popcnt8", "__builtin_popcountll");
+ Builder.defineMacro("__readflm", "__builtin_readflm");
+ Builder.defineMacro("__rotatel4", "__builtin_rotateleft32");
+ Builder.defineMacro("__rotatel8", "__builtin_rotateleft64");
+ Builder.defineMacro("__rdlam", "__builtin_ppc_rdlam");
+ Builder.defineMacro("__setflm", "__builtin_setflm");
+ Builder.defineMacro("__setrnd", "__builtin_setrnd");
+ Builder.defineMacro("__dcbtstt", "__builtin_ppc_dcbtstt");
+ Builder.defineMacro("__dcbtt", "__builtin_ppc_dcbtt");
+ Builder.defineMacro("__mftbu", "__builtin_ppc_mftbu");
+ Builder.defineMacro("__mfmsr", "__builtin_ppc_mfmsr");
+ Builder.defineMacro("__mtmsr", "__builtin_ppc_mtmsr");
+ Builder.defineMacro("__mfspr", "__builtin_ppc_mfspr");
+ Builder.defineMacro("__mtspr", "__builtin_ppc_mtspr");
+ Builder.defineMacro("__fric", "__builtin_ppc_fric");
+ Builder.defineMacro("__frim", "__builtin_ppc_frim");
+ Builder.defineMacro("__frims", "__builtin_ppc_frims");
+ Builder.defineMacro("__frin", "__builtin_ppc_frin");
+ Builder.defineMacro("__frins", "__builtin_ppc_frins");
+ Builder.defineMacro("__frip", "__builtin_ppc_frip");
+ Builder.defineMacro("__frips", "__builtin_ppc_frips");
+ Builder.defineMacro("__friz", "__builtin_ppc_friz");
+ Builder.defineMacro("__frizs", "__builtin_ppc_frizs");
+ Builder.defineMacro("__fsel", "__builtin_ppc_fsel");
+ Builder.defineMacro("__fsels", "__builtin_ppc_fsels");
+ Builder.defineMacro("__frsqrte", "__builtin_ppc_frsqrte");
+ Builder.defineMacro("__frsqrtes", "__builtin_ppc_frsqrtes");
+ Builder.defineMacro("__fsqrt", "__builtin_ppc_fsqrt");
+ Builder.defineMacro("__fsqrts", "__builtin_ppc_fsqrts");
+}
+
/// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
/// #defines that are not tied to a specific subtarget.
void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+
+ defineXLCompatMacros(Builder);
+
// Target identification.
Builder.defineMacro("__ppc__");
Builder.defineMacro("__PPC__");
@@ -129,6 +296,11 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__LONG_DOUBLE_IBM128__");
}
+ if (getTriple().isOSAIX() && Opts.LongDoubleSize == 64) {
+ assert(LongDoubleWidth == 64);
+ Builder.defineMacro("__LONGDOUBLE64");
+ }
+
// Define this for elfv2 (64-bit only) or 64-bit darwin.
if (ABI == "elfv2" ||
(getTriple().getOS() == llvm::Triple::Darwin && PointerWidth == 64))
@@ -193,8 +365,14 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__POWER9_VECTOR__");
if (HasMMA)
Builder.defineMacro("__MMA__");
+ if (HasROPProtect)
+ Builder.defineMacro("__ROP_PROTECT__");
+ if (HasPrivileged)
+ Builder.defineMacro("__PRIVILEGED__");
if (HasP10Vector)
Builder.defineMacro("__POWER10_VECTOR__");
+ if (HasPCRelativeMemops)
+ Builder.defineMacro("__PCREL__");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
@@ -318,15 +496,26 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr9", true)
.Case("pwr8", true)
.Default(false);
- Features["float128"] = llvm::StringSwitch<bool>(CPU)
- .Case("pwr9", true)
- .Default(false);
+
+ // ROP Protect is off by default.
+ Features["rop-protect"] = false;
+ // Privileged instructions are off by default.
+ Features["privileged"] = false;
Features["spe"] = llvm::StringSwitch<bool>(CPU)
.Case("8548", true)
.Case("e500", true)
.Default(false);
+ Features["isa-v207-instructions"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
+
+ Features["isa-v30-instructions"] =
+ llvm::StringSwitch<bool>(CPU).Case("pwr9", true).Default(false);
+
// Power10 includes all the same features as Power9 plus any features specific
// to the Power10 core.
if (CPU == "pwr10" || CPU == "power10") {
@@ -358,6 +547,19 @@ bool PPCTargetInfo::initFeatureMap(
return false;
}
+ if (!(ArchDefs & ArchDefinePwr8) &&
+ llvm::find(FeaturesVec, "+rop-protect") != FeaturesVec.end()) {
+ // We can turn on ROP Protect on Power 8 and above.
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mrop-protect" << CPU;
+ return false;
+ }
+
+ if (!(ArchDefs & ArchDefinePwr8) &&
+ llvm::find(FeaturesVec, "+privileged") != FeaturesVec.end()) {
+ Diags.Report(diag::err_opt_not_valid_with_opt) << "-mprivileged" << CPU;
+ return false;
+ }
+
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -369,6 +571,8 @@ void PPCTargetInfo::addP10SpecificFeatures(
Features["mma"] = true;
Features["power10-vector"] = true;
Features["pcrelative-memops"] = true;
+ Features["prefix-instrs"] = true;
+ Features["isa-v31-instructions"] = true;
return;
}
@@ -394,8 +598,14 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("paired-vector-memops", PairedVectorMemops)
.Case("power10-vector", HasP10Vector)
.Case("pcrelative-memops", HasPCRelativeMemops)
+ .Case("prefix-instrs", HasPrefixInstrs)
.Case("spe", HasSPE)
.Case("mma", HasMMA)
+ .Case("rop-protect", HasROPProtect)
+ .Case("privileged", HasPrivileged)
+ .Case("isa-v207-instructions", IsISA2_07)
+ .Case("isa-v30-instructions", IsISA3_0)
+ .Case("isa-v31-instructions", IsISA3_1)
.Default(false);
}
@@ -424,6 +634,8 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features["power8-vector"] = Features["power9-vector"] = true;
if (Name == "pcrel")
Features["pcrelative-memops"] = true;
+ else if (Name == "prefixed")
+ Features["prefix-instrs"] = true;
else
Features[Name] = true;
} else {
@@ -444,6 +656,8 @@ void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
Features["power10-vector"] = false;
if (Name == "pcrel")
Features["pcrelative-memops"] = false;
+ else if (Name == "prefixed")
+ Features["prefix-instrs"] = false;
else
Features[Name] = false;
}
@@ -526,17 +740,17 @@ ArrayRef<TargetInfo::AddlRegName> PPCTargetInfo::getGCCAddlRegNames() const {
}
static constexpr llvm::StringLiteral ValidCPUNames[] = {
- {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
- {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
- {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
- {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
- {"g5"}, {"a2"}, {"e500"}, {"e500mc"}, {"e5500"},
- {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"}, {"power5"},
- {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"}, {"pwr6"},
- {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"}, {"power8"},
- {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"}, {"pwr10"},
- {"powerpc"}, {"ppc"}, {"powerpc64"}, {"ppc64"}, {"powerpc64le"},
- {"ppc64le"}, {"future"}};
+ {"generic"}, {"440"}, {"450"}, {"601"}, {"602"},
+ {"603"}, {"603e"}, {"603ev"}, {"604"}, {"604e"},
+ {"620"}, {"630"}, {"g3"}, {"7400"}, {"g4"},
+ {"7450"}, {"g4+"}, {"750"}, {"8548"}, {"970"},
+ {"g5"}, {"a2"}, {"e500"}, {"e500mc"}, {"e5500"},
+ {"power3"}, {"pwr3"}, {"power4"}, {"pwr4"}, {"power5"},
+ {"pwr5"}, {"power5x"}, {"pwr5x"}, {"power6"}, {"pwr6"},
+ {"power6x"}, {"pwr6x"}, {"power7"}, {"pwr7"}, {"power8"},
+ {"pwr8"}, {"power9"}, {"pwr9"}, {"power10"}, {"pwr10"},
+ {"powerpc"}, {"ppc"}, {"ppc32"}, {"powerpc64"}, {"ppc64"},
+ {"powerpc64le"}, {"ppc64le"}, {"future"}};
bool PPCTargetInfo::isValidCPUName(StringRef Name) const {
return llvm::find(ValidCPUNames, Name) != std::end(ValidCPUNames);
@@ -546,14 +760,15 @@ void PPCTargetInfo::fillValidCPUList(SmallVectorImpl<StringRef> &Values) const {
Values.append(std::begin(ValidCPUNames), std::end(ValidCPUNames));
}
-void PPCTargetInfo::adjust(LangOptions &Opts) {
+void PPCTargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
if (HasAltivec)
Opts.AltiVec = 1;
- TargetInfo::adjust(Opts);
+ TargetInfo::adjust(Diags, Opts);
if (LongDoubleFormat != &llvm::APFloat::IEEEdouble())
LongDoubleFormat = Opts.PPCIEEELongDouble
? &llvm::APFloat::IEEEquad()
: &llvm::APFloat::PPCDoubleDouble();
+ Opts.IEEE128 = 1;
}
ArrayRef<Builtin::Info> PPCTargetInfo::getTargetBuiltins() const {
diff --git a/clang/lib/Basic/Targets/PPC.h b/clang/lib/Basic/Targets/PPC.h
index 56c8f33ef221..7c14a4eb9410 100644
--- a/clang/lib/Basic/Targets/PPC.h
+++ b/clang/lib/Basic/Targets/PPC.h
@@ -59,6 +59,8 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
// Target cpu features.
bool HasAltivec = false;
bool HasMMA = false;
+ bool HasROPProtect = false;
+ bool HasPrivileged = false;
bool HasVSX = false;
bool HasP8Vector = false;
bool HasP8Crypto = false;
@@ -71,6 +73,10 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool PairedVectorMemops = false;
bool HasP10Vector = false;
bool HasPCRelativeMemops = false;
+ bool HasPrefixInstrs = false;
+ bool IsISA2_07 = false;
+ bool IsISA3_0 = false;
+ bool IsISA3_1 = false;
protected:
std::string ABI;
@@ -86,7 +92,7 @@ public:
}
// Set the language option for altivec based on our value.
- void adjust(LangOptions &Opts) override;
+ void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;
// Note: GCC recognizes the following additional cpus:
// 401, 403, 405, 405fp, 440fp, 464, 464fp, 476, 476fp, 505, 740, 801,
@@ -427,7 +433,7 @@ public:
}
if (Triple.isOSAIX() || Triple.isOSLinux())
- DataLayout += "-v256:256:256-v512:512:512";
+ DataLayout += "-S128-v256:256:256-v512:512:512";
resetDataLayout(DataLayout);
// PPC64 supports atomics up to 8 bytes.
@@ -451,6 +457,8 @@ public:
switch (CC) {
case CC_Swift:
return CCCR_OK;
+ case CC_SwiftAsync:
+ return CCCR_Error;
default:
return CCCR_Warning;
}
@@ -466,7 +474,7 @@ public:
BoolWidth = BoolAlign = 32; // XXX support -mone-byte-bool?
PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
LongLongAlign = 32;
- resetDataLayout("E-m:o-p:32:32-f64:32:64-n32");
+ resetDataLayout("E-m:o-p:32:32-f64:32:64-n32", "_");
}
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -480,7 +488,7 @@ public:
DarwinPPC64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: DarwinTargetInfo<PPC64TargetInfo>(Triple, Opts) {
HasAlignMac68kSupport = true;
- resetDataLayout("E-m:o-i64:64-n32:64");
+ resetDataLayout("E-m:o-i64:64-n32:64", "_");
}
};
diff --git a/clang/lib/Basic/Targets/RISCV.cpp b/clang/lib/Basic/Targets/RISCV.cpp
index 0bf02e605740..9705129b39d8 100644
--- a/clang/lib/Basic/Targets/RISCV.cpp
+++ b/clang/lib/Basic/Targets/RISCV.cpp
@@ -12,6 +12,7 @@
#include "RISCV.h"
#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/TargetParser.h"
@@ -30,7 +31,13 @@ ArrayRef<const char *> RISCVTargetInfo::getGCCRegNames() const {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
- "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+
+ // Vector registers
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
return llvm::makeArrayRef(GCCRegNames);
}
@@ -80,7 +87,32 @@ bool RISCVTargetInfo::validateAsmConstraint(
// An address that is held in a general-purpose register.
Info.setAllowsMemory();
return true;
+ case 'S': // A symbolic address
+ Info.setAllowsRegister();
+ return true;
+ case 'v':
+ // A vector register.
+ if (Name[1] == 'r' || Name[1] == 'm') {
+ Info.setAllowsRegister();
+ Name += 1;
+ return true;
+ }
+ return false;
+ }
+}
+
+std::string RISCVTargetInfo::convertConstraint(const char *&Constraint) const {
+ std::string R;
+ switch (*Constraint) {
+ case 'v':
+ R = std::string("v");
+ Constraint += 1;
+ break;
+ default:
+ R = TargetInfo::convertConstraint(Constraint);
+ break;
}
+ return R;
}
void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
@@ -150,7 +182,7 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
}
if (HasV) {
- Builder.defineMacro("__riscv_v", "1000000");
+ Builder.defineMacro("__riscv_v", "10000");
Builder.defineMacro("__riscv_vector");
}
@@ -191,10 +223,33 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__riscv_zfh", "1000");
if (HasZvamo)
- Builder.defineMacro("__riscv_zvamo", "1000000");
+ Builder.defineMacro("__riscv_zvamo", "10000");
if (HasZvlsseg)
- Builder.defineMacro("__riscv_zvlsseg", "1000000");
+ Builder.defineMacro("__riscv_zvlsseg", "10000");
+}
+
+const Builtin::Info RISCVTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#define TARGET_BUILTIN(ID, TYPE, ATTRS, FEATURE) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, FEATURE},
+#include "clang/Basic/BuiltinsRISCV.def"
+};
+
+ArrayRef<Builtin::Info> RISCVTargetInfo::getTargetBuiltins() const {
+ return llvm::makeArrayRef(BuiltinInfo, clang::RISCV::LastTSBuiltin -
+ Builtin::FirstTSBuiltin);
+}
+
+bool RISCVTargetInfo::initFeatureMap(
+ llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const {
+
+ if (getTriple().getArch() == llvm::Triple::riscv64)
+ Features["64bit"] = true;
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
/// Return true if has this feature, need to sync with handleTargetFeatures.
@@ -204,6 +259,7 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
.Case("riscv", true)
.Case("riscv32", !Is64Bit)
.Case("riscv64", Is64Bit)
+ .Case("64bit", Is64Bit)
.Case("m", HasM)
.Case("a", HasA)
.Case("f", HasF)
diff --git a/clang/lib/Basic/Targets/RISCV.h b/clang/lib/Basic/Targets/RISCV.h
index f1e9215b2d17..7e0846581ca1 100644
--- a/clang/lib/Basic/Targets/RISCV.h
+++ b/clang/lib/Basic/Targets/RISCV.h
@@ -47,6 +47,8 @@ protected:
bool HasZvamo = false;
bool HasZvlsseg = false;
+ static const Builtin::Info BuiltinInfo[];
+
public:
RISCVTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
@@ -56,6 +58,9 @@ public:
SuitableAlign = 128;
WCharType = SignedInt;
WIntType = UnsignedInt;
+ HasRISCVVTypes = true;
+ MCountName = "_mcount";
+ HasFloat16 = true;
}
bool setCPU(const std::string &Name) override {
@@ -69,7 +74,7 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override;
- ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
BuiltinVaListKind getBuiltinVaListKind() const override {
return TargetInfo::VoidPtrBuiltinVaList;
@@ -93,6 +98,13 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override;
+ std::string convertConstraint(const char *&Constraint) const override;
+
+ bool
+ initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,
+ StringRef CPU,
+ const std::vector<std::string> &FeaturesVec) const override;
+
bool hasFeature(StringRef Feature) const override;
bool handleTargetFeatures(std::vector<std::string> &Features,
diff --git a/clang/lib/Basic/Targets/SPIR.h b/clang/lib/Basic/Targets/SPIR.h
index a2e812624d37..50f34abd6630 100644
--- a/clang/lib/Basic/Targets/SPIR.h
+++ b/clang/lib/Basic/Targets/SPIR.h
@@ -21,7 +21,7 @@
namespace clang {
namespace targets {
-static const unsigned SPIRAddrSpaceMap[] = {
+static const unsigned SPIRDefIsPrivMap[] = {
0, // Default
1, // opencl_global
3, // opencl_local
@@ -33,6 +33,35 @@ static const unsigned SPIRAddrSpaceMap[] = {
0, // cuda_device
0, // cuda_constant
0, // cuda_shared
+ // SYCL address space values for this map are dummy
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
+ 0, // ptr32_sptr
+ 0, // ptr32_uptr
+ 0 // ptr64
+};
+
+static const unsigned SPIRDefIsGenMap[] = {
+ 4, // Default
+ // OpenCL address space values for this map are dummy and they can't be used
+ 0, // opencl_global
+ 0, // opencl_local
+ 0, // opencl_constant
+ 0, // opencl_private
+ 0, // opencl_generic
+ 0, // opencl_global_device
+ 0, // opencl_global_host
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0, // cuda_shared
+ 1, // sycl_global
+ 5, // sycl_global_device
+ 6, // sycl_global_host
+ 3, // sycl_local
+ 0, // sycl_private
0, // ptr32_sptr
0, // ptr32_uptr
0 // ptr64
@@ -49,7 +78,7 @@ public:
TLSSupported = false;
VLASupported = false;
LongWidth = LongAlign = 64;
- AddrSpaceMap = &SPIRAddrSpaceMap;
+ AddrSpaceMap = &SPIRDefIsPrivMap;
UseAddrSpaceMapMangling = true;
HasLegalHalfType = true;
HasFloat16 = true;
@@ -88,6 +117,11 @@ public:
return TargetInfo::VoidPtrBuiltinVaList;
}
+ Optional<unsigned>
+ getDWARFAddressSpace(unsigned AddressSpace) const override {
+ return AddressSpace;
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
return (CC == CC_SpirFunction || CC == CC_OpenCLKernel) ? CCCR_OK
: CCCR_Warning;
@@ -97,6 +131,22 @@ public:
return CC_SpirFunction;
}
+ void setAddressSpaceMap(bool DefaultIsGeneric) {
+ AddrSpaceMap = DefaultIsGeneric ? &SPIRDefIsGenMap : &SPIRDefIsPrivMap;
+ }
+
+ void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override {
+ TargetInfo::adjust(Diags, Opts);
+ // FIXME: SYCL specification considers unannotated pointers and references
+ // to be pointing to the generic address space. See section 5.9.3 of
+ // SYCL 2020 specification.
+ // Currently, there is no way of representing SYCL's default address space
+ // language semantic along with the semantics of embedded C's default
+ // address space in the same address space map. Hence the map needs to be
+ // reset to allow mapping to the desired value of 'Default' entry for SYCL.
+ setAddressSpaceMap(/*DefaultIsGeneric=*/Opts.SYCLIsDevice);
+ }
+
void setSupportedOpenCLOpts() override {
// Assume all OpenCL extensions and optional core features are supported
// for SPIR since it is a generic target.
@@ -107,6 +157,7 @@ public:
bool hasInt128Type() const override { return false; }
};
+
class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
public:
SPIR32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
diff --git a/clang/lib/Basic/Targets/SystemZ.cpp b/clang/lib/Basic/Targets/SystemZ.cpp
index ad3915e4d5dd..e3e0da21f8d5 100644
--- a/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/clang/lib/Basic/Targets/SystemZ.cpp
@@ -92,7 +92,8 @@ static constexpr ISANameRevision ISARevisions[] = {
{{"arch10"}, 10}, {{"zEC12"}, 10},
{{"arch11"}, 11}, {{"z13"}, 11},
{{"arch12"}, 12}, {{"z14"}, 12},
- {{"arch13"}, 13}, {{"z15"}, 13}
+ {{"arch13"}, 13}, {{"z15"}, 13},
+ {{"arch14"}, 14}
};
int SystemZTargetInfo::getISARevision(StringRef Name) const {
@@ -120,6 +121,7 @@ bool SystemZTargetInfo::hasFeature(StringRef Feature) const {
.Case("arch11", ISARevision >= 11)
.Case("arch12", ISARevision >= 12)
.Case("arch13", ISARevision >= 13)
+ .Case("arch14", ISARevision >= 14)
.Case("htm", HasTransactionalExecution)
.Case("vx", HasVector)
.Default(false);
@@ -144,7 +146,7 @@ void SystemZTargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasVector)
Builder.defineMacro("__VX__");
if (Opts.ZVector)
- Builder.defineMacro("__VEC__", "10303");
+ Builder.defineMacro("__VEC__", "10304");
}
ArrayRef<Builtin::Info> SystemZTargetInfo::getTargetBuiltins() const {
diff --git a/clang/lib/Basic/Targets/SystemZ.h b/clang/lib/Basic/Targets/SystemZ.h
index 39fdcf90d0c8..b749c3f75d18 100644
--- a/clang/lib/Basic/Targets/SystemZ.h
+++ b/clang/lib/Basic/Targets/SystemZ.h
@@ -108,6 +108,8 @@ public:
Features["vector-enhancements-1"] = true;
if (ISARevision >= 13)
Features["vector-enhancements-2"] = true;
+ if (ISARevision >= 14)
+ Features["nnp-assist"] = true;
return TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec);
}
@@ -143,6 +145,8 @@ public:
case CC_Swift:
case CC_OpenCLKernel:
return CCCR_OK;
+ case CC_SwiftAsync:
+ return CCCR_Error;
default:
return CCCR_Warning;
}
diff --git a/clang/lib/Basic/Targets/TCE.h b/clang/lib/Basic/Targets/TCE.h
index 445fe4fe7293..251b4d4b56f7 100644
--- a/clang/lib/Basic/Targets/TCE.h
+++ b/clang/lib/Basic/Targets/TCE.h
@@ -42,6 +42,11 @@ static const unsigned TCEOpenCLAddrSpaceMap[] = {
0, // cuda_device
0, // cuda_constant
0, // cuda_shared
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
0, // ptr32_sptr
0, // ptr32_uptr
0, // ptr64
diff --git a/clang/lib/Basic/Targets/WebAssembly.cpp b/clang/lib/Basic/Targets/WebAssembly.cpp
index dcb3d8fd7790..7ef79849cb75 100644
--- a/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -46,7 +46,6 @@ bool WebAssemblyTargetInfo::setABI(const std::string &Name) {
bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
.Case("simd128", SIMDLevel >= SIMD128)
- .Case("unimplemented-simd128", SIMDLevel >= UnimplementedSIMD128)
.Case("nontrapping-fptoint", HasNontrappingFPToInt)
.Case("sign-ext", HasSignExt)
.Case("exception-handling", HasExceptionHandling)
@@ -73,8 +72,6 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
defineCPUMacros(Builder, "wasm", /*Tuning=*/false);
if (SIMDLevel >= SIMD128)
Builder.defineMacro("__wasm_simd128__");
- if (SIMDLevel >= UnimplementedSIMD128)
- Builder.defineMacro("__wasm_unimplemented_simd128__");
if (HasNontrappingFPToInt)
Builder.defineMacro("__wasm_nontrapping_fptoint__");
if (HasSignExt)
@@ -99,9 +96,6 @@ void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
SIMDEnum Level, bool Enabled) {
if (Enabled) {
switch (Level) {
- case UnimplementedSIMD128:
- Features["unimplemented-simd128"] = true;
- LLVM_FALLTHROUGH;
case SIMD128:
Features["simd128"] = true;
LLVM_FALLTHROUGH;
@@ -115,9 +109,6 @@ void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
case NoSIMD:
case SIMD128:
Features["simd128"] = false;
- LLVM_FALLTHROUGH;
- case UnimplementedSIMD128:
- Features["unimplemented-simd128"] = false;
break;
}
}
@@ -127,8 +118,6 @@ void WebAssemblyTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
bool Enabled) const {
if (Name == "simd128")
setSIMDLevel(Features, SIMD128, Enabled);
- else if (Name == "unimplemented-simd128")
- setSIMDLevel(Features, UnimplementedSIMD128, Enabled);
else
Features[Name] = Enabled;
}
@@ -160,14 +149,6 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
SIMDLevel = std::min(SIMDLevel, SIMDEnum(SIMD128 - 1));
continue;
}
- if (Feature == "+unimplemented-simd128") {
- SIMDLevel = std::max(SIMDLevel, SIMDEnum(UnimplementedSIMD128));
- continue;
- }
- if (Feature == "-unimplemented-simd128") {
- SIMDLevel = std::min(SIMDLevel, SIMDEnum(UnimplementedSIMD128 - 1));
- continue;
- }
if (Feature == "+nontrapping-fptoint") {
HasNontrappingFPToInt = true;
continue;
@@ -253,6 +234,16 @@ ArrayRef<Builtin::Info> WebAssemblyTargetInfo::getTargetBuiltins() const {
Builtin::FirstTSBuiltin);
}
+void WebAssemblyTargetInfo::adjust(DiagnosticsEngine &Diags,
+ LangOptions &Opts) {
+ // If the Atomics feature isn't available, turn off POSIXThreads and
+ // ThreadModel, so that we don't predefine _REENTRANT or __STDCPP_THREADS__.
+ if (!HasAtomics) {
+ Opts.POSIXThreads = false;
+ Opts.setThreadModel(LangOptions::ThreadModelKind::Single);
+ }
+}
+
void WebAssembly32TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
WebAssemblyTargetInfo::getTargetDefines(Opts, Builder);
diff --git a/clang/lib/Basic/Targets/WebAssembly.h b/clang/lib/Basic/Targets/WebAssembly.h
index 0068ccb5d71f..4a5ba25c75e7 100644
--- a/clang/lib/Basic/Targets/WebAssembly.h
+++ b/clang/lib/Basic/Targets/WebAssembly.h
@@ -27,7 +27,6 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
enum SIMDEnum {
NoSIMD,
SIMD128,
- UnimplementedSIMD128,
} SIMDLevel = NoSIMD;
bool HasNontrappingFPToInt = false;
@@ -130,6 +129,8 @@ private:
case CC_C:
case CC_Swift:
return CCCR_OK;
+ case CC_SwiftAsync:
+ return CCCR_Error;
default:
return CCCR_Warning;
}
@@ -138,6 +139,8 @@ private:
bool hasExtIntType() const override { return true; }
bool hasProtectedVisibility() const override { return false; }
+
+ void adjust(DiagnosticsEngine &Diags, LangOptions &Opts) override;
};
class LLVM_LIBRARY_VISIBILITY WebAssembly32TargetInfo
@@ -146,7 +149,10 @@ public:
explicit WebAssembly32TargetInfo(const llvm::Triple &T,
const TargetOptions &Opts)
: WebAssemblyTargetInfo(T, Opts) {
- resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128");
+ if (T.isOSEmscripten())
+ resetDataLayout("e-m:e-p:32:32-i64:64-f128:64-n32:64-S128-ni:1:10:20");
+ else
+ resetDataLayout("e-m:e-p:32:32-i64:64-n32:64-S128-ni:1:10:20");
}
protected:
@@ -165,7 +171,10 @@ public:
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntPtrType = SignedLong;
- resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128");
+ if (T.isOSEmscripten())
+ resetDataLayout("e-m:e-p:64:64-i64:64-f128:64-n32:64-S128-ni:1:10:20");
+ else
+ resetDataLayout("e-m:e-p:64:64-i64:64-n32:64-S128-ni:1:10:20");
}
protected:
diff --git a/clang/lib/Basic/Targets/X86.cpp b/clang/lib/Basic/Targets/X86.cpp
index 694a8095e336..9db96c20250f 100644
--- a/clang/lib/Basic/Targets/X86.cpp
+++ b/clang/lib/Basic/Targets/X86.cpp
@@ -117,7 +117,20 @@ bool X86TargetInfo::initFeatureMap(
for (auto &F : CPUFeatures)
setFeatureEnabled(Features, F, true);
- if (!TargetInfo::initFeatureMap(Features, Diags, CPU, FeaturesVec))
+ std::vector<std::string> UpdatedFeaturesVec;
+ for (const auto &Feature : FeaturesVec) {
+ // Expand general-regs-only to -x87, -mmx and -sse
+ if (Feature == "+general-regs-only") {
+ UpdatedFeaturesVec.push_back("-x87");
+ UpdatedFeaturesVec.push_back("-mmx");
+ UpdatedFeaturesVec.push_back("-sse");
+ continue;
+ }
+
+ UpdatedFeaturesVec.push_back(Feature);
+ }
+
+ if (!TargetInfo::initFeatureMap(Features, Diags, CPU, UpdatedFeaturesVec))
return false;
// Can't do this earlier because we need to be able to explicitly enable
@@ -126,20 +139,20 @@ bool X86TargetInfo::initFeatureMap(
// Enable popcnt if sse4.2 is enabled and popcnt is not explicitly disabled.
auto I = Features.find("sse4.2");
if (I != Features.end() && I->getValue() &&
- llvm::find(FeaturesVec, "-popcnt") == FeaturesVec.end())
+ llvm::find(UpdatedFeaturesVec, "-popcnt") == UpdatedFeaturesVec.end())
Features["popcnt"] = true;
// Additionally, if SSE is enabled and mmx is not explicitly disabled,
// then enable MMX.
I = Features.find("sse");
if (I != Features.end() && I->getValue() &&
- llvm::find(FeaturesVec, "-mmx") == FeaturesVec.end())
+ llvm::find(UpdatedFeaturesVec, "-mmx") == UpdatedFeaturesVec.end())
Features["mmx"] = true;
// Enable xsave if avx is enabled and xsave is not explicitly disabled.
I = Features.find("avx");
if (I != Features.end() && I->getValue() &&
- llvm::find(FeaturesVec, "-xsave") == FeaturesVec.end())
+ llvm::find(UpdatedFeaturesVec, "-xsave") == UpdatedFeaturesVec.end())
Features["xsave"] = true;
return true;
@@ -467,6 +480,7 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_Cooperlake:
case CK_Cannonlake:
case CK_IcelakeClient:
+ case CK_Rocketlake:
case CK_IcelakeServer:
case CK_Tigerlake:
case CK_SapphireRapids:
@@ -513,10 +527,11 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
case CK_K8:
case CK_K8SSE3:
case CK_x86_64:
+ defineCPUMacros(Builder, "k8");
+ break;
case CK_x86_64_v2:
case CK_x86_64_v3:
case CK_x86_64_v4:
- defineCPUMacros(Builder, "k8");
break;
case CK_AMDFAM10:
defineCPUMacros(Builder, "amdfam10");
@@ -864,6 +879,7 @@ bool X86TargetInfo::isValidFeatureName(StringRef Name) const {
.Case("fma4", true)
.Case("fsgsbase", true)
.Case("fxsr", true)
+ .Case("general-regs-only", true)
.Case("gfni", true)
.Case("hreset", true)
.Case("invpcid", true)
@@ -1314,6 +1330,7 @@ Optional<unsigned> X86TargetInfo::getCPUCacheLineSize() const {
case CK_Tigerlake:
case CK_SapphireRapids:
case CK_IcelakeClient:
+ case CK_Rocketlake:
case CK_IcelakeServer:
case CK_Alderlake:
case CK_KNL:
@@ -1396,13 +1413,13 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
return Size <= 64;
case 'z':
// XMM0/YMM/ZMM0
- if (FeatureMap.lookup("avx512f"))
+ if (hasFeatureEnabled(FeatureMap, "avx512f"))
// ZMM0 can be used if target supports AVX512F.
return Size <= 512U;
- else if (FeatureMap.lookup("avx"))
+ else if (hasFeatureEnabled(FeatureMap, "avx"))
// YMM0 can be used if target supports AVX.
return Size <= 256U;
- else if (FeatureMap.lookup("sse"))
+ else if (hasFeatureEnabled(FeatureMap, "sse"))
return Size <= 128U;
return false;
case 'i':
@@ -1416,10 +1433,10 @@ bool X86TargetInfo::validateOperandSize(const llvm::StringMap<bool> &FeatureMap,
break;
case 'v':
case 'x':
- if (FeatureMap.lookup("avx512f"))
+ if (hasFeatureEnabled(FeatureMap, "avx512f"))
// 512-bit zmm registers can be used if target supports AVX512F.
return Size <= 512U;
- else if (FeatureMap.lookup("avx"))
+ else if (hasFeatureEnabled(FeatureMap, "avx"))
// 256-bit ymm registers can be used if target supports AVX.
return Size <= 256U;
return Size <= 128U;
diff --git a/clang/lib/Basic/Targets/X86.h b/clang/lib/Basic/Targets/X86.h
index 91a365c7d405..fcaaf50624e9 100644
--- a/clang/lib/Basic/Targets/X86.h
+++ b/clang/lib/Basic/Targets/X86.h
@@ -35,6 +35,11 @@ static const unsigned X86AddrSpaceMap[] = {
0, // cuda_device
0, // cuda_constant
0, // cuda_shared
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
270, // ptr32_sptr
271, // ptr32_uptr
272 // ptr64
@@ -333,6 +338,10 @@ public:
bool setFPMath(StringRef Name) override;
+ bool supportsExtendIntArgs() const override {
+ return getTriple().getArch() != llvm::Triple::x86;
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
// Most of the non-ARM calling conventions are i386 conventions.
switch (CC) {
@@ -348,11 +357,15 @@ public:
case CC_IntelOclBicc:
case CC_OpenCLKernel:
return CCCR_OK;
+ case CC_SwiftAsync:
+ return CCCR_Error;
default:
return CCCR_Warning;
}
}
+ bool checkArithmeticFenceSupported() const override { return true; }
+
CallingConv getDefaultCallingConv() const override {
return CC_C;
}
@@ -383,11 +396,13 @@ public:
LongDoubleWidth = 96;
LongDoubleAlign = 32;
SuitableAlign = 128;
- resetDataLayout(Triple.isOSBinFormatMachO() ?
- "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
- "f80:32-n8:16:32-S128" :
- "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
- "f80:32-n8:16:32-S128");
+ resetDataLayout(
+ Triple.isOSBinFormatMachO()
+ ? "e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
+ "f80:32-n8:16:32-S128"
+ : "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
+ "f80:32-n8:16:32-S128",
+ Triple.isOSBinFormatMachO() ? "_" : "");
SizeType = UnsignedInt;
PtrDiffType = SignedInt;
IntPtrType = SignedInt;
@@ -491,7 +506,7 @@ public:
SizeType = UnsignedLong;
IntPtrType = SignedLong;
resetDataLayout("e-m:o-p:32:32-p270:32:32-p271:32:32-p272:64:64-f64:32:64-"
- "f80:128-n8:16:32-S128");
+ "f80:128-n8:16:32-S128", "_");
HasAlignMac68kSupport = true;
}
@@ -519,7 +534,8 @@ public:
resetDataLayout(IsWinCOFF ? "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:"
"64-i64:64-f80:32-n8:16:32-a:0:32-S32"
: "e-m:e-p:32:32-p270:32:32-p271:32:32-p272:64:"
- "64-i64:64-f80:32-n8:16:32-a:0:32-S32");
+ "64-i64:64-f80:32-n8:16:32-a:0:32-S32",
+ IsWinCOFF ? "_" : "");
}
};
@@ -568,7 +584,8 @@ public:
this->WCharType = TargetInfo::UnsignedShort;
DoubleAlign = LongLongAlign = 64;
resetDataLayout("e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:"
- "32-n8:16:32-a:0:32-S32");
+ "32-n8:16:32-a:0:32-S32",
+ "_");
}
void getTargetDefines(const LangOptions &Opts,
@@ -648,7 +665,7 @@ class LLVM_LIBRARY_VISIBILITY X86_64TargetInfo : public X86TargetInfo {
public:
X86_64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: X86TargetInfo(Triple, Opts) {
- const bool IsX32 = getTriple().getEnvironment() == llvm::Triple::GNUX32;
+ const bool IsX32 = getTriple().isX32();
bool IsWinCOFF =
getTriple().isOSWindows() && getTriple().isOSBinFormatCOFF();
LongWidth = LongAlign = PointerWidth = PointerAlign = IsX32 ? 32 : 64;
@@ -702,6 +719,7 @@ public:
switch (CC) {
case CC_C:
case CC_Swift:
+ case CC_SwiftAsync:
case CC_X86VectorCall:
case CC_IntelOclBicc:
case CC_Win64:
@@ -783,6 +801,7 @@ public:
case CC_PreserveAll:
case CC_X86_64SysV:
case CC_Swift:
+ case CC_SwiftAsync:
case CC_X86RegCall:
case CC_OpenCLKernel:
return CCCR_OK;
@@ -863,7 +882,7 @@ public:
if (T.isiOS())
UseSignedCharForObjCBool = false;
resetDataLayout("e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:"
- "16:32:64-S128");
+ "16:32:64-S128", "_");
}
bool handleTargetFeatures(std::vector<std::string> &Features,
diff --git a/clang/lib/Basic/Targets/XCore.cpp b/clang/lib/Basic/Targets/XCore.cpp
index da614f10e338..ba64f15f3394 100644
--- a/clang/lib/Basic/Targets/XCore.cpp
+++ b/clang/lib/Basic/Targets/XCore.cpp
@@ -28,6 +28,7 @@ const Builtin::Info XCoreTargetInfo::BuiltinInfo[] = {
void XCoreTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
+ Builder.defineMacro("__xcore__");
Builder.defineMacro("__XS1B__");
}
diff --git a/clang/lib/Basic/XRayInstr.cpp b/clang/lib/Basic/XRayInstr.cpp
index 79052e05860e..822e14bbb622 100644
--- a/clang/lib/Basic/XRayInstr.cpp
+++ b/clang/lib/Basic/XRayInstr.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/XRayInstr.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
namespace clang {
@@ -30,4 +31,30 @@ XRayInstrMask parseXRayInstrValue(StringRef Value) {
return ParsedKind;
}
+void serializeXRayInstrValue(XRayInstrSet Set,
+ SmallVectorImpl<StringRef> &Values) {
+ if (Set.Mask == XRayInstrKind::All) {
+ Values.push_back("all");
+ return;
+ }
+
+ if (Set.Mask == XRayInstrKind::None) {
+ Values.push_back("none");
+ return;
+ }
+
+ if (Set.has(XRayInstrKind::Custom))
+ Values.push_back("custom");
+
+ if (Set.has(XRayInstrKind::Typed))
+ Values.push_back("typed");
+
+ if (Set.has(XRayInstrKind::FunctionEntry) &&
+ Set.has(XRayInstrKind::FunctionExit))
+ Values.push_back("function");
+ else if (Set.has(XRayInstrKind::FunctionEntry))
+ Values.push_back("function-entry");
+ else if (Set.has(XRayInstrKind::FunctionExit))
+ Values.push_back("function-exit");
+}
} // namespace clang
diff --git a/clang/lib/CodeGen/BackendUtil.cpp b/clang/lib/CodeGen/BackendUtil.cpp
index 52bcd971dc8c..481f5347d978 100644
--- a/clang/lib/CodeGen/BackendUtil.cpp
+++ b/clang/lib/CodeGen/BackendUtil.cpp
@@ -65,6 +65,7 @@
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
@@ -76,14 +77,15 @@
#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
#include "llvm/Transforms/Utils.h"
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
+#include "llvm/Transforms/Utils/Debugify.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
-#include "llvm/Transforms/Utils/UniqueInternalLinkageNames.h"
#include <memory>
using namespace clang;
using namespace llvm;
@@ -243,7 +245,7 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
auto Opts = getSancovOptsFromCGOpts(CGOpts);
PM.add(createModuleSanitizerCoverageLegacyPassPass(
Opts, CGOpts.SanitizeCoverageAllowlistFiles,
- CGOpts.SanitizeCoverageBlocklistFiles));
+ CGOpts.SanitizeCoverageIgnorelistFiles));
}
// Check if ASan should use GC-friendly instrumentation for globals.
@@ -286,16 +288,21 @@ static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope;
bool UseOdrIndicator = CGOpts.SanitizeAddressUseOdrIndicator;
bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
+ llvm::AsanDtorKind DestructorKind = CGOpts.getSanitizeAddressDtor();
+ llvm::AsanDetectStackUseAfterReturnMode UseAfterReturn =
+ CGOpts.getSanitizeAddressUseAfterReturn();
PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
- UseAfterScope));
+ UseAfterScope, UseAfterReturn));
PM.add(createModuleAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ false, Recover, UseGlobalsGC, UseOdrIndicator));
+ /*CompileKernel*/ false, Recover, UseGlobalsGC, UseOdrIndicator,
+ DestructorKind));
}
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
PM.add(createAddressSanitizerFunctionPass(
- /*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false));
+ /*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false,
+ /*UseAfterReturn*/ llvm::AsanDetectStackUseAfterReturnMode::Never));
PM.add(createModuleAddressSanitizerLegacyPassPass(
/*CompileKernel*/ true, /*Recover*/ true, /*UseGlobalsGC*/ true,
/*UseOdrIndicator*/ false));
@@ -307,14 +314,19 @@ static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
static_cast<const PassManagerBuilderWrapper &>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
- PM.add(
- createHWAddressSanitizerLegacyPassPass(/*CompileKernel*/ false, Recover));
+ PM.add(createHWAddressSanitizerLegacyPassPass(
+ /*CompileKernel*/ false, Recover,
+ /*DisableOptimization*/ CGOpts.OptimizationLevel == 0));
}
static void addKernelHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
+ legacy::PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper &>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
PM.add(createHWAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ true, /*Recover*/ true));
+ /*CompileKernel*/ true, /*Recover*/ true,
+ /*DisableOptimization*/ CGOpts.OptimizationLevel == 0));
}
static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
@@ -361,8 +373,18 @@ static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
- PM.add(
- createDataFlowSanitizerLegacyPassPass(LangOpts.SanitizerBlacklistFiles));
+ PM.add(createDataFlowSanitizerLegacyPassPass(LangOpts.NoSanitizeFiles));
+}
+
+static void addEntryExitInstrumentationPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createEntryExitInstrumenterPass());
+}
+
+static void
+addPostInlineEntryExitInstrumentationPass(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createPostInlineEntryExitInstrumenterPass());
}
static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
@@ -389,6 +411,10 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
case CodeGenOptions::SVML:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
break;
+ case CodeGenOptions::Darwin_libsystem_m:
+ TLII->addVectorizableFunctionsFromVecLib(
+ TargetLibraryInfoImpl::DarwinLibSystemM);
+ break;
default:
break;
}
@@ -513,7 +539,6 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.NoNaNsFPMath = LangOpts.NoHonorNaNs;
Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
Options.UnsafeFPMath = LangOpts.UnsafeFPMath;
- Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
Options.BBSections =
llvm::StringSwitch<llvm::BasicBlockSection>(CodeGenOpts.BBSections)
@@ -537,23 +562,16 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.EnableMachineFunctionSplitter = CodeGenOpts.SplitMachineFunctions;
Options.FunctionSections = CodeGenOpts.FunctionSections;
Options.DataSections = CodeGenOpts.DataSections;
- Options.IgnoreXCOFFVisibility = CodeGenOpts.IgnoreXCOFFVisibility;
+ Options.IgnoreXCOFFVisibility = LangOpts.IgnoreXCOFFVisibility;
Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
Options.UniqueBasicBlockSectionNames =
CodeGenOpts.UniqueBasicBlockSectionNames;
- Options.StackProtectorGuard =
- llvm::StringSwitch<llvm::StackProtectorGuards>(CodeGenOpts
- .StackProtectorGuard)
- .Case("tls", llvm::StackProtectorGuards::TLS)
- .Case("global", llvm::StackProtectorGuards::Global)
- .Default(llvm::StackProtectorGuards::None);
- Options.StackProtectorGuardOffset = CodeGenOpts.StackProtectorGuardOffset;
- Options.StackProtectorGuardReg = CodeGenOpts.StackProtectorGuardReg;
Options.TLSSize = CodeGenOpts.TLSSize;
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
Options.ExplicitEmulatedTLS = CodeGenOpts.ExplicitEmulatedTLS;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
Options.EmitStackSizeSection = CodeGenOpts.StackSizeSection;
+ Options.StackUsageOutput = CodeGenOpts.StackUsageOutput;
Options.EmitAddrsig = CodeGenOpts.Addrsig;
Options.ForceDwarfFrameSection = CodeGenOpts.ForceDwarfFrameSection;
Options.EmitCallSiteInfo = CodeGenOpts.EmitCallSiteInfo;
@@ -585,6 +603,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
+ Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
return true;
}
@@ -716,9 +735,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addBoundsCheckingPass);
}
- if (CodeGenOpts.SanitizeCoverageType ||
- CodeGenOpts.SanitizeCoverageIndirectCalls ||
- CodeGenOpts.SanitizeCoverageTraceCmp) {
+ if (CodeGenOpts.hasSanitizeCoverage()) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addSanitizerCoveragePass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
@@ -781,6 +798,20 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addDataFlowSanitizerPass);
}
+ if (CodeGenOpts.InstrumentFunctions ||
+ CodeGenOpts.InstrumentFunctionEntryBare ||
+ CodeGenOpts.InstrumentFunctionsAfterInlining ||
+ CodeGenOpts.InstrumentForProfiling) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addEntryExitInstrumentationPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addEntryExitInstrumentationPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addPostInlineEntryExitInstrumentationPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addPostInlineEntryExitInstrumentationPass);
+ }
+
// Set up the per-function pass manager.
FPM.add(new TargetLibraryInfoWrapperPass(*TLII));
if (CodeGenOpts.VerifyModule)
@@ -790,12 +821,6 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.RewriteMapFiles.empty())
addSymbolRewriterPass(CodeGenOpts, &MPM);
- // Add UniqueInternalLinkageNames Pass which renames internal linkage symbols
- // with unique names.
- if (CodeGenOpts.UniqueInternalLinkageNames) {
- MPM.add(createUniqueInternalLinkageNamesPass());
- }
-
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts)) {
MPM.add(createGCOVProfilerPass(*Options));
if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
@@ -850,7 +875,15 @@ static void setCommandLineOpts(const CodeGenOptions &CodeGenOpts) {
BackendArgs.push_back("-limit-float-precision");
BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
}
+ // Check for the default "clang" invocation that won't set any cl::opt values.
+ // Skip trying to parse the command line invocation to avoid the issues
+ // described below.
+ if (BackendArgs.size() == 1)
+ return;
BackendArgs.push_back(nullptr);
+ // FIXME: The command line parser below is not thread-safe and shares a global
+ // state, so this call might crash or overwrite the options of another Clang
+ // instance in the same process.
llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
BackendArgs.data());
}
@@ -925,7 +958,16 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
if (TM)
TheModule->setDataLayout(TM->createDataLayout());
- legacy::PassManager PerModulePasses;
+ DebugifyCustomPassManager PerModulePasses;
+ DebugInfoPerPassMap DIPreservationMap;
+ if (CodeGenOpts.EnableDIPreservationVerify) {
+ PerModulePasses.setDebugifyMode(DebugifyMode::OriginalDebugInfo);
+ PerModulePasses.setDIPreservationMap(DIPreservationMap);
+
+ if (!CodeGenOpts.DIBugsReportFilePath.empty())
+ PerModulePasses.setOrigDIVerifyBugsReportFilePath(
+ CodeGenOpts.DIBugsReportFilePath);
+ }
PerModulePasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
@@ -1058,6 +1100,89 @@ static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
}
}
+static void addSanitizers(const Triple &TargetTriple,
+ const CodeGenOptions &CodeGenOpts,
+ const LangOptions &LangOpts, PassBuilder &PB) {
+ PB.registerOptimizerLastEPCallback([&](ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level) {
+ if (CodeGenOpts.hasSanitizeCoverage()) {
+ auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
+ MPM.addPass(ModuleSanitizerCoveragePass(
+ SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
+ CodeGenOpts.SanitizeCoverageIgnorelistFiles));
+ }
+
+ auto MSanPass = [&](SanitizerMask Mask, bool CompileKernel) {
+ if (LangOpts.Sanitize.has(Mask)) {
+ int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
+ bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+
+ MPM.addPass(
+ MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ FunctionPassManager FPM;
+ FPM.addPass(
+ MemorySanitizerPass({TrackOrigins, Recover, CompileKernel}));
+ if (Level != PassBuilder::OptimizationLevel::O0) {
+ // MemorySanitizer inserts complex instrumentation that mostly
+ // follows the logic of the original code, but operates on
+ // "shadow" values. It can benefit from re-running some
+ // general purpose optimization passes.
+ FPM.addPass(EarlyCSEPass());
+ // TODO: Consider adding more passes like in
+ // addGeneralOptsForMemorySanitizer. EarlyCSEPass makes a visible
+ // difference in size. It's not clear if the rest is still
+ // useful. InstCombinePass breaks
+ // compiler-rt/test/msan/select_origin.cpp.
+ }
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ }
+ };
+ MSanPass(SanitizerKind::Memory, false);
+ MSanPass(SanitizerKind::KernelMemory, true);
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
+ MPM.addPass(ThreadSanitizerPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
+ }
+
+ auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
+ if (LangOpts.Sanitize.has(Mask)) {
+ bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+ bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
+ bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
+ bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
+ llvm::AsanDtorKind DestructorKind =
+ CodeGenOpts.getSanitizeAddressDtor();
+ llvm::AsanDetectStackUseAfterReturnMode UseAfterReturn =
+ CodeGenOpts.getSanitizeAddressUseAfterReturn();
+ MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
+ MPM.addPass(ModuleAddressSanitizerPass(
+ CompileKernel, Recover, ModuleUseAfterScope, UseOdrIndicator,
+ DestructorKind));
+ MPM.addPass(createModuleToFunctionPassAdaptor(AddressSanitizerPass(
+ CompileKernel, Recover, UseAfterScope, UseAfterReturn)));
+ }
+ };
+ ASanPass(SanitizerKind::Address, false);
+ ASanPass(SanitizerKind::KernelAddress, true);
+
+ auto HWASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
+ if (LangOpts.Sanitize.has(Mask)) {
+ bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
+ MPM.addPass(HWAddressSanitizerPass(
+ CompileKernel, Recover,
+ /*DisableOptimization=*/CodeGenOpts.OptimizationLevel == 0));
+ }
+ };
+ HWASanPass(SanitizerKind::HWAddress, false);
+ HWASanPass(SanitizerKind::KernelHWAddress, true);
+
+ if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
+ MPM.addPass(DataFlowSanitizerPass(LangOpts.NoSanitizeFiles));
+ }
+ });
+}
+
/// A clean version of `EmitAssembly` that uses the new pass manager.
///
/// Not all features are currently supported in this system, but where
@@ -1147,13 +1272,22 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// Only enable CGProfilePass when using integrated assembler, since
// non-integrated assemblers don't recognize .cgprofile section.
PTO.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
- PTO.Coroutines = LangOpts.Coroutines;
- PTO.UniqueLinkageNames = CodeGenOpts.UniqueInternalLinkageNames;
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+
+ bool DebugPassStructure = CodeGenOpts.DebugPass == "Structure";
PassInstrumentationCallbacks PIC;
- StandardInstrumentations SI(CodeGenOpts.DebugPassManager);
- SI.registerCallbacks(PIC);
- PassBuilder PB(CodeGenOpts.DebugPassManager, TM.get(), PTO, PGOOpt, &PIC);
+ PrintPassOptions PrintPassOpts;
+ PrintPassOpts.Indent = DebugPassStructure;
+ PrintPassOpts.SkipAnalyses = DebugPassStructure;
+ StandardInstrumentations SI(CodeGenOpts.DebugPassManager ||
+ DebugPassStructure,
+ /*VerifyEach*/ false, PrintPassOpts);
+ SI.registerCallbacks(PIC, &FAM);
+ PassBuilder PB(TM.get(), PTO, PGOOpt, &PIC);
// Attempt to load pass plugins and register their callbacks with PB.
for (auto &PluginFN : CodeGenOpts.PassPlugins) {
@@ -1169,11 +1303,6 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
get##Ext##PluginInfo().RegisterPassBuilderCallbacks(PB);
#include "llvm/Support/Extension.def"
- LoopAnalysisManager LAM(CodeGenOpts.DebugPassManager);
- FunctionAnalysisManager FAM(CodeGenOpts.DebugPassManager);
- CGSCCAnalysisManager CGAM(CodeGenOpts.DebugPassManager);
- ModuleAnalysisManager MAM(CodeGenOpts.DebugPassManager);
-
// Register the AA manager first so that our version is the one used.
FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
@@ -1191,7 +1320,7 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
PB.registerLoopAnalyses(LAM);
PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
- ModulePassManager MPM(CodeGenOpts.DebugPassManager);
+ ModulePassManager MPM;
if (!CodeGenOpts.DisableLLVMPasses) {
// Map our optimization levels into one of the distinct levels used to
@@ -1222,10 +1351,11 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
// If we reached here with a non-empty index file name, then the index
// file was empty and we are not performing ThinLTO backend compilation
- // (used in testing in a distributed build environment). Drop any the type
- // test assume sequences inserted for whole program vtables so that
- // codegen doesn't complain.
- if (!CodeGenOpts.ThinLTOIndexFile.empty())
+ // (used in testing in a distributed build environment).
+ bool IsThinLTOPostLink = !CodeGenOpts.ThinLTOIndexFile.empty();
+ // If so, drop any of the type test assume sequences inserted for whole
+ // program vtables so that codegen doesn't complain.
+ if (IsThinLTOPostLink)
PB.registerPipelineStartEPCallback(
[](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
MPM.addPass(LowerTypeTestsPass(/*ExportSummary=*/nullptr,
@@ -1233,12 +1363,20 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
/*DropTypeTests=*/true));
});
- if (Level != PassBuilder::OptimizationLevel::O0) {
+ if (CodeGenOpts.InstrumentFunctions ||
+ CodeGenOpts.InstrumentFunctionEntryBare ||
+ CodeGenOpts.InstrumentFunctionsAfterInlining ||
+ CodeGenOpts.InstrumentForProfiling) {
PB.registerPipelineStartEPCallback(
[](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
MPM.addPass(createModuleToFunctionPassAdaptor(
EntryExitInstrumenterPass(/*PostInlining=*/false)));
});
+ PB.registerOptimizerLastEPCallback(
+ [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ EntryExitInstrumenterPass(/*PostInlining=*/true)));
+ });
}
// Register callbacks to schedule sanitizer passes at the appropriate part
@@ -1249,81 +1387,10 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
FPM.addPass(BoundsCheckingPass());
});
- if (CodeGenOpts.SanitizeCoverageType ||
- CodeGenOpts.SanitizeCoverageIndirectCalls ||
- CodeGenOpts.SanitizeCoverageTraceCmp) {
- PB.registerOptimizerLastEPCallback(
- [this](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- auto SancovOpts = getSancovOptsFromCGOpts(CodeGenOpts);
- MPM.addPass(ModuleSanitizerCoveragePass(
- SancovOpts, CodeGenOpts.SanitizeCoverageAllowlistFiles,
- CodeGenOpts.SanitizeCoverageBlocklistFiles));
- });
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- int TrackOrigins = CodeGenOpts.SanitizeMemoryTrackOrigins;
- bool Recover = CodeGenOpts.SanitizeRecover.has(SanitizerKind::Memory);
- PB.registerOptimizerLastEPCallback(
- [TrackOrigins, Recover](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
- MPM.addPass(MemorySanitizerPass({TrackOrigins, Recover, false}));
- MPM.addPass(createModuleToFunctionPassAdaptor(
- MemorySanitizerPass({TrackOrigins, Recover, false})));
- });
- }
- if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- PB.registerOptimizerLastEPCallback(
- [](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- MPM.addPass(ThreadSanitizerPass());
- MPM.addPass(
- createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
- });
- }
-
- auto ASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
- if (LangOpts.Sanitize.has(Mask)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- bool UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
- bool ModuleUseAfterScope = asanUseGlobalsGC(TargetTriple, CodeGenOpts);
- bool UseOdrIndicator = CodeGenOpts.SanitizeAddressUseOdrIndicator;
- PB.registerOptimizerLastEPCallback(
- [CompileKernel, Recover, UseAfterScope, ModuleUseAfterScope,
- UseOdrIndicator](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
- MPM.addPass(
- RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
- MPM.addPass(ModuleAddressSanitizerPass(CompileKernel, Recover,
- ModuleUseAfterScope,
- UseOdrIndicator));
- MPM.addPass(createModuleToFunctionPassAdaptor(
- AddressSanitizerPass(CompileKernel, Recover, UseAfterScope)));
- });
- }
- };
- ASanPass(SanitizerKind::Address, false);
- ASanPass(SanitizerKind::KernelAddress, true);
-
- auto HWASanPass = [&](SanitizerMask Mask, bool CompileKernel) {
- if (LangOpts.Sanitize.has(Mask)) {
- bool Recover = CodeGenOpts.SanitizeRecover.has(Mask);
- PB.registerOptimizerLastEPCallback(
- [CompileKernel, Recover](ModulePassManager &MPM,
- PassBuilder::OptimizationLevel Level) {
- MPM.addPass(HWAddressSanitizerPass(CompileKernel, Recover));
- });
- }
- };
- HWASanPass(SanitizerKind::HWAddress, false);
- HWASanPass(SanitizerKind::KernelHWAddress, true);
-
- if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
- PB.registerOptimizerLastEPCallback(
- [this](ModulePassManager &MPM, PassBuilder::OptimizationLevel Level) {
- MPM.addPass(
- DataFlowSanitizerPass(LangOpts.SanitizerBlacklistFiles));
- });
- }
+ // Don't add sanitizers if we are here from ThinLTO PostLink. That was
+ // already done in the PreLink stage.
+ if (!IsThinLTOPostLink)
+ addSanitizers(TargetTriple, CodeGenOpts, LangOpts, PB);
if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts))
PB.registerPipelineStartEPCallback(
@@ -1455,10 +1522,7 @@ static void runThinLTOBackend(
// we should only invoke this using the individual indexes written out
// via a WriteIndexesThinBackend.
FunctionImporter::ImportMapTy ImportList;
- std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
- MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;
- if (!lto::loadReferencedModules(*M, *CombinedIndex, ImportList, ModuleMap,
- OwnedImports))
+ if (!lto::initImportList(*M, *CombinedIndex, ImportList))
return;
auto AddStream = [&](size_t Task) {
@@ -1535,7 +1599,7 @@ static void runThinLTOBackend(
if (Error E =
thinBackend(Conf, -1, AddStream, *M, *CombinedIndex, ImportList,
ModuleToDefinedGVSummaries[M->getModuleIdentifier()],
- ModuleMap, CGOpts.CmdArgs)) {
+ /* ModuleMap */ nullptr, CGOpts.CmdArgs)) {
handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
});
@@ -1547,7 +1611,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
const LangOptions &LOpts,
- const llvm::DataLayout &TDesc, Module *M,
+ StringRef TDesc, Module *M,
BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
@@ -1601,11 +1665,11 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// DataLayout.
if (AsmHelper.TM) {
std::string DLDesc = M->getDataLayout().getStringRepresentation();
- if (DLDesc != TDesc.getStringRepresentation()) {
+ if (DLDesc != TDesc) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "backend data layout '%0' does not match "
"expected target description '%1'");
- Diags.Report(DiagID) << DLDesc << TDesc.getStringRepresentation();
+ Diags.Report(DiagID) << DLDesc << TDesc;
}
}
}
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index c7256e240a31..b6722ad4e4f1 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -85,7 +85,7 @@ namespace {
(C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
lvalue.getAlignment();
VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
- VoidPtrAddr, OffsetInChars.getQuantity());
+ CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
VoidPtrAddr,
CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
@@ -427,6 +427,8 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
else
switch ((llvm::AtomicOrderingCABI)FOS) {
case llvm::AtomicOrderingCABI::relaxed:
+ // 31.7.2.18: "The failure argument shall not be memory_order_release
+ // nor memory_order_acq_rel". Fallback to monotonic.
case llvm::AtomicOrderingCABI::release:
case llvm::AtomicOrderingCABI::acq_rel:
FailureOrder = llvm::AtomicOrdering::Monotonic;
@@ -439,59 +441,48 @@ static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
break;
}
- if (isStrongerThan(FailureOrder, SuccessOrder)) {
- // Don't assert on undefined behavior "failure argument shall be no
- // stronger than the success argument".
- FailureOrder =
- llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
- }
+ // Prior to c++17, "the failure argument shall be no stronger than the
+ // success argument". This condition has been lifted and the only
+ // precondition is 31.7.2.18. Effectively treat this as a DR and skip
+ // language version checks.
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
FailureOrder, Scope);
return;
}
// Create all the relevant BB's
- llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
- *SeqCstBB = nullptr;
- MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
- if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
- SuccessOrder != llvm::AtomicOrdering::Release)
- AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
- if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
- SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
-
- llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
-
- llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
-
- // Emit all the different atomics
+ auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
+ auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
+ auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
+ auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
// MonotonicBB is arbitrarily chosen as the default case; in practice, this
// doesn't matter unless someone is crazy enough to use something that
// doesn't fold to a constant for the ordering.
+ llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
+ // Implemented as acquire, since it's the closest in LLVM.
+ SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
+ AcquireBB);
+ SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
+ AcquireBB);
+ SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
+ SeqCstBB);
+
+ // Emit all the different atomics
CGF.Builder.SetInsertPoint(MonotonicBB);
emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
CGF.Builder.CreateBr(ContBB);
- if (AcquireBB) {
- CGF.Builder.SetInsertPoint(AcquireBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
- Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
- CGF.Builder.CreateBr(ContBB);
- SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
- AcquireBB);
- SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
- AcquireBB);
- }
- if (SeqCstBB) {
- CGF.Builder.SetInsertPoint(SeqCstBB);
- emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
- llvm::AtomicOrdering::SequentiallyConsistent, Scope);
- CGF.Builder.CreateBr(ContBB);
- SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
- SeqCstBB);
- }
+ CGF.Builder.SetInsertPoint(AcquireBB);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
+ llvm::AtomicOrdering::Acquire, Scope);
+ CGF.Builder.CreateBr(ContBB);
+
+ CGF.Builder.SetInsertPoint(SeqCstBB);
+ emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
+ llvm::AtomicOrdering::SequentiallyConsistent, Scope);
+ CGF.Builder.CreateBr(ContBB);
CGF.Builder.SetInsertPoint(ContBB);
}
@@ -602,21 +593,25 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
break;
case AtomicExpr::AO__atomic_add_fetch:
- PostOp = llvm::Instruction::Add;
+ PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
+ : llvm::Instruction::Add;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
- Op = llvm::AtomicRMWInst::Add;
+ Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
+ : llvm::AtomicRMWInst::Add;
break;
case AtomicExpr::AO__atomic_sub_fetch:
- PostOp = llvm::Instruction::Sub;
+ PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
+ : llvm::Instruction::Sub;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
- Op = llvm::AtomicRMWInst::Sub;
+ Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
+ : llvm::AtomicRMWInst::Sub;
break;
case AtomicExpr::AO__atomic_min_fetch:
@@ -813,6 +808,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
bool UseLibcall = Misaligned | Oversized;
+ bool ShouldCastToIntPtrTy = true;
+
CharUnits MaxInlineWidth =
getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
@@ -892,11 +889,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
break;
}
- LLVM_FALLTHROUGH;
+ LLVM_FALLTHROUGH;
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_sub:
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
+ ShouldCastToIntPtrTy = !MemTy->isFloatingType();
+ LLVM_FALLTHROUGH;
+
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_store:
@@ -937,15 +937,23 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
AtomicInfo Atomics(*this, AtomicVal);
- Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
- if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
- if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
- if (Dest.isValid())
- Dest = Atomics.emitCastToAtomicIntPointer(Dest);
- else if (E->isCmpXChg())
+ if (ShouldCastToIntPtrTy) {
+ Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
+ if (Val1.isValid())
+ Val1 = Atomics.convertToAtomicIntPointer(Val1);
+ if (Val2.isValid())
+ Val2 = Atomics.convertToAtomicIntPointer(Val2);
+ }
+ if (Dest.isValid()) {
+ if (ShouldCastToIntPtrTy)
+ Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ } else if (E->isCmpXChg())
Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
- else if (!RValTy->isVoidType())
- Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
+ else if (!RValTy->isVoidType()) {
+ Dest = Atomics.CreateTempAlloca();
+ if (ShouldCastToIntPtrTy)
+ Dest = Atomics.emitCastToAtomicIntPointer(Dest);
+ }
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
@@ -1722,11 +1730,6 @@ AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
llvm::AtomicOrdering Failure, bool IsWeak) {
- if (isStrongerThan(Failure, Success))
- // Don't assert on undefined behavior "failure argument shall be no stronger
- // than the success argument".
- Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
-
// Check whether we should use a library call.
if (shouldUseLibcall()) {
// Produce a source address.
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 91c726f4cf64..f39a56f81d41 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -1023,7 +1023,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
type, VK_LValue, SourceLocation());
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
- &declRef, VK_RValue, FPOptionsOverride());
+ &declRef, VK_PRValue, FPOptionsOverride());
// FIXME: Pass a specific location for the expr init so that the store is
// attributed to a reasonable location - otherwise it may be attributed to
// locations of subexpressions in the initialization.
@@ -1190,8 +1190,10 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
// First argument of a block call is a generic block literal casted to
// generic void pointer, i.e. i8 addrspace(4)*
+ llvm::Type *GenericVoidPtrTy =
+ CGM.getOpenCLRuntime().getGenericVoidPointerType();
llvm::Value *BlockDescriptor = Builder.CreatePointerCast(
- BlockPtr, CGM.getOpenCLRuntime().getGenericVoidPointerType());
+ BlockPtr, GenericVoidPtrTy);
QualType VoidPtrQualTy = Ctx.getPointerType(
Ctx.getAddrSpaceQualType(Ctx.VoidTy, LangAS::opencl_generic));
Args.add(RValue::get(BlockDescriptor), VoidPtrQualTy);
@@ -1203,7 +1205,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
Func = CGM.getOpenCLRuntime().getInvokeFunction(E->getCallee());
else {
llvm::Value *FuncPtr = Builder.CreateStructGEP(GenBlockTy, BlockPtr, 2);
- Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ Func = Builder.CreateAlignedLoad(GenericVoidPtrTy, FuncPtr,
+ getPointerAlign());
}
} else {
// Bitcast the block literal to a generic block literal.
@@ -1219,7 +1222,7 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(), E->arguments());
// Load the function.
- Func = Builder.CreateAlignedLoad(FuncPtr, getPointerAlign());
+ Func = Builder.CreateAlignedLoad(VoidPtrTy, FuncPtr, getPointerAlign());
}
const FunctionType *FuncTy = FnType->castAs<FunctionType>();
@@ -1372,7 +1375,7 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
llvm::IRBuilder<> b(llvm::BasicBlock::Create(CGM.getLLVMContext(), "entry",
Init));
b.CreateAlignedStore(CGM.getNSConcreteGlobalBlock(),
- b.CreateStructGEP(literal, 0),
+ b.CreateStructGEP(literal->getValueType(), literal, 0),
CGM.getPointerAlign().getAsAlign());
b.CreateRetVoid();
// We can't use the normal LLVM global initialisation array, because we
@@ -1899,7 +1902,7 @@ static void setBlockHelperAttributesVisibility(bool CapturesNonExternalType,
} else {
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
Fn->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
}
}
@@ -1945,21 +1948,13 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
if (CGM.supportsCOMDAT())
Fn->setComdat(CGM.getModule().getOrInsertComdat(FuncName));
- IdentifierInfo *II = &C.Idents.get(FuncName);
-
SmallVector<QualType, 2> ArgTys;
ArgTys.push_back(C.VoidPtrTy);
ArgTys.push_back(C.VoidPtrTy);
- QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
- FunctionDecl *FD = FunctionDecl::Create(
- C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- FunctionTy, nullptr, SC_Static, false, false);
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
- // This is necessary to avoid inheriting the previous line number.
- FD->setImplicit();
- StartFunction(FD, ReturnTy, Fn, FI, args);
+ StartFunction(GlobalDecl(), ReturnTy, Fn, FI, args);
auto AL = ApplyDebugLocation::CreateArtificial(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -2140,21 +2135,12 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
if (CGM.supportsCOMDAT())
Fn->setComdat(CGM.getModule().getOrInsertComdat(FuncName));
- IdentifierInfo *II = &C.Idents.get(FuncName);
-
SmallVector<QualType, 1> ArgTys;
ArgTys.push_back(C.VoidPtrTy);
- QualType FunctionTy = C.getFunctionType(ReturnTy, ArgTys, {});
-
- FunctionDecl *FD = FunctionDecl::Create(
- C, C.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- FunctionTy, nullptr, SC_Static, false, false);
setBlockHelperAttributesVisibility(blockInfo.CapturesNonExternalType, Fn, FI,
CGM);
- // This is necessary to avoid inheriting the previous line number.
- FD->setImplicit();
- StartFunction(FD, ReturnTy, Fn, FI, args);
+ StartFunction(GlobalDecl(), ReturnTy, Fn, FI, args);
markAsIgnoreThreadCheckingAtRuntime(Fn);
auto AL = ApplyDebugLocation::CreateArtificial(*this);
@@ -2392,21 +2378,15 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__Block_byref_object_copy_", &CGF.CGM.getModule());
- IdentifierInfo *II
- = &Context.Idents.get("__Block_byref_object_copy_");
-
SmallVector<QualType, 2> ArgTys;
ArgTys.push_back(Context.VoidPtrTy);
ArgTys.push_back(Context.VoidPtrTy);
- QualType FunctionTy = Context.getFunctionType(ReturnTy, ArgTys, {});
-
- FunctionDecl *FD = FunctionDecl::Create(
- Context, Context.getTranslationUnitDecl(), SourceLocation(),
- SourceLocation(), II, FunctionTy, nullptr, SC_Static, false, false);
CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- CGF.StartFunction(FD, ReturnTy, Fn, FI, args);
+ CGF.StartFunction(GlobalDecl(), ReturnTy, Fn, FI, args);
+ // Create a scope with an artificial location for the body of this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
if (generator.needsCopy()) {
llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
@@ -2468,20 +2448,14 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
"__Block_byref_object_dispose_",
&CGF.CGM.getModule());
- IdentifierInfo *II
- = &Context.Idents.get("__Block_byref_object_dispose_");
-
SmallVector<QualType, 1> ArgTys;
ArgTys.push_back(Context.VoidPtrTy);
- QualType FunctionTy = Context.getFunctionType(R, ArgTys, {});
-
- FunctionDecl *FD = FunctionDecl::Create(
- Context, Context.getTranslationUnitDecl(), SourceLocation(),
- SourceLocation(), II, FunctionTy, nullptr, SC_Static, false, false);
CGF.CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
- CGF.StartFunction(FD, R, Fn, FI, args);
+ CGF.StartFunction(GlobalDecl(), R, Fn, FI, args);
+ // Create a scope with an artificial location for the body of this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
if (generator.needsDispose()) {
Address addr = CGF.GetAddrOfLocalVar(&Src);
@@ -2884,7 +2858,7 @@ static void configureBlocksRuntimeObject(CodeGenModule &CGM,
"expected Function or GlobalVariable");
const NamedDecl *ND = nullptr;
- for (const auto &Result : DC->lookup(&II))
+ for (const auto *Result : DC->lookup(&II))
if ((ND = dyn_cast<FunctionDecl>(Result)) ||
(ND = dyn_cast<VarDecl>(Result)))
break;
@@ -2936,9 +2910,8 @@ llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
if (NSConcreteGlobalBlock)
return NSConcreteGlobalBlock;
- NSConcreteGlobalBlock = GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock",
- Int8PtrTy->getPointerTo(),
- nullptr);
+ NSConcreteGlobalBlock =
+ GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock", Int8PtrTy, 0, nullptr);
configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock);
return NSConcreteGlobalBlock;
}
@@ -2947,9 +2920,8 @@ llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
if (NSConcreteStackBlock)
return NSConcreteStackBlock;
- NSConcreteStackBlock = GetOrCreateLLVMGlobal("_NSConcreteStackBlock",
- Int8PtrTy->getPointerTo(),
- nullptr);
+ NSConcreteStackBlock =
+ GetOrCreateLLVMGlobal("_NSConcreteStackBlock", Int8PtrTy, 0, nullptr);
configureBlocksRuntimeObject(*this, NSConcreteStackBlock);
return NSConcreteStackBlock;
}
diff --git a/clang/lib/CodeGen/CGBuilder.h b/clang/lib/CodeGen/CGBuilder.h
index 38e96c0f4ee6..4fad44a105cd 100644
--- a/clang/lib/CodeGen/CGBuilder.h
+++ b/clang/lib/CodeGen/CGBuilder.h
@@ -66,35 +66,28 @@ public:
// Note that we intentionally hide the CreateLoad APIs that don't
// take an alignment.
llvm::LoadInst *CreateLoad(Address Addr, const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr.getPointer(),
+ return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, const char *Name) {
// This overload is required to prevent string literals from
// ending up in the IsVolatile overload.
- return CreateAlignedLoad(Addr.getPointer(),
+ return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
Addr.getAlignment().getAsAlign(), Name);
}
llvm::LoadInst *CreateLoad(Address Addr, bool IsVolatile,
const llvm::Twine &Name = "") {
- return CreateAlignedLoad(
- Addr.getPointer(), Addr.getAlignment().getAsAlign(), IsVolatile, Name);
+ return CreateAlignedLoad(Addr.getElementType(), Addr.getPointer(),
+ Addr.getAlignment().getAsAlign(), IsVolatile,
+ Name);
}
using CGBuilderBaseTy::CreateAlignedLoad;
- llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
- const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
- }
- llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
- const char *Name) {
- return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
- }
llvm::LoadInst *CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr,
CharUnits Align,
const llvm::Twine &Name = "") {
assert(Addr->getType()->getPointerElementType() == Ty);
- return CreateAlignedLoad(Addr, Align.getAsAlign(), Name);
+ return CreateAlignedLoad(Ty, Addr, Align.getAsAlign(), Name);
}
// Note that we intentionally hide the CreateStore APIs that don't
@@ -132,6 +125,28 @@ public:
return CreateAlignedStore(getInt1(Value), Addr, CharUnits::One());
}
+ // Temporarily use old signature; clang will be updated to an Address overload
+ // in a subsequent patch.
+ llvm::AtomicCmpXchgInst *
+ CreateAtomicCmpXchg(llvm::Value *Ptr, llvm::Value *Cmp, llvm::Value *New,
+ llvm::AtomicOrdering SuccessOrdering,
+ llvm::AtomicOrdering FailureOrdering,
+ llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
+ return CGBuilderBaseTy::CreateAtomicCmpXchg(
+ Ptr, Cmp, New, llvm::MaybeAlign(), SuccessOrdering, FailureOrdering,
+ SSID);
+ }
+
+ // Temporarily use old signature; clang will be updated to an Address overload
+ // in a subsequent patch.
+ llvm::AtomicRMWInst *
+ CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, llvm::Value *Ptr,
+ llvm::Value *Val, llvm::AtomicOrdering Ordering,
+ llvm::SyncScope::ID SSID = llvm::SyncScope::System) {
+ return CGBuilderBaseTy::CreateAtomicRMW(Op, Ptr, Val, llvm::MaybeAlign(),
+ Ordering, SSID);
+ }
+
using CGBuilderBaseTy::CreateBitCast;
Address CreateBitCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
@@ -198,7 +213,7 @@ public:
CharUnits::fromQuantity(DL.getTypeAllocSize(ElTy->getElementType()));
return Address(
- CreateInBoundsGEP(Addr.getPointer(),
+ CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
{getSize(CharUnits::Zero()), getSize(Index)}, Name),
Addr.getAlignment().alignmentAtOffset(Index * EltSize));
}
@@ -239,13 +254,15 @@ public:
Address CreateConstInBoundsByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
- return Address(CreateInBoundsGEP(Addr.getPointer(), getSize(Offset), Name),
+ return Address(CreateInBoundsGEP(Addr.getElementType(), Addr.getPointer(),
+ getSize(Offset), Name),
Addr.getAlignment().alignmentAtOffset(Offset));
}
Address CreateConstByteGEP(Address Addr, CharUnits Offset,
const llvm::Twine &Name = "") {
assert(Addr.getElementType() == TypeCache.Int8Ty);
- return Address(CreateGEP(Addr.getPointer(), getSize(Offset), Name),
+ return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(),
+ getSize(Offset), Name),
Addr.getAlignment().alignmentAtOffset(Offset));
}
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 113541bd5024..d9b2a5fe16be 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
@@ -26,6 +27,8 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -40,6 +43,7 @@
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/IntrinsicsR600.h"
+#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
@@ -411,7 +415,7 @@ static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
llvm::Type *ITy =
llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
Ptr = CGF.Builder.CreateBitCast(Ptr, ITy->getPointerTo());
- llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(Ptr, LoadSize);
+ llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
Load->setVolatile(true);
return Load;
}
@@ -990,6 +994,54 @@ static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
+static llvm::Value *emitPPCLoadReserveIntrinsic(CodeGenFunction &CGF,
+ unsigned BuiltinID,
+ const CallExpr *E) {
+ Value *Addr = CGF.EmitScalarExpr(E->getArg(0));
+
+ SmallString<64> Asm;
+ raw_svector_ostream AsmOS(Asm);
+ llvm::IntegerType *RetType = CGF.Int32Ty;
+
+ switch (BuiltinID) {
+ case clang::PPC::BI__builtin_ppc_ldarx:
+ AsmOS << "ldarx ";
+ RetType = CGF.Int64Ty;
+ break;
+ case clang::PPC::BI__builtin_ppc_lwarx:
+ AsmOS << "lwarx ";
+ RetType = CGF.Int32Ty;
+ break;
+ case clang::PPC::BI__builtin_ppc_lharx:
+ AsmOS << "lharx ";
+ RetType = CGF.Int16Ty;
+ break;
+ case clang::PPC::BI__builtin_ppc_lbarx:
+ AsmOS << "lbarx ";
+ RetType = CGF.Int8Ty;
+ break;
+ default:
+ llvm_unreachable("Expected only PowerPC load reserve intrinsics");
+ }
+
+ AsmOS << "$0, ${1:y}";
+
+ std::string Constraints = "=r,*Z,~{memory}";
+ std::string MachineClobbers = CGF.getTarget().getClobbers();
+ if (!MachineClobbers.empty()) {
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
+
+ llvm::Type *IntPtrType = RetType->getPointerTo();
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(RetType, {IntPtrType}, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
+ return CGF.Builder.CreateCall(IA, {Addr});
+}
+
namespace {
enum class MSVCSetJmpKind {
_setjmpex,
@@ -1677,7 +1729,6 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
}
QualType ReturnTy = Ctx.VoidTy;
- QualType FuncionTy = Ctx.getFunctionType(ReturnTy, ArgTys, {});
// The helper function has linkonce_odr linkage to enable the linker to merge
// identical functions. To ensure the merging always happens, 'noinline' is
@@ -1688,7 +1739,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
llvm::Function *Fn = llvm::Function::Create(
FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
Fn->setDoesNotThrow();
@@ -1697,14 +1748,7 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
Fn->addFnAttr(llvm::Attribute::NoInline);
auto NL = ApplyDebugLocation::CreateEmpty(*this);
- IdentifierInfo *II = &Ctx.Idents.get(Name);
- FunctionDecl *FD = FunctionDecl::Create(
- Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(), II,
- FuncionTy, nullptr, SC_PrivateExtern, false, false);
- // Avoid generating debug location info for the function.
- FD->setImplicit();
-
- StartFunction(FD, ReturnTy, Fn, FI, Args);
+ StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
// Create a scope with an artificial location for the body of this function.
auto AL = ApplyDebugLocation::CreateArtificial(*this);
@@ -2829,6 +2873,36 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
}
+ case Builtin::BI__arithmetic_fence: {
+ // Create the builtin call if FastMath is selected, and the target
+ // supports the builtin, otherwise just return the argument.
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ llvm::FastMathFlags FMF = Builder.getFastMathFlags();
+ bool isArithmeticFenceEnabled =
+ FMF.allowReassoc() &&
+ getContext().getTargetInfo().checkArithmeticFenceSupported();
+ QualType ArgType = E->getArg(0)->getType();
+ if (ArgType->isComplexType()) {
+ if (isArithmeticFenceEnabled) {
+ QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
+ ConvertType(ElementType));
+ Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
+ ConvertType(ElementType));
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
+ Value *Real = ComplexVal.first;
+ Value *Imag = ComplexVal.second;
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ if (isArithmeticFenceEnabled)
+ return RValue::get(
+ Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
+ return RValue::get(ArgValue);
+ }
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
@@ -2942,10 +3016,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_powi:
case Builtin::BI__builtin_powif:
- case Builtin::BI__builtin_powil:
- return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
- *this, E, Intrinsic::powi, Intrinsic::experimental_constrained_powi));
+ case Builtin::BI__builtin_powil: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+
+ if (Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
+ Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
+ Src0->getType());
+ return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
+ }
+ Function *F = CGM.getIntrinsic(Intrinsic::powi,
+ { Src0->getType(), Src1->getType() });
+ return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
+ }
case Builtin::BI__builtin_isgreater:
case Builtin::BI__builtin_isgreaterequal:
case Builtin::BI__builtin_isless:
@@ -2985,10 +3070,35 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_isnan: {
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(0));
- V = Builder.CreateFCmpUNO(V, V, "cmp");
- return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ llvm::Type *Ty = V->getType();
+ const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
+ if (!Builder.getIsFPConstrained() ||
+ Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
+ !Ty->isIEEE()) {
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
+ return RValue::get(Result);
+
+ // NaN has all exp bits set and a non zero significand. Therefore:
+ // isnan(V) == ((exp mask - (abs(V) & exp mask)) < 0)
+ unsigned bitsize = Ty->getScalarSizeInBits();
+ llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
+ Value *IntV = Builder.CreateBitCast(V, IntTy);
+ APInt AndMask = APInt::getSignedMaxValue(bitsize);
+ Value *AbsV =
+ Builder.CreateAnd(IntV, llvm::ConstantInt::get(IntTy, AndMask));
+ APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
+ Value *Sub =
+ Builder.CreateSub(llvm::ConstantInt::get(IntTy, ExpMask), AbsV);
+ // V = sign bit (Sub) <=> V = (Sub < 0)
+ V = Builder.CreateLShr(Sub, llvm::ConstantInt::get(IntTy, bitsize - 1));
+ if (bitsize > 32)
+ V = Builder.CreateTrunc(V, ConvertType(E->getType()));
+ return RValue::get(V);
}
case Builtin::BI__builtin_matrix_transpose: {
@@ -3050,15 +3160,38 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// isfinite(x) --> fabs(x) != infinity
// x != NaN via the ordered compare in either case.
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
- // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
Value *V = EmitScalarExpr(E->getArg(0));
- Value *Fabs = EmitFAbs(*this, V);
- Constant *Infinity = ConstantFP::getInfinity(V->getType());
- CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
- ? CmpInst::FCMP_OEQ
- : CmpInst::FCMP_ONE;
- Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
- return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
+ llvm::Type *Ty = V->getType();
+ if (!Builder.getIsFPConstrained() ||
+ Builder.getDefaultConstrainedExcept() == fp::ebIgnore ||
+ !Ty->isIEEE()) {
+ Value *Fabs = EmitFAbs(*this, V);
+ Constant *Infinity = ConstantFP::getInfinity(V->getType());
+ CmpInst::Predicate Pred = (BuiltinID == Builtin::BI__builtin_isinf)
+ ? CmpInst::FCMP_OEQ
+ : CmpInst::FCMP_ONE;
+ Value *FCmp = Builder.CreateFCmp(Pred, Fabs, Infinity, "cmpinf");
+ return RValue::get(Builder.CreateZExt(FCmp, ConvertType(E->getType())));
+ }
+
+ if (Value *Result = getTargetHooks().testFPKind(V, BuiltinID, Builder, CGM))
+ return RValue::get(Result);
+
+ // Inf values have all exp bits set and a zero significand. Therefore:
+ // isinf(V) == ((V << 1) == ((exp mask) << 1))
+ // isfinite(V) == ((V << 1) < ((exp mask) << 1)) using unsigned comparison
+ unsigned bitsize = Ty->getScalarSizeInBits();
+ llvm::IntegerType *IntTy = Builder.getIntNTy(bitsize);
+ Value *IntV = Builder.CreateBitCast(V, IntTy);
+ Value *Shl1 = Builder.CreateShl(IntV, 1);
+ const llvm::fltSemantics &Semantics = Ty->getFltSemantics();
+ APInt ExpMask = APFloat::getInf(Semantics).bitcastToAPInt();
+ Value *ExpMaskShl1 = llvm::ConstantInt::get(IntTy, ExpMask.shl(1));
+ if (BuiltinID == Builtin::BI__builtin_isinf)
+ V = Builder.CreateICmpEQ(Shl1, ExpMaskShl1);
+ else
+ V = Builder.CreateICmpULT(Shl1, ExpMaskShl1);
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
case Builtin::BI__builtin_isinf_sign: {
@@ -3223,7 +3356,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemCpy(Dest, Src, SizeVal, false);
if (BuiltinID == Builtin::BImempcpy ||
BuiltinID == Builtin::BI__builtin_mempcpy)
- return RValue::get(Builder.CreateInBoundsGEP(Dest.getPointer(), SizeVal));
+ return RValue::get(Builder.CreateInBoundsGEP(Dest.getElementType(),
+ Dest.getPointer(), SizeVal));
else
return RValue::get(Dest.getPointer());
}
@@ -3328,6 +3462,52 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
+ case Builtin::BI__builtin_wmemchr: {
+ // The MSVC runtime library does not provide a definition of wmemchr, so we
+ // need an inline implementation.
+ if (!getTarget().getTriple().isOSMSVCRT())
+ break;
+
+ llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
+ Value *Str = EmitScalarExpr(E->getArg(0));
+ Value *Chr = EmitScalarExpr(E->getArg(1));
+ Value *Size = EmitScalarExpr(E->getArg(2));
+
+ BasicBlock *Entry = Builder.GetInsertBlock();
+ BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
+ BasicBlock *Next = createBasicBlock("wmemchr.next");
+ BasicBlock *Exit = createBasicBlock("wmemchr.exit");
+ Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
+ Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
+
+ EmitBlock(CmpEq);
+ PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
+ StrPhi->addIncoming(Str, Entry);
+ PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
+ SizePhi->addIncoming(Size, Entry);
+ CharUnits WCharAlign =
+ getContext().getTypeAlignInChars(getContext().WCharTy);
+ Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
+ Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
+ Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
+ Builder.CreateCondBr(StrEqChr, Exit, Next);
+
+ EmitBlock(Next);
+ Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
+ Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
+ Value *NextSizeEq0 =
+ Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
+ Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
+ StrPhi->addIncoming(NextStr, Next);
+ SizePhi->addIncoming(NextSize, Next);
+
+ EmitBlock(Exit);
+ PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
+ Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
+ Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
+ Ret->addIncoming(FoundChr, CmpEq);
+ return RValue::get(Ret);
+ }
case Builtin::BI__builtin_wmemcmp: {
// The MSVC runtime library does not provide a definition of wmemcmp, so we
// need an inline implementation.
@@ -4654,7 +4834,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
auto *Zero = llvm::ConstantInt::get(IntTy, 0);
for (unsigned I = First; I < NumArgs; ++I) {
auto *Index = llvm::ConstantInt::get(IntTy, I - First);
- auto *GEP = Builder.CreateGEP(TmpPtr, {Zero, Index});
+ auto *GEP = Builder.CreateGEP(Tmp.getElementType(), TmpPtr,
+ {Zero, Index});
if (I == First)
ElemPtr = GEP;
auto *V =
@@ -4959,6 +5140,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
}
+
+ case Builtin::BI__builtin_get_device_side_mangled_name: {
+ auto Name = CGM.getCUDARuntime().getDeviceSideName(
+ cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
+ auto Str = CGM.GetAddrOfConstantCString(Name, "");
+ llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
+ llvm::ConstantInt::get(SizeTy, 0)};
+ auto *Ptr = llvm::ConstantExpr::getGetElementPtr(Str.getElementType(),
+ Str.getPointer(), Zeros);
+ return RValue::get(Ptr);
+ }
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -5138,6 +5330,9 @@ static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
case llvm::Triple::hexagon:
return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
default:
return nullptr;
}
@@ -5344,7 +5539,9 @@ static const ARMVectorIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
NEONMAP1(vabs_v, arm_neon_vabs, 0),
NEONMAP1(vabsq_v, arm_neon_vabs, 0),
+ NEONMAP0(vadd_v),
NEONMAP0(vaddhn_v),
+ NEONMAP0(vaddq_v),
NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
NEONMAP1(vaeseq_v, arm_neon_aese, 0),
NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
@@ -5638,11 +5835,15 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(splatq_laneq_v),
NEONMAP1(vabs_v, aarch64_neon_abs, 0),
NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
+ NEONMAP0(vadd_v),
NEONMAP0(vaddhn_v),
+ NEONMAP0(vaddq_p128),
+ NEONMAP0(vaddq_v),
NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
+ NEONMAP2(vbcaxq_v, aarch64_crypto_bcaxu, aarch64_crypto_bcaxs, Add1ArgType | UnsignedAlts),
NEONMAP1(vbfdot_v, aarch64_neon_bfdot, 0),
NEONMAP1(vbfdotq_v, aarch64_neon_bfdot, 0),
NEONMAP1(vbfmlalbq_v, aarch64_neon_bfmlalb, 0),
@@ -5712,6 +5913,7 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
NEONMAP2(vdot_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
NEONMAP2(vdotq_v, aarch64_neon_udot, aarch64_neon_sdot, 0),
+ NEONMAP2(veor3q_v, aarch64_crypto_eor3u, aarch64_crypto_eor3s, Add1ArgType | UnsignedAlts),
NEONMAP0(vext_v),
NEONMAP0(vextq_v),
NEONMAP0(vfma_v),
@@ -5777,12 +5979,21 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
+ NEONMAP1(vrax1q_v, aarch64_crypto_rax1, 0),
NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vrnd32x_v, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32xq_v, aarch64_neon_frint32x, Add1ArgType),
+ NEONMAP1(vrnd32z_v, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd32zq_v, aarch64_neon_frint32z, Add1ArgType),
+ NEONMAP1(vrnd64x_v, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64xq_v, aarch64_neon_frint64x, Add1ArgType),
+ NEONMAP1(vrnd64z_v, aarch64_neon_frint64z, Add1ArgType),
+ NEONMAP1(vrnd64zq_v, aarch64_neon_frint64z, Add1ArgType),
NEONMAP0(vrndi_v),
NEONMAP0(vrndiq_v),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
@@ -5800,6 +6011,10 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
+ NEONMAP1(vsha512h2q_v, aarch64_crypto_sha512h2, 0),
+ NEONMAP1(vsha512hq_v, aarch64_crypto_sha512h, 0),
+ NEONMAP1(vsha512su0q_v, aarch64_crypto_sha512su0, 0),
+ NEONMAP1(vsha512su1q_v, aarch64_crypto_sha512su1, 0),
NEONMAP0(vshl_n_v),
NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
NEONMAP0(vshll_n_v),
@@ -5808,6 +6023,15 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP0(vshr_n_v),
NEONMAP0(vshrn_n_v),
NEONMAP0(vshrq_n_v),
+ NEONMAP1(vsm3partw1q_v, aarch64_crypto_sm3partw1, 0),
+ NEONMAP1(vsm3partw2q_v, aarch64_crypto_sm3partw2, 0),
+ NEONMAP1(vsm3ss1q_v, aarch64_crypto_sm3ss1, 0),
+ NEONMAP1(vsm3tt1aq_v, aarch64_crypto_sm3tt1a, 0),
+ NEONMAP1(vsm3tt1bq_v, aarch64_crypto_sm3tt1b, 0),
+ NEONMAP1(vsm3tt2aq_v, aarch64_crypto_sm3tt2a, 0),
+ NEONMAP1(vsm3tt2bq_v, aarch64_crypto_sm3tt2b, 0),
+ NEONMAP1(vsm4ekeyq_v, aarch64_crypto_sm4ekey, 0),
+ NEONMAP1(vsm4eq_v, aarch64_crypto_sm4e, 0),
NEONMAP1(vst1_x2_v, aarch64_neon_st1x2, 0),
NEONMAP1(vst1_x3_v, aarch64_neon_st1x3, 0),
NEONMAP1(vst1_x4_v, aarch64_neon_st1x4, 0),
@@ -5820,6 +6044,7 @@ static const ARMVectorIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP1(vusdot_v, aarch64_neon_usdot, 0),
NEONMAP1(vusdotq_v, aarch64_neon_usdot, 0),
NEONMAP1(vusmmlaq_v, aarch64_neon_usmmla, 0),
+ NEONMAP1(vxarq_v, aarch64_crypto_xar, 0),
};
static const ARMVectorIntrinsicInfo AArch64SISDIntrinsicMap[] = {
@@ -6266,6 +6491,14 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
if (VTy->getElementType()->isFloatingPointTy())
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
+ case NEON::BI__builtin_neon_vadd_v:
+ case NEON::BI__builtin_neon_vaddq_v: {
+ llvm::Type *VTy = llvm::FixedVectorType::get(Int8Ty, Quad ? 16 : 8);
+ Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
+ return Builder.CreateBitCast(Ops[0], Ty);
+ }
case NEON::BI__builtin_neon_vaddhn_v: {
llvm::FixedVectorType *SrcTy =
llvm::FixedVectorType::getExtendedElementVectorType(VTy);
@@ -6638,6 +6871,13 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vrshrq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
1, true);
+ case NEON::BI__builtin_neon_vsha512hq_v:
+ case NEON::BI__builtin_neon_vsha512h2q_v:
+ case NEON::BI__builtin_neon_vsha512su0q_v:
+ case NEON::BI__builtin_neon_vsha512su1q_v: {
+ Function *F = CGM.getIntrinsic(Int);
+ return EmitNeonCall(F, Ops, "");
+ }
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
@@ -6686,6 +6926,22 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Ops.push_back(getAlignmentValue32(PtrOp0));
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
+ case NEON::BI__builtin_neon_vsm3partw1q_v:
+ case NEON::BI__builtin_neon_vsm3partw2q_v:
+ case NEON::BI__builtin_neon_vsm3ss1q_v:
+ case NEON::BI__builtin_neon_vsm4ekeyq_v:
+ case NEON::BI__builtin_neon_vsm4eq_v: {
+ Function *F = CGM.getIntrinsic(Int);
+ return EmitNeonCall(F, Ops, "");
+ }
+ case NEON::BI__builtin_neon_vsm3tt1aq_v:
+ case NEON::BI__builtin_neon_vsm3tt1bq_v:
+ case NEON::BI__builtin_neon_vsm3tt2aq_v:
+ case NEON::BI__builtin_neon_vsm3tt2bq_v: {
+ Function *F = CGM.getIntrinsic(Int);
+ Ops[3] = Builder.CreateZExt(Ops[3], Int64Ty);
+ return EmitNeonCall(F, Ops, "");
+ }
case NEON::BI__builtin_neon_vst1_x2_v:
case NEON::BI__builtin_neon_vst1q_x2_v:
case NEON::BI__builtin_neon_vst1_x3_v:
@@ -6767,6 +7023,11 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
}
return SV;
}
+ case NEON::BI__builtin_neon_vxarq_v: {
+ Function *F = CGM.getIntrinsic(Int);
+ Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
+ return EmitNeonCall(F, Ops, "");
+ }
case NEON::BI__builtin_neon_vzip_v:
case NEON::BI__builtin_neon_vzipq_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
@@ -8579,7 +8840,7 @@ Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
bool IsZExtReturn) {
QualType LangPTy = E->getArg(1)->getType();
llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
- LangPTy->getAs<PointerType>()->getPointeeType());
+ LangPTy->castAs<PointerType>()->getPointeeType());
// The vector type that is returned may be different from the
// eventual type loaded from memory.
@@ -8604,7 +8865,7 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
unsigned BuiltinID) {
QualType LangPTy = E->getArg(1)->getType();
llvm::Type *MemEltTy = CGM.getTypes().ConvertType(
- LangPTy->getAs<PointerType>()->getPointeeType());
+ LangPTy->castAs<PointerType>()->getPointeeType());
// The vector type that is stored may be different from the
// eventual type stored to memory.
@@ -8881,32 +9142,32 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
if (IsBoolTy)
EltTy = IntegerType::get(getLLVMContext(), SVEBitsPerBlock / NumOpnds);
- Address Alloca = CreateTempAlloca(llvm::ArrayType::get(EltTy, NumOpnds),
- CharUnits::fromQuantity(16));
+ SmallVector<llvm::Value *, 16> VecOps;
for (unsigned I = 0; I < NumOpnds; ++I)
- Builder.CreateDefaultAlignedStore(
- IsBoolTy ? Builder.CreateZExt(Ops[I], EltTy) : Ops[I],
- Builder.CreateGEP(Alloca.getPointer(),
- {Builder.getInt64(0), Builder.getInt64(I)}));
+ VecOps.push_back(Builder.CreateZExt(Ops[I], EltTy));
+ Value *Vec = BuildVector(VecOps);
SVETypeFlags TypeFlags(Builtin->TypeModifier);
Value *Pred = EmitSVEAllTruePred(TypeFlags);
llvm::Type *OverloadedTy = getSVEVectorForElementType(EltTy);
- Function *F = CGM.getIntrinsic(Intrinsic::aarch64_sve_ld1rq, OverloadedTy);
- Value *Alloca0 = Builder.CreateGEP(
- Alloca.getPointer(), {Builder.getInt64(0), Builder.getInt64(0)});
- Value *LD1RQ = Builder.CreateCall(F, {Pred, Alloca0});
+ Value *InsertSubVec = Builder.CreateInsertVector(
+ OverloadedTy, UndefValue::get(OverloadedTy), Vec, Builder.getInt64(0));
+
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::aarch64_sve_dupq_lane, OverloadedTy);
+ Value *DupQLane =
+ Builder.CreateCall(F, {InsertSubVec, Builder.getInt64(0)});
if (!IsBoolTy)
- return LD1RQ;
+ return DupQLane;
// For svdupq_n_b* we need to add an additional 'cmpne' with '0'.
F = CGM.getIntrinsic(NumOpnds == 2 ? Intrinsic::aarch64_sve_cmpne
: Intrinsic::aarch64_sve_cmpne_wide,
OverloadedTy);
- Value *Call =
- Builder.CreateCall(F, {Pred, LD1RQ, EmitSVEDupX(Builder.getInt64(0))});
+ Value *Call = Builder.CreateCall(
+ F, {Pred, DupQLane, EmitSVEDupX(Builder.getInt64(0))});
return EmitSVEPredicateCast(Call, cast<llvm::ScalableVectorType>(Ty));
}
@@ -9051,6 +9312,38 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
"cls");
}
+ if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
+ BuiltinID == AArch64::BI__builtin_arm_frint32z) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *Ty = Arg->getType();
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
+ Arg, "frint32z");
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
+ BuiltinID == AArch64::BI__builtin_arm_frint64z) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *Ty = Arg->getType();
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
+ Arg, "frint64z");
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
+ BuiltinID == AArch64::BI__builtin_arm_frint32x) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *Ty = Arg->getType();
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
+ Arg, "frint32x");
+ }
+
+ if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
+ BuiltinID == AArch64::BI__builtin_arm_frint64x) {
+ llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *Ty = Arg->getType();
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
+ Arg, "frint64x");
+ }
+
if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"__jcvt of unusual size!");
@@ -9073,7 +9366,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Value *Val = Builder.CreateCall(F, MemAddr);
llvm::Value *ToRet;
for (size_t i = 0; i < 8; i++) {
- llvm::Value *ValOffsetPtr = Builder.CreateGEP(ValPtr, Builder.getInt32(i));
+ llvm::Value *ValOffsetPtr =
+ Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
}
@@ -9084,7 +9378,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
SmallVector<llvm::Value *, 9> Args;
Args.push_back(MemAddr);
for (size_t i = 0; i < 8; i++) {
- llvm::Value *ValOffsetPtr = Builder.CreateGEP(ValPtr, Builder.getInt32(i));
+ llvm::Value *ValOffsetPtr =
+ Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
Args.push_back(Builder.CreateLoad(Addr));
}
@@ -9099,6 +9394,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
}
+ if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
+ BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
+
+ auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
+ ? Intrinsic::aarch64_rndr
+ : Intrinsic::aarch64_rndrrs);
+ Function *F = CGM.getIntrinsic(Intr);
+ llvm::Value *Val = Builder.CreateCall(F);
+ Value *RandomValue = Builder.CreateExtractValue(Val, 0);
+ Value *Status = Builder.CreateExtractValue(Val, 1);
+
+ Address MemAddress = EmitPointerWithAlignment(E->getArg(0));
+ Builder.CreateStore(RandomValue, MemAddress);
+ Status = Builder.CreateZExt(Status, Int32Ty);
+ return Status;
+ }
+
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
@@ -9212,7 +9524,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::APSInt Value = Result.Val.getInt();
LLVMContext &Context = CGM.getLLVMContext();
- std::string Reg = Value == 31 ? "sp" : "x" + Value.toString(10);
+ std::string Reg = Value == 31 ? "sp" : "x" + toString(Value, 10);
llvm::Metadata *Ops[] = {llvm::MDString::get(Context, Reg)};
llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
@@ -9491,6 +9803,15 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vabsh_f16:
Ops.push_back(EmitScalarExpr(E->getArg(0)));
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, HalfTy), Ops, "vabs");
+ case NEON::BI__builtin_neon_vaddq_p128: {
+ llvm::Type *Ty = GetNeonType(this, NeonTypeFlags::Poly128);
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateXor(Ops[0], Ops[1]);
+ llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
+ return Builder.CreateBitCast(Ops[0], Int128Ty);
+ }
case NEON::BI__builtin_neon_vldrq_p128: {
llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
@@ -10392,17 +10713,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vrndnh_f16: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::aarch64_neon_frintn;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_roundeven
+ : Intrinsic::roundeven;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndn_v:
case NEON::BI__builtin_neon_vrndnq_v: {
- Int = Intrinsic::aarch64_neon_frintn;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_roundeven
+ : Intrinsic::roundeven;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndns_f32: {
Ops.push_back(EmitScalarExpr(E->getArg(0)));
- Int = Intrinsic::aarch64_neon_frintn;
+ Int = Builder.getIsFPConstrained()
+ ? Intrinsic::experimental_constrained_roundeven
+ : Intrinsic::roundeven;
return EmitNeonCall(CGM.getIntrinsic(Int, FloatTy), Ops, "vrndn");
}
case NEON::BI__builtin_neon_vrndph_f16: {
@@ -10440,6 +10767,30 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
: Intrinsic::trunc;
return EmitNeonCall(CGM.getIntrinsic(Int, HalfTy), Ops, "vrndz");
}
+ case NEON::BI__builtin_neon_vrnd32x_v:
+ case NEON::BI__builtin_neon_vrnd32xq_v: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frint32x;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32x");
+ }
+ case NEON::BI__builtin_neon_vrnd32z_v:
+ case NEON::BI__builtin_neon_vrnd32zq_v: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frint32z;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd32z");
+ }
+ case NEON::BI__builtin_neon_vrnd64x_v:
+ case NEON::BI__builtin_neon_vrnd64xq_v: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frint64x;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64x");
+ }
+ case NEON::BI__builtin_neon_vrnd64z_v:
+ case NEON::BI__builtin_neon_vrnd64zq_v: {
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Int = Intrinsic::aarch64_neon_frint64z;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd64z");
+ }
case NEON::BI__builtin_neon_vrnd_v:
case NEON::BI__builtin_neon_vrndq_v: {
Int = Builder.getIsFPConstrained()
@@ -10610,7 +10961,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
case NEON::BI__builtin_neon_vrbit_v:
case NEON::BI__builtin_neon_vrbitq_v: {
- Int = Intrinsic::aarch64_neon_rbit;
+ Int = Intrinsic::bitreverse;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
}
case NEON::BI__builtin_neon_vaddv_u8:
@@ -11445,14 +11796,14 @@ static Value *EmitX86MaskedStore(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
static Value *EmitX86MaskedLoad(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
Align Alignment) {
// Cast the pointer to right type.
- Value *Ptr = CGF.Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()));
+ llvm::Type *Ty = Ops[1]->getType();
+ Value *Ptr =
+ CGF.Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
Value *MaskVec = getMaskVecValue(
- CGF, Ops[2],
- cast<llvm::FixedVectorType>(Ops[1]->getType())->getNumElements());
+ CGF, Ops[2], cast<llvm::FixedVectorType>(Ty)->getNumElements());
- return CGF.Builder.CreateMaskedLoad(Ptr, Alignment, MaskVec, Ops[1]);
+ return CGF.Builder.CreateMaskedLoad(Ty, Ptr, Alignment, MaskVec, Ops[1]);
}
static Value *EmitX86ExpandLoad(CodeGenFunction &CGF,
@@ -11664,7 +12015,7 @@ static Value *EmitX86ConvertToMask(CodeGenFunction &CGF, Value *In) {
return EmitX86MaskedCompare(CGF, 1, true, { In, Zero });
}
-static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
+static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF, const CallExpr *E,
ArrayRef<Value *> Ops, bool IsSigned) {
unsigned Rnd = cast<llvm::ConstantInt>(Ops[3])->getZExtValue();
llvm::Type *Ty = Ops[1]->getType();
@@ -11676,6 +12027,7 @@ static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
Function *F = CGF.CGM.getIntrinsic(IID, { Ty, Ops[0]->getType() });
Res = CGF.Builder.CreateCall(F, { Ops[0], Ops[3] });
} else {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Res = IsSigned ? CGF.Builder.CreateSIToFP(Ops[0], Ty)
: CGF.Builder.CreateUIToFP(Ops[0], Ty);
}
@@ -11684,8 +12036,9 @@ static Value *EmitX86ConvertIntToFp(CodeGenFunction &CGF,
}
// Lowers X86 FMA intrinsics to IR.
-static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
- unsigned BuiltinID, bool IsAddSub) {
+static Value *EmitX86FMAExpr(CodeGenFunction &CGF, const CallExpr *E,
+ ArrayRef<Value *> Ops, unsigned BuiltinID,
+ bool IsAddSub) {
bool Subtract = false;
Intrinsic::ID IID = Intrinsic::not_intrinsic;
@@ -11742,6 +12095,7 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
llvm::Type *Ty = A->getType();
Function *FMA;
if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
FMA = CGF.CGM.getIntrinsic(Intrinsic::experimental_constrained_fma, Ty);
Res = CGF.Builder.CreateConstrainedFPCall(FMA, {A, B, C});
} else {
@@ -11783,10 +12137,10 @@ static Value *EmitX86FMAExpr(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
return Res;
}
-static Value *
-EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
- Value *Upper, bool ZeroMask = false, unsigned PTIdx = 0,
- bool NegAcc = false) {
+static Value *EmitScalarFMAExpr(CodeGenFunction &CGF, const CallExpr *E,
+ MutableArrayRef<Value *> Ops, Value *Upper,
+ bool ZeroMask = false, unsigned PTIdx = 0,
+ bool NegAcc = false) {
unsigned Rnd = 4;
if (Ops.size() > 4)
Rnd = cast<llvm::ConstantInt>(Ops[4])->getZExtValue();
@@ -11805,6 +12159,7 @@ EmitScalarFMAExpr(CodeGenFunction &CGF, MutableArrayRef<Value *> Ops,
Res = CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IID),
{Ops[0], Ops[1], Ops[2], Ops[4]});
} else if (CGF.Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
Function *FMA = CGF.CGM.getIntrinsic(
Intrinsic::experimental_constrained_fma, Ops[0]->getType());
Res = CGF.Builder.CreateConstrainedFPCall(FMA, Ops.slice(0, 3));
@@ -11993,7 +12348,8 @@ Value *CodeGenFunction::EmitX86CpuIs(StringRef CPUStr) {
llvm::Value *Idxs[] = {ConstantInt::get(Int32Ty, 0),
ConstantInt::get(Int32Ty, Index)};
llvm::Value *CpuValue = Builder.CreateGEP(STy, CpuModel, Idxs);
- CpuValue = Builder.CreateAlignedLoad(CpuValue, CharUnits::fromQuantity(4));
+ CpuValue = Builder.CreateAlignedLoad(Int32Ty, CpuValue,
+ CharUnits::fromQuantity(4));
// Check the value of the field against the requested value.
return Builder.CreateICmpEQ(CpuValue,
@@ -12050,8 +12406,8 @@ llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
Value *Idxs[] = {Builder.getInt32(0), Builder.getInt32(3),
Builder.getInt32(0)};
Value *CpuFeatures = Builder.CreateGEP(STy, CpuModel, Idxs);
- Value *Features =
- Builder.CreateAlignedLoad(CpuFeatures, CharUnits::fromQuantity(4));
+ Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures,
+ CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Mask = Builder.getInt32(Features1);
@@ -12065,8 +12421,8 @@ llvm::Value *CodeGenFunction::EmitX86CpuSupports(uint64_t FeaturesMask) {
"__cpu_features2");
cast<llvm::GlobalValue>(CpuFeatures2)->setDSOLocal(true);
- Value *Features =
- Builder.CreateAlignedLoad(CpuFeatures2, CharUnits::fromQuantity(4));
+ Value *Features = Builder.CreateAlignedLoad(Int32Ty, CpuFeatures2,
+ CharUnits::fromQuantity(4));
// Check the value of the bit corresponding to the feature requested.
Value *Mask = Builder.getInt32(Features2);
@@ -12142,8 +12498,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// TODO: The builtins could be removed if the SSE header files used vector
// extension comparisons directly (vector ordered/unordered may need
// additional support via __builtin_isnan()).
- auto getVectorFCmpIR = [this, &Ops](CmpInst::Predicate Pred,
- bool IsSignaling) {
+ auto getVectorFCmpIR = [this, &Ops, E](CmpInst::Predicate Pred,
+ bool IsSignaling) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Value *Cmp;
if (IsSignaling)
Cmp = Builder.CreateFCmpS(Pred, Ops[0], Ops[1]);
@@ -12385,31 +12742,31 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_cvtdq2ps512_mask:
case X86::BI__builtin_ia32_cvtqq2ps512_mask:
case X86::BI__builtin_ia32_cvtqq2pd512_mask:
- return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/true);
+ return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ true);
case X86::BI__builtin_ia32_cvtudq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2ps512_mask:
case X86::BI__builtin_ia32_cvtuqq2pd512_mask:
- return EmitX86ConvertIntToFp(*this, Ops, /*IsSigned*/false);
+ return EmitX86ConvertIntToFp(*this, E, Ops, /*IsSigned*/ false);
case X86::BI__builtin_ia32_vfmaddss3:
case X86::BI__builtin_ia32_vfmaddsd3:
case X86::BI__builtin_ia32_vfmaddss3_mask:
case X86::BI__builtin_ia32_vfmaddsd3_mask:
- return EmitScalarFMAExpr(*this, Ops, Ops[0]);
+ return EmitScalarFMAExpr(*this, E, Ops, Ops[0]);
case X86::BI__builtin_ia32_vfmaddss:
case X86::BI__builtin_ia32_vfmaddsd:
- return EmitScalarFMAExpr(*this, Ops,
+ return EmitScalarFMAExpr(*this, E, Ops,
Constant::getNullValue(Ops[0]->getType()));
case X86::BI__builtin_ia32_vfmaddss3_maskz:
case X86::BI__builtin_ia32_vfmaddsd3_maskz:
- return EmitScalarFMAExpr(*this, Ops, Ops[0], /*ZeroMask*/true);
+ return EmitScalarFMAExpr(*this, E, Ops, Ops[0], /*ZeroMask*/ true);
case X86::BI__builtin_ia32_vfmaddss3_mask3:
case X86::BI__builtin_ia32_vfmaddsd3_mask3:
- return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2);
+ return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2);
case X86::BI__builtin_ia32_vfmsubss3_mask3:
case X86::BI__builtin_ia32_vfmsubsd3_mask3:
- return EmitScalarFMAExpr(*this, Ops, Ops[2], /*ZeroMask*/false, 2,
- /*NegAcc*/true);
+ return EmitScalarFMAExpr(*this, E, Ops, Ops[2], /*ZeroMask*/ false, 2,
+ /*NegAcc*/ true);
case X86::BI__builtin_ia32_vfmaddps:
case X86::BI__builtin_ia32_vfmaddpd:
case X86::BI__builtin_ia32_vfmaddps256:
@@ -12422,7 +12779,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmaddpd512_maskz:
case X86::BI__builtin_ia32_vfmaddpd512_mask3:
case X86::BI__builtin_ia32_vfmsubpd512_mask3:
- return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/false);
+ return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ false);
case X86::BI__builtin_ia32_vfmaddsubps512_mask:
case X86::BI__builtin_ia32_vfmaddsubps512_maskz:
case X86::BI__builtin_ia32_vfmaddsubps512_mask3:
@@ -12431,7 +12788,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vfmaddsubpd512_maskz:
case X86::BI__builtin_ia32_vfmaddsubpd512_mask3:
case X86::BI__builtin_ia32_vfmsubaddpd512_mask3:
- return EmitX86FMAExpr(*this, Ops, BuiltinID, /*IsAddSub*/true);
+ return EmitX86FMAExpr(*this, E, Ops, BuiltinID, /*IsAddSub*/ true);
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
@@ -13091,8 +13448,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue() & 0xff;
- // Mask the shift amount to width of two vectors.
- ShiftVal &= (2 * NumElts) - 1;
+ // Mask the shift amount to width of a vector.
+ ShiftVal &= NumElts - 1;
int Indices[16];
for (unsigned i = 0; i != NumElts; ++i)
@@ -13577,6 +13934,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *A = Builder.CreateExtractElement(Ops[0], (uint64_t)0);
Function *F;
if (Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
A->getType());
A = Builder.CreateConstrainedFPCall(F, {A});
@@ -13600,6 +13958,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *A = Builder.CreateExtractElement(Ops[1], (uint64_t)0);
Function *F;
if (Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
A->getType());
A = Builder.CreateConstrainedFPCall(F, A);
@@ -13629,6 +13988,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
}
if (Builder.getIsFPConstrained()) {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_sqrt,
Ops[0]->getType());
return Builder.CreateConstrainedFPCall(F, Ops[0]);
@@ -13794,14 +14154,30 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_reduce_fadd_ps512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Ops[1]->getType());
+ Builder.getFastMathFlags().setAllowReassoc();
return Builder.CreateCall(F, {Ops[0], Ops[1]});
}
case X86::BI__builtin_ia32_reduce_fmul_pd512:
case X86::BI__builtin_ia32_reduce_fmul_ps512: {
Function *F =
CGM.getIntrinsic(Intrinsic::vector_reduce_fmul, Ops[1]->getType());
+ Builder.getFastMathFlags().setAllowReassoc();
return Builder.CreateCall(F, {Ops[0], Ops[1]});
}
+ case X86::BI__builtin_ia32_reduce_fmax_pd512:
+ case X86::BI__builtin_ia32_reduce_fmax_ps512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_fmax, Ops[0]->getType());
+ Builder.getFastMathFlags().setNoNaNs();
+ return Builder.CreateCall(F, {Ops[0]});
+ }
+ case X86::BI__builtin_ia32_reduce_fmin_pd512:
+ case X86::BI__builtin_ia32_reduce_fmin_ps512: {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::vector_reduce_fmin, Ops[0]->getType());
+ Builder.getFastMathFlags().setNoNaNs();
+ return Builder.CreateCall(F, {Ops[0]});
+ }
case X86::BI__builtin_ia32_reduce_mul_d512:
case X86::BI__builtin_ia32_reduce_mul_q512: {
Function *F =
@@ -14173,6 +14549,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
if (IsMaskFCmp) {
// We ignore SAE if strict FP is disabled. We only keep precise
// exception behavior under strict FP.
+ // NOTE: If strict FP does ever go through here a CGFPOptionsRAII
+ // object will be required.
unsigned NumElts =
cast<llvm::FixedVectorType>(Ops[0]->getType())->getNumElements();
Value *Cmp;
@@ -14225,8 +14603,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vcvtph2ps256:
case X86::BI__builtin_ia32_vcvtph2ps_mask:
case X86::BI__builtin_ia32_vcvtph2ps256_mask:
- case X86::BI__builtin_ia32_vcvtph2ps512_mask:
+ case X86::BI__builtin_ia32_vcvtph2ps512_mask: {
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
return EmitX86CvtF16ToFloatExpr(*this, Ops, ConvertType(E->getType()));
+ }
// AVX512 bf16 intrinsics
case X86::BI__builtin_ia32_cvtneps2bf16_128_mask: {
@@ -14399,7 +14779,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (int i = 0; i < 6; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
- Value *Ptr = Builder.CreateConstGEP1_32(Ops[2], i * 16);
+ Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[2], i * 16);
Ptr = Builder.CreateBitCast(
Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
@@ -14415,7 +14795,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
for (int i = 0; i < 7; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
- Value *Ptr = Builder.CreateConstGEP1_32(Ops[3], i * 16);
+ Value *Ptr = Builder.CreateConstGEP1_32(Int8Ty, Ops[3], i * 16);
Ptr = Builder.CreateBitCast(
Ptr, llvm::PointerType::getUnqual(Extract->getType()));
Builder.CreateAlignedStore(Extract, Ptr, Align(1));
@@ -14428,27 +14808,50 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_aesenc256kl_u8:
case X86::BI__builtin_ia32_aesdec256kl_u8: {
Intrinsic::ID IID;
+ StringRef BlockName;
switch (BuiltinID) {
- default: llvm_unreachable("Unexpected builtin");
+ default:
+ llvm_unreachable("Unexpected builtin");
case X86::BI__builtin_ia32_aesenc128kl_u8:
IID = Intrinsic::x86_aesenc128kl;
+ BlockName = "aesenc128kl";
break;
case X86::BI__builtin_ia32_aesdec128kl_u8:
IID = Intrinsic::x86_aesdec128kl;
+ BlockName = "aesdec128kl";
break;
case X86::BI__builtin_ia32_aesenc256kl_u8:
IID = Intrinsic::x86_aesenc256kl;
+ BlockName = "aesenc256kl";
break;
case X86::BI__builtin_ia32_aesdec256kl_u8:
IID = Intrinsic::x86_aesdec256kl;
+ BlockName = "aesdec256kl";
break;
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), {Ops[1], Ops[2]});
- Builder.CreateDefaultAlignedStore(Builder.CreateExtractValue(Call, 1),
- Ops[0]);
+ BasicBlock *NoError =
+ createBasicBlock(BlockName + "_no_error", this->CurFn);
+ BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
+ BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
+
+ Value *Ret = Builder.CreateExtractValue(Call, 0);
+ Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
+ Value *Out = Builder.CreateExtractValue(Call, 1);
+ Builder.CreateCondBr(Succ, NoError, Error);
+ Builder.SetInsertPoint(NoError);
+ Builder.CreateDefaultAlignedStore(Out, Ops[0]);
+ Builder.CreateBr(End);
+
+ Builder.SetInsertPoint(Error);
+ Constant *Zero = llvm::Constant::getNullValue(Out->getType());
+ Builder.CreateDefaultAlignedStore(Zero, Ops[0]);
+ Builder.CreateBr(End);
+
+ Builder.SetInsertPoint(End);
return Builder.CreateExtractValue(Call, 0);
}
case X86::BI__builtin_ia32_aesencwide128kl_u8:
@@ -14456,36 +14859,63 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_aesencwide256kl_u8:
case X86::BI__builtin_ia32_aesdecwide256kl_u8: {
Intrinsic::ID IID;
+ StringRef BlockName;
switch (BuiltinID) {
case X86::BI__builtin_ia32_aesencwide128kl_u8:
IID = Intrinsic::x86_aesencwide128kl;
+ BlockName = "aesencwide128kl";
break;
case X86::BI__builtin_ia32_aesdecwide128kl_u8:
IID = Intrinsic::x86_aesdecwide128kl;
+ BlockName = "aesdecwide128kl";
break;
case X86::BI__builtin_ia32_aesencwide256kl_u8:
IID = Intrinsic::x86_aesencwide256kl;
+ BlockName = "aesencwide256kl";
break;
case X86::BI__builtin_ia32_aesdecwide256kl_u8:
IID = Intrinsic::x86_aesdecwide256kl;
+ BlockName = "aesdecwide256kl";
break;
}
+ llvm::Type *Ty = FixedVectorType::get(Builder.getInt64Ty(), 2);
Value *InOps[9];
InOps[0] = Ops[2];
for (int i = 0; i != 8; ++i) {
- Value *Ptr = Builder.CreateConstGEP1_32(Ops[1], i);
- InOps[i + 1] = Builder.CreateAlignedLoad(Ptr, Align(16));
+ Value *Ptr = Builder.CreateConstGEP1_32(Ty, Ops[1], i);
+ InOps[i + 1] = Builder.CreateAlignedLoad(Ty, Ptr, Align(16));
}
Value *Call = Builder.CreateCall(CGM.getIntrinsic(IID), InOps);
+ BasicBlock *NoError =
+ createBasicBlock(BlockName + "_no_error", this->CurFn);
+ BasicBlock *Error = createBasicBlock(BlockName + "_error", this->CurFn);
+ BasicBlock *End = createBasicBlock(BlockName + "_end", this->CurFn);
+
+ Value *Ret = Builder.CreateExtractValue(Call, 0);
+ Value *Succ = Builder.CreateTrunc(Ret, Builder.getInt1Ty());
+ Builder.CreateCondBr(Succ, NoError, Error);
+
+ Builder.SetInsertPoint(NoError);
for (int i = 0; i != 8; ++i) {
Value *Extract = Builder.CreateExtractValue(Call, i + 1);
- Value *Ptr = Builder.CreateConstGEP1_32(Ops[0], i);
+ Value *Ptr = Builder.CreateConstGEP1_32(Extract->getType(), Ops[0], i);
Builder.CreateAlignedStore(Extract, Ptr, Align(16));
}
+ Builder.CreateBr(End);
+ Builder.SetInsertPoint(Error);
+ for (int i = 0; i != 8; ++i) {
+ Value *Out = Builder.CreateExtractValue(Call, i + 1);
+ Constant *Zero = llvm::Constant::getNullValue(Out->getType());
+ Value *Ptr = Builder.CreateConstGEP1_32(Out->getType(), Ops[0], i);
+ Builder.CreateAlignedStore(Zero, Ptr, Align(16));
+ }
+ Builder.CreateBr(End);
+
+ Builder.SetInsertPoint(End);
return Builder.CreateExtractValue(Call, 0);
}
}
@@ -14528,7 +14958,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
}else {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
Ops.pop_back();
}
@@ -14596,7 +15026,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
}else {
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
Ops.pop_back();
}
@@ -14639,6 +15069,143 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
+ case PPC::BI__builtin_vsx_ldrmb: {
+ // Essentially boils down to performing an unaligned VMX load sequence so
+ // as to avoid crossing a page boundary and then shuffling the elements
+ // into the right side of the vector register.
+ int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
+ llvm::Type *ResTy = ConvertType(E->getType());
+ bool IsLE = getTarget().isLittleEndian();
+
+ // If the user wants the entire vector, just load the entire vector.
+ if (NumBytes == 16) {
+ Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
+ Value *LD = Builder.CreateLoad(Address(BC, CharUnits::fromQuantity(1)));
+ if (!IsLE)
+ return LD;
+
+ // Reverse the bytes on LE.
+ SmallVector<int, 16> RevMask;
+ for (int Idx = 0; Idx < 16; Idx++)
+ RevMask.push_back(15 - Idx);
+ return Builder.CreateShuffleVector(LD, LD, RevMask);
+ }
+
+ llvm::Function *Lvx = CGM.getIntrinsic(Intrinsic::ppc_altivec_lvx);
+ llvm::Function *Lvs = CGM.getIntrinsic(IsLE ? Intrinsic::ppc_altivec_lvsr
+ : Intrinsic::ppc_altivec_lvsl);
+ llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
+ Value *HiMem = Builder.CreateGEP(
+ Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1));
+ Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo");
+ Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
+ Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1");
+
+ Ops.clear();
+ Ops.push_back(IsLE ? HiLd : LoLd);
+ Ops.push_back(IsLE ? LoLd : HiLd);
+ Ops.push_back(Mask1);
+ Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1");
+ Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
+
+ if (IsLE) {
+ SmallVector<int, 16> Consts;
+ for (int Idx = 0; Idx < 16; Idx++) {
+ int Val = (NumBytes - Idx - 1 >= 0) ? (NumBytes - Idx - 1)
+ : 16 - (NumBytes - Idx);
+ Consts.push_back(Val);
+ }
+ return Builder.CreateShuffleVector(Builder.CreateBitCast(AllElts, ResTy),
+ Zero, Consts);
+ }
+ SmallVector<Constant *, 16> Consts;
+ for (int Idx = 0; Idx < 16; Idx++)
+ Consts.push_back(Builder.getInt8(NumBytes + Idx));
+ Value *Mask2 = ConstantVector::get(Consts);
+ return Builder.CreateBitCast(
+ Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
+ }
+ case PPC::BI__builtin_vsx_strmb: {
+ int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
+ bool IsLE = getTarget().isLittleEndian();
+ auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
+ // Storing the whole vector, simply store it on BE and reverse bytes and
+ // store on LE.
+ if (Width == 16) {
+ Value *BC =
+ Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo());
+ Value *StVec = Ops[2];
+ if (IsLE) {
+ SmallVector<int, 16> RevMask;
+ for (int Idx = 0; Idx < 16; Idx++)
+ RevMask.push_back(15 - Idx);
+ StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
+ }
+ return Builder.CreateStore(StVec,
+ Address(BC, CharUnits::fromQuantity(1)));
+ }
+ auto *ConvTy = Int64Ty;
+ unsigned NumElts = 0;
+ switch (Width) {
+ default:
+ llvm_unreachable("width for stores must be a power of 2");
+ case 8:
+ ConvTy = Int64Ty;
+ NumElts = 2;
+ break;
+ case 4:
+ ConvTy = Int32Ty;
+ NumElts = 4;
+ break;
+ case 2:
+ ConvTy = Int16Ty;
+ NumElts = 8;
+ break;
+ case 1:
+ ConvTy = Int8Ty;
+ NumElts = 16;
+ break;
+ }
+ Value *Vec = Builder.CreateBitCast(
+ Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts));
+ Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0],
+ ConstantInt::get(Int64Ty, Offset));
+ Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
+ Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
+ if (IsLE && Width > 1) {
+ Function *F = CGM.getIntrinsic(Intrinsic::bswap, ConvTy);
+ Elt = Builder.CreateCall(F, Elt);
+ }
+ return Builder.CreateStore(Elt,
+ Address(PtrBC, CharUnits::fromQuantity(1)));
+ };
+ unsigned Stored = 0;
+ unsigned RemainingBytes = NumBytes;
+ Value *Result;
+ if (NumBytes == 16)
+ return StoreSubVec(16, 0, 0);
+ if (NumBytes >= 8) {
+ Result = StoreSubVec(8, NumBytes - 8, IsLE ? 0 : 1);
+ RemainingBytes -= 8;
+ Stored += 8;
+ }
+ if (RemainingBytes >= 4) {
+ Result = StoreSubVec(4, NumBytes - Stored - 4,
+ IsLE ? (Stored >> 2) : 3 - (Stored >> 2));
+ RemainingBytes -= 4;
+ Stored += 4;
+ }
+ if (RemainingBytes >= 2) {
+ Result = StoreSubVec(2, NumBytes - Stored - 2,
+ IsLE ? (Stored >> 1) : 7 - (Stored >> 1));
+ RemainingBytes -= 2;
+ Stored += 2;
+ }
+ if (RemainingBytes)
+ Result =
+ StoreSubVec(1, NumBytes - Stored - 1, IsLE ? Stored : 15 - Stored);
+ return Result;
+ }
// Square root
case PPC::BI__builtin_vsx_xvsqrtsp:
case PPC::BI__builtin_vsx_xvsqrtdp: {
@@ -14740,6 +15307,92 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
return Builder.CreateCall(F, X);
}
+ case PPC::BI__builtin_altivec_vadduqm:
+ case PPC::BI__builtin_altivec_vsubuqm: {
+ llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
+ Ops[0] =
+ Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
+ Ops[1] =
+ Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
+ if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
+ return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
+ else
+ return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
+ }
+ // Rotate and insert under mask operation.
+ // __rldimi(rs, is, shift, mask)
+ // (rotl64(rs, shift) & mask) | (is & ~mask)
+ // __rlwimi(rs, is, shift, mask)
+ // (rotl(rs, shift) & mask) | (is & ~mask)
+ case PPC::BI__builtin_ppc_rldimi:
+ case PPC::BI__builtin_ppc_rlwimi: {
+ llvm::Type *Ty = Ops[0]->getType();
+ Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
+ if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
+ Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
+ Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]});
+ Value *X = Builder.CreateAnd(Shift, Ops[3]);
+ Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3]));
+ return Builder.CreateOr(X, Y);
+ }
+ // Rotate and insert under mask operation.
+ // __rlwnm(rs, shift, mask)
+ // rotl(rs, shift) & mask
+ case PPC::BI__builtin_ppc_rlwnm: {
+ llvm::Type *Ty = Ops[0]->getType();
+ Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
+ Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]});
+ return Builder.CreateAnd(Shift, Ops[2]);
+ }
+ case PPC::BI__builtin_ppc_poppar4:
+ case PPC::BI__builtin_ppc_poppar8: {
+ llvm::Type *ArgType = Ops[0]->getType();
+ Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+ Value *Tmp = Builder.CreateCall(F, Ops[0]);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return Result;
+ }
+ case PPC::BI__builtin_ppc_cmpb: {
+ if (getTarget().getTriple().isPPC64()) {
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
+ return Builder.CreateCall(F, Ops, "cmpb");
+ }
+ // For 32 bit, emit the code as below:
+ // %conv = trunc i64 %a to i32
+ // %conv1 = trunc i64 %b to i32
+ // %shr = lshr i64 %a, 32
+ // %conv2 = trunc i64 %shr to i32
+ // %shr3 = lshr i64 %b, 32
+ // %conv4 = trunc i64 %shr3 to i32
+ // %0 = tail call i32 @llvm.ppc.cmpb32(i32 %conv, i32 %conv1)
+ // %conv5 = zext i32 %0 to i64
+ // %1 = tail call i32 @llvm.ppc.cmpb32(i32 %conv2, i32 %conv4)
+ // %conv614 = zext i32 %1 to i64
+ // %shl = shl nuw i64 %conv614, 32
+ // %or = or i64 %shl, %conv5
+ // ret i64 %or
+ Function *F =
+ CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
+ Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty);
+ Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty);
+ Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
+ Value *ArgOneHi =
+ Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty);
+ Value *ArgTwoHi =
+ Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty);
+ Value *ResLo = Builder.CreateZExt(
+ Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
+ Value *ResHiShift = Builder.CreateZExt(
+ Builder.CreateCall(F, {ArgOneHi, ArgTwoHi}, "cmpb"), Int64Ty);
+ Value *ResHi = Builder.CreateShl(ResHiShift, ShiftAmt);
+ return Builder.CreateOr(ResLo, ResHi);
+ }
// Copy sign
case PPC::BI__builtin_vsx_xvcpsgnsp:
case PPC::BI__builtin_vsx_xvcpsgndp: {
@@ -14802,6 +15455,47 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, X);
}
+ // Fastmath by default
+ case PPC::BI__builtin_ppc_recipdivf:
+ case PPC::BI__builtin_ppc_recipdivd:
+ case PPC::BI__builtin_ppc_rsqrtf:
+ case PPC::BI__builtin_ppc_rsqrtd: {
+ FastMathFlags FMF = Builder.getFastMathFlags();
+ Builder.getFastMathFlags().setFast();
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+
+ if (BuiltinID == PPC::BI__builtin_ppc_recipdivf ||
+ BuiltinID == PPC::BI__builtin_ppc_recipdivd) {
+ Value *Y = EmitScalarExpr(E->getArg(1));
+ Value *FDiv = Builder.CreateFDiv(X, Y, "recipdiv");
+ Builder.getFastMathFlags() &= (FMF);
+ return FDiv;
+ }
+ auto *One = ConstantFP::get(ResultType, 1.0);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sqrt, ResultType);
+ Value *FDiv = Builder.CreateFDiv(One, Builder.CreateCall(F, X), "rsqrt");
+ Builder.getFastMathFlags() &= (FMF);
+ return FDiv;
+ }
+ case PPC::BI__builtin_ppc_alignx: {
+ ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]);
+ if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
+ AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
+ llvm::Value::MaximumAlignment);
+
+ emitAlignmentAssumption(Ops[1], E->getArg(1),
+ /*The expr loc is sufficient.*/ SourceLocation(),
+ AlignmentCI, nullptr);
+ return Ops[1];
+ }
+ case PPC::BI__builtin_ppc_rdlam: {
+ llvm::Type *Ty = Ops[0]->getType();
+ Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false);
+ Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
+ Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt});
+ return Builder.CreateAnd(Rotate, Ops[2]);
+ }
// FMA variations
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
@@ -15013,13 +15707,20 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Unpacked, Index);
}
+ case PPC::BI__builtin_ppc_sthcx: {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
+ Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty);
+ return Builder.CreateCall(F, Ops);
+ }
+
// The PPC MMA builtins take a pointer to a __vector_quad as an argument.
// Some of the MMA instructions accumulate their result into an existing
// accumulator whereas the others generate a new accumulator. So we need to
// use custom code generation to expand a builtin call with a pointer to a
// load (if the corresponding instruction accumulates its result) followed by
// the call to the intrinsic and a store of the result.
-#define CUSTOM_BUILTIN(Name, Types, Accumulate) \
+#define CUSTOM_BUILTIN(Name, Intr, Types, Accumulate) \
case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
{
@@ -15028,7 +15729,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// return values. So, here we emit code extracting these values from the
// intrinsic results and storing them using that pointer.
if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc ||
- BuiltinID == PPC::BI__builtin_vsx_disassemble_pair) {
+ BuiltinID == PPC::BI__builtin_vsx_disassemble_pair ||
+ BuiltinID == PPC::BI__builtin_mma_disassemble_pair) {
unsigned NumVecs = 2;
auto Intrinsic = Intrinsic::ppc_vsx_disassemble_pair;
if (BuiltinID == PPC::BI__builtin_mma_disassemble_acc) {
@@ -15044,28 +15746,31 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
for (unsigned i=0; i<NumVecs; i++) {
Value *Vec = Builder.CreateExtractValue(Call, i);
llvm::ConstantInt* Index = llvm::ConstantInt::get(IntTy, i);
- Value *GEP = Builder.CreateInBoundsGEP(Ptr, Index);
+ Value *GEP = Builder.CreateInBoundsGEP(VTy, Ptr, Index);
Builder.CreateAlignedStore(Vec, GEP, MaybeAlign(16));
}
return Call;
}
bool Accumulate;
switch (BuiltinID) {
- #define CUSTOM_BUILTIN(Name, Types, Acc) \
+ #define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
case PPC::BI__builtin_##Name: \
- ID = Intrinsic::ppc_##Name; \
+ ID = Intrinsic::ppc_##Intr; \
Accumulate = Acc; \
break;
#include "clang/Basic/BuiltinsPPC.def"
}
if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
- BuiltinID == PPC::BI__builtin_vsx_stxvp) {
- if (BuiltinID == PPC::BI__builtin_vsx_lxvp) {
+ BuiltinID == PPC::BI__builtin_vsx_stxvp ||
+ BuiltinID == PPC::BI__builtin_mma_lxvp ||
+ BuiltinID == PPC::BI__builtin_mma_stxvp) {
+ if (BuiltinID == PPC::BI__builtin_vsx_lxvp ||
+ BuiltinID == PPC::BI__builtin_mma_lxvp) {
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ Ops[0] = Builder.CreateGEP(Int8Ty, Ops[1], Ops[0]);
} else {
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ Ops[1] = Builder.CreateGEP(Int8Ty, Ops[2], Ops[1]);
}
Ops.pop_back();
llvm::Function *F = CGM.getIntrinsic(ID);
@@ -15083,6 +15788,125 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *Call = Builder.CreateCall(F, CallOps);
return Builder.CreateAlignedStore(Call, Ops[0], MaybeAlign(64));
}
+
+ case PPC::BI__builtin_ppc_compare_and_swap:
+ case PPC::BI__builtin_ppc_compare_and_swaplp: {
+ Address Addr = EmitPointerWithAlignment(E->getArg(0));
+ Address OldValAddr = EmitPointerWithAlignment(E->getArg(1));
+ Value *OldVal = Builder.CreateLoad(OldValAddr);
+ QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
+ LValue LV = MakeAddrLValue(Addr, AtomicTy);
+ auto Pair = EmitAtomicCompareExchange(
+ LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(),
+ llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
+ // Unlike c11's atomic_compare_exchange, accroding to
+ // https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
+ // > In either case, the contents of the memory location specified by addr
+ // > are copied into the memory location specified by old_val_addr.
+ // But it hasn't specified storing to OldValAddr is atomic or not and
+ // which order to use. Now following XL's codegen, treat it as a normal
+ // store.
+ Value *LoadedVal = Pair.first.getScalarVal();
+ Builder.CreateStore(LoadedVal, OldValAddr);
+ return Pair.second;
+ }
+ case PPC::BI__builtin_ppc_fetch_and_add:
+ case PPC::BI__builtin_ppc_fetch_and_addlp: {
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
+ llvm::AtomicOrdering::Monotonic);
+ }
+ case PPC::BI__builtin_ppc_fetch_and_and:
+ case PPC::BI__builtin_ppc_fetch_and_andlp: {
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
+ llvm::AtomicOrdering::Monotonic);
+ }
+
+ case PPC::BI__builtin_ppc_fetch_and_or:
+ case PPC::BI__builtin_ppc_fetch_and_orlp: {
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
+ llvm::AtomicOrdering::Monotonic);
+ }
+ case PPC::BI__builtin_ppc_fetch_and_swap:
+ case PPC::BI__builtin_ppc_fetch_and_swaplp: {
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
+ llvm::AtomicOrdering::Monotonic);
+ }
+ case PPC::BI__builtin_ppc_ldarx:
+ case PPC::BI__builtin_ppc_lwarx:
+ case PPC::BI__builtin_ppc_lharx:
+ case PPC::BI__builtin_ppc_lbarx:
+ return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
+ case PPC::BI__builtin_ppc_mfspr: {
+ llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
+ ? Int32Ty
+ : Int64Ty;
+ Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
+ return Builder.CreateCall(F, Ops);
+ }
+ case PPC::BI__builtin_ppc_mtspr: {
+ llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
+ ? Int32Ty
+ : Int64Ty;
+ Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
+ return Builder.CreateCall(F, Ops);
+ }
+ case PPC::BI__builtin_ppc_popcntb: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+ Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
+ return Builder.CreateCall(F, Ops, "popcntb");
+ }
+ case PPC::BI__builtin_ppc_mtfsf: {
+ // The builtin takes a uint32 that needs to be cast to an
+ // f64 to be passed to the intrinsic.
+ Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy);
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
+ return Builder.CreateCall(F, {Ops[0], Cast}, "");
+ }
+
+ case PPC::BI__builtin_ppc_swdiv_nochk:
+ case PPC::BI__builtin_ppc_swdivs_nochk: {
+ FastMathFlags FMF = Builder.getFastMathFlags();
+ Builder.getFastMathFlags().setFast();
+ Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk");
+ Builder.getFastMathFlags() &= (FMF);
+ return FDiv;
+ }
+ case PPC::BI__builtin_ppc_fric:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::rint,
+ Intrinsic::experimental_constrained_rint))
+ .getScalarVal();
+ case PPC::BI__builtin_ppc_frim:
+ case PPC::BI__builtin_ppc_frims:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::floor,
+ Intrinsic::experimental_constrained_floor))
+ .getScalarVal();
+ case PPC::BI__builtin_ppc_frin:
+ case PPC::BI__builtin_ppc_frins:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::round,
+ Intrinsic::experimental_constrained_round))
+ .getScalarVal();
+ case PPC::BI__builtin_ppc_frip:
+ case PPC::BI__builtin_ppc_frips:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::ceil,
+ Intrinsic::experimental_constrained_ceil))
+ .getScalarVal();
+ case PPC::BI__builtin_ppc_friz:
+ case PPC::BI__builtin_ppc_frizs:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::trunc,
+ Intrinsic::experimental_constrained_trunc))
+ .getScalarVal();
+ case PPC::BI__builtin_ppc_fsqrt:
+ case PPC::BI__builtin_ppc_fsqrts:
+ return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
+ *this, E, Intrinsic::sqrt,
+ Intrinsic::experimental_constrained_sqrt))
+ .getScalarVal();
}
}
@@ -15113,7 +15937,7 @@ Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
auto *DP = EmitAMDGPUDispatchPtr(CGF);
// Indexing the HSA kernel_dispatch_packet struct.
auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
- auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
+ auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
auto *DstTy =
CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
@@ -15133,7 +15957,7 @@ Value *EmitAMDGPUGridSize(CodeGenFunction &CGF, unsigned Index) {
auto *DP = EmitAMDGPUDispatchPtr(CGF);
// Indexing the HSA kernel_dispatch_packet struct.
auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 4);
- auto *GEP = CGF.Builder.CreateGEP(DP, Offset);
+ auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
auto *DstTy =
CGF.Int32Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
auto *Cast = CGF.Builder.CreateBitCast(GEP, DstTy);
@@ -15157,8 +15981,10 @@ bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
// Map C11/C++11 memory ordering to LLVM memory ordering
+ assert(llvm::isValidAtomicOrderingCABI(ord));
switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
case llvm::AtomicOrderingCABI::acquire:
+ case llvm::AtomicOrderingCABI::consume:
AO = llvm::AtomicOrdering::Acquire;
break;
case llvm::AtomicOrderingCABI::release:
@@ -15170,8 +15996,8 @@ bool CodeGenFunction::ProcessOrderScopeAMDGCN(Value *Order, Value *Scope,
case llvm::AtomicOrderingCABI::seq_cst:
AO = llvm::AtomicOrdering::SequentiallyConsistent;
break;
- case llvm::AtomicOrderingCABI::consume:
case llvm::AtomicOrderingCABI::relaxed:
+ AO = llvm::AtomicOrdering::Monotonic;
break;
}
@@ -15386,6 +16212,23 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CI->setConvergent();
return CI;
}
+ case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray:
+ case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_h:
+ case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_l:
+ case AMDGPU::BI__builtin_amdgcn_image_bvh_intersect_ray_lh: {
+ llvm::Value *NodePtr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *RayExtent = EmitScalarExpr(E->getArg(1));
+ llvm::Value *RayOrigin = EmitScalarExpr(E->getArg(2));
+ llvm::Value *RayDir = EmitScalarExpr(E->getArg(3));
+ llvm::Value *RayInverseDir = EmitScalarExpr(E->getArg(4));
+ llvm::Value *TextureDescr = EmitScalarExpr(E->getArg(5));
+
+ Function *F = CGM.getIntrinsic(Intrinsic::amdgcn_image_bvh_intersect_ray,
+ {NodePtr->getType(), RayDir->getType()});
+ return Builder.CreateCall(F, {NodePtr, RayExtent, RayOrigin, RayDir,
+ RayInverseDir, TextureDescr});
+ }
+
// amdgcn workitem
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
@@ -15968,6 +16811,34 @@ static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
case NVPTX::BI__bmma_m8n8k128_ld_c:
return MMA_LDST(2, m8n8k128_load_c_s32);
+ // Double MMA loads
+ case NVPTX::BI__dmma_m8n8k4_ld_a:
+ return MMA_LDST(1, m8n8k4_load_a_f64);
+ case NVPTX::BI__dmma_m8n8k4_ld_b:
+ return MMA_LDST(1, m8n8k4_load_b_f64);
+ case NVPTX::BI__dmma_m8n8k4_ld_c:
+ return MMA_LDST(2, m8n8k4_load_c_f64);
+
+ // Alternate float MMA loads
+ case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
+ return MMA_LDST(4, m16n16k16_load_a_bf16);
+ case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
+ return MMA_LDST(4, m16n16k16_load_b_bf16);
+ case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
+ return MMA_LDST(2, m8n32k16_load_a_bf16);
+ case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
+ return MMA_LDST(8, m8n32k16_load_b_bf16);
+ case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
+ return MMA_LDST(8, m32n8k16_load_a_bf16);
+ case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
+ return MMA_LDST(2, m32n8k16_load_b_bf16);
+ case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
+ return MMA_LDST(4, m16n16k8_load_a_tf32);
+ case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
+ return MMA_LDST(2, m16n16k8_load_b_tf32);
+ case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
+ return MMA_LDST(8, m16n16k8_load_c_f32);
+
// NOTE: We need to follow inconsitent naming scheme used by NVCC. Unlike
// PTX and LLVM IR where stores always use fragment D, NVCC builtins always
// use fragment C for both loads and stores.
@@ -15999,6 +16870,14 @@ static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
case NVPTX::BI__bmma_m8n8k128_st_c_i32:
return MMA_LDST(2, m8n8k128_store_d_s32);
+ // Double MMA store
+ case NVPTX::BI__dmma_m8n8k4_st_c_f64:
+ return MMA_LDST(2, m8n8k4_store_d_f64);
+
+ // Alternate float MMA store
+ case NVPTX::BI__mma_m16n16k8_st_c_f32:
+ return MMA_LDST(8, m16n16k8_store_d_f32);
+
default:
llvm_unreachable("Unknown MMA builtin");
}
@@ -16012,10 +16891,14 @@ struct NVPTXMmaInfo {
unsigned NumEltsB;
unsigned NumEltsC;
unsigned NumEltsD;
+
+ // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
+ // over 'col' for layout. The index of non-satf variants is expected to match
+ // the undocumented layout constants used by CUDA's mma.hpp.
std::array<unsigned, 8> Variants;
unsigned getMMAIntrinsic(int Layout, bool Satf) {
- unsigned Index = Layout * 2 + Satf;
+ unsigned Index = Layout + 4 * Satf;
if (Index >= Variants.size())
return 0;
return Variants[Index];
@@ -16026,95 +16909,121 @@ struct NVPTXMmaInfo {
// Layout and Satf, 0 otherwise.
static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
// clang-format off
-#define MMA_VARIANTS(geom, type) {{ \
+#define MMA_VARIANTS(geom, type) \
Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
+ Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
+#define MMA_SATF_VARIANTS(geom, type) \
+ MMA_VARIANTS(geom, type), \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite \
- }}
+ Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
// Sub-integer MMA only supports row.col layout.
-#define MMA_VARIANTS_I4(geom, type) {{ \
- 0, \
+#define MMA_VARIANTS_I4(geom, type) \
0, \
Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
0, \
0, \
0, \
- 0 \
- }}
+ Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
+ 0, \
+ 0
// b1 MMA does not support .satfinite.
-#define MMA_VARIANTS_B1(geom, type) {{ \
+#define MMA_VARIANTS_B1_XOR(geom, type) \
0, \
+ Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
0, \
- Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
0, \
0, \
0, \
0, \
- 0 \
- }}
- // clang-format on
- switch (BuiltinID) {
- // FP MMA
- // Note that 'type' argument of MMA_VARIANT uses D_C notation, while
- // NumEltsN of return value are ordered as A,B,C,D.
- case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
- return {8, 8, 4, 4, MMA_VARIANTS(m16n16k16, f16_f16)};
- case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
- return {8, 8, 4, 8, MMA_VARIANTS(m16n16k16, f32_f16)};
- case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
- return {8, 8, 8, 4, MMA_VARIANTS(m16n16k16, f16_f32)};
- case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
- return {8, 8, 8, 8, MMA_VARIANTS(m16n16k16, f32_f32)};
- case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
- return {8, 8, 4, 4, MMA_VARIANTS(m32n8k16, f16_f16)};
- case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
- return {8, 8, 4, 8, MMA_VARIANTS(m32n8k16, f32_f16)};
- case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
- return {8, 8, 8, 4, MMA_VARIANTS(m32n8k16, f16_f32)};
- case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
- return {8, 8, 8, 8, MMA_VARIANTS(m32n8k16, f32_f32)};
- case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
- return {8, 8, 4, 4, MMA_VARIANTS(m8n32k16, f16_f16)};
- case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
- return {8, 8, 4, 8, MMA_VARIANTS(m8n32k16, f32_f16)};
- case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
- return {8, 8, 8, 4, MMA_VARIANTS(m8n32k16, f16_f32)};
- case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
- return {8, 8, 8, 8, MMA_VARIANTS(m8n32k16, f32_f32)};
-
- // Integer MMA
- case NVPTX::BI__imma_m16n16k16_mma_s8:
- return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, s8)};
- case NVPTX::BI__imma_m16n16k16_mma_u8:
- return {2, 2, 8, 8, MMA_VARIANTS(m16n16k16, u8)};
- case NVPTX::BI__imma_m32n8k16_mma_s8:
- return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, s8)};
- case NVPTX::BI__imma_m32n8k16_mma_u8:
- return {4, 1, 8, 8, MMA_VARIANTS(m32n8k16, u8)};
- case NVPTX::BI__imma_m8n32k16_mma_s8:
- return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, s8)};
- case NVPTX::BI__imma_m8n32k16_mma_u8:
- return {1, 4, 8, 8, MMA_VARIANTS(m8n32k16, u8)};
-
- // Sub-integer MMA
- case NVPTX::BI__imma_m8n8k32_mma_s4:
- return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, s4)};
- case NVPTX::BI__imma_m8n8k32_mma_u4:
- return {1, 1, 2, 2, MMA_VARIANTS_I4(m8n8k32, u4)};
- case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
- return {1, 1, 2, 2, MMA_VARIANTS_B1(m8n8k128, b1)};
- default:
- llvm_unreachable("Unexpected builtin ID.");
- }
+ 0
+#define MMA_VARIANTS_B1_AND(geom, type) \
+ 0, \
+ Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0, \
+ 0
+ // clang-format on
+ switch (BuiltinID) {
+ // FP MMA
+ // Note that 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
+ // NumEltsN of return value are ordered as A,B,C,D.
+ case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
+ return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
+ case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
+ return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
+ case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
+ return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
+ case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
+ return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
+ return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
+ return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
+ case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
+ return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
+ case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
+ return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
+ return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
+ return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
+ case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
+ return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
+ case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
+ return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
+
+ // Integer MMA
+ case NVPTX::BI__imma_m16n16k16_mma_s8:
+ return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
+ case NVPTX::BI__imma_m16n16k16_mma_u8:
+ return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
+ case NVPTX::BI__imma_m32n8k16_mma_s8:
+ return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
+ case NVPTX::BI__imma_m32n8k16_mma_u8:
+ return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
+ case NVPTX::BI__imma_m8n32k16_mma_s8:
+ return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
+ case NVPTX::BI__imma_m8n32k16_mma_u8:
+ return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
+
+ // Sub-integer MMA
+ case NVPTX::BI__imma_m8n8k32_mma_s4:
+ return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
+ case NVPTX::BI__imma_m8n8k32_mma_u4:
+ return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
+ case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
+ return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
+ case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
+ return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
+
+ // Double MMA
+ case NVPTX::BI__dmma_m8n8k4_mma_f64:
+ return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
+
+ // Alternate FP MMA
+ case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
+ return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
+ case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
+ return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
+ case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
+ return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
+ case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
+ return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
+ default:
+ llvm_unreachable("Unexpected builtin ID.");
+ }
#undef MMA_VARIANTS
+#undef MMA_SATF_VARIANTS
#undef MMA_VARIANTS_I4
-#undef MMA_VARIANTS_B1
+#undef MMA_VARIANTS_B1_AND
+#undef MMA_VARIANTS_B1_XOR
}
} // namespace
@@ -16410,7 +17319,20 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
case NVPTX::BI__bmma_m8n8k128_ld_c:
- {
+ // Double MMA loads.
+ case NVPTX::BI__dmma_m8n8k4_ld_a:
+ case NVPTX::BI__dmma_m8n8k4_ld_b:
+ case NVPTX::BI__dmma_m8n8k4_ld_c:
+ // Alternate float MMA loads.
+ case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
+ case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
+ case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
+ case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
+ case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
+ case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
+ case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
+ case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
+ case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Value *Src = EmitScalarExpr(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
@@ -16437,7 +17359,7 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
Dst.getElementType()),
- Builder.CreateGEP(Dst.getPointer(),
+ Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
}
@@ -16455,7 +17377,9 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__imma_m32n8k16_st_c_i32:
case NVPTX::BI__imma_m8n32k16_st_c_i32:
case NVPTX::BI__imma_m8n8k32_st_c_i32:
- case NVPTX::BI__bmma_m8n8k128_st_c_i32: {
+ case NVPTX::BI__bmma_m8n8k128_st_c_i32:
+ case NVPTX::BI__dmma_m8n8k4_st_c_f64:
+ case NVPTX::BI__mma_m16n16k8_st_c_f32: {
Value *Dst = EmitScalarExpr(E->getArg(0));
Address Src = EmitPointerWithAlignment(E->getArg(1));
Value *Ldm = EmitScalarExpr(E->getArg(2));
@@ -16474,7 +17398,9 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
SmallVector<Value *, 10> Values = {Dst};
for (unsigned i = 0; i < II.NumResults; ++i) {
Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(Src.getPointer(), llvm::ConstantInt::get(IntTy, i)),
+ Src.getElementType(),
+ Builder.CreateGEP(Src.getElementType(), Src.getPointer(),
+ llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, ParamType));
}
@@ -16505,7 +17431,13 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__imma_m8n32k16_mma_u8:
case NVPTX::BI__imma_m8n8k32_mma_s4:
case NVPTX::BI__imma_m8n8k32_mma_u4:
- case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1: {
+ case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
+ case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
+ case NVPTX::BI__dmma_m8n8k4_mma_f64:
+ case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
+ case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
+ case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
+ case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
Address Dst = EmitPointerWithAlignment(E->getArg(0));
Address SrcA = EmitPointerWithAlignment(E->getArg(1));
Address SrcB = EmitPointerWithAlignment(E->getArg(2));
@@ -16518,7 +17450,8 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
if (Layout < 0 || Layout > 3)
return nullptr;
llvm::APSInt SatfArg;
- if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1)
+ if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
+ BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
SatfArg = 0; // .b1 does not have satf argument.
else if (Optional<llvm::APSInt> OptSatfArg =
E->getArg(5)->getIntegerConstantExpr(getContext()))
@@ -16537,7 +17470,8 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
// Load A
for (unsigned i = 0; i < MI.NumEltsA; ++i) {
Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(SrcA.getPointer(),
+ SrcA.getElementType(),
+ Builder.CreateGEP(SrcA.getElementType(), SrcA.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, AType));
@@ -16546,7 +17480,8 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
for (unsigned i = 0; i < MI.NumEltsB; ++i) {
Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(SrcB.getPointer(),
+ SrcB.getElementType(),
+ Builder.CreateGEP(SrcB.getElementType(), SrcB.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, BType));
@@ -16556,7 +17491,8 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
for (unsigned i = 0; i < MI.NumEltsC; ++i) {
Value *V = Builder.CreateAlignedLoad(
- Builder.CreateGEP(SrcC.getPointer(),
+ SrcC.getElementType(),
+ Builder.CreateGEP(SrcC.getElementType(), SrcC.getPointer(),
llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
Values.push_back(Builder.CreateBitCast(V, CType));
@@ -16566,7 +17502,8 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
for (unsigned i = 0; i < MI.NumEltsD; ++i)
Builder.CreateAlignedStore(
Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
- Builder.CreateGEP(Dst.getPointer(), llvm::ConstantInt::get(IntTy, i)),
+ Builder.CreateGEP(Dst.getElementType(), Dst.getPointer(),
+ llvm::ConstantInt::get(IntTy, i)),
CharUnits::fromQuantity(4));
return Result;
}
@@ -16650,7 +17587,7 @@ RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
// can use an inbounds GEP to enable better optimization.
Value *Base = EmitCastToVoidPtr(Args.Src);
if (getLangOpts().isSignedOverflowDefined())
- Result = Builder.CreateGEP(Base, Difference, "aligned_result");
+ Result = Builder.CreateGEP(Int8Ty, Base, Difference, "aligned_result");
else
Result = EmitCheckedInBoundsGEP(Base, Difference,
/*SignedIndices=*/true,
@@ -16754,8 +17691,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_s_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_signed,
- {ResT, Src->getType()});
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::fptosi_sat, {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32_f32:
@@ -16765,8 +17702,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_trunc_saturate_u_i32x4_f32x4: {
Value *Src = EmitScalarExpr(E->getArg(0));
llvm::Type *ResT = ConvertType(E->getType());
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_trunc_saturate_unsigned,
- {ResT, Src->getType()});
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::fptoui_sat, {ResT, Src->getType()});
return Builder.CreateCall(Callee, {Src});
}
case WebAssembly::BI__builtin_wasm_min_f32:
@@ -16789,22 +17726,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::maximum, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
- case WebAssembly::BI__builtin_wasm_pmin_f32x4:
- case WebAssembly::BI__builtin_wasm_pmin_f64x2: {
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee =
- CGM.getIntrinsic(Intrinsic::wasm_pmin, ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
- case WebAssembly::BI__builtin_wasm_pmax_f32x4:
- case WebAssembly::BI__builtin_wasm_pmax_f64x2: {
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee =
- CGM.getIntrinsic(Intrinsic::wasm_pmax, ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
case WebAssembly::BI__builtin_wasm_ceil_f32x4:
case WebAssembly::BI__builtin_wasm_floor_f32x4:
case WebAssembly::BI__builtin_wasm_trunc_f32x4:
@@ -16817,19 +17738,19 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
case WebAssembly::BI__builtin_wasm_ceil_f32x4:
case WebAssembly::BI__builtin_wasm_ceil_f64x2:
- IntNo = Intrinsic::wasm_ceil;
+ IntNo = Intrinsic::ceil;
break;
case WebAssembly::BI__builtin_wasm_floor_f32x4:
case WebAssembly::BI__builtin_wasm_floor_f64x2:
- IntNo = Intrinsic::wasm_floor;
+ IntNo = Intrinsic::floor;
break;
case WebAssembly::BI__builtin_wasm_trunc_f32x4:
case WebAssembly::BI__builtin_wasm_trunc_f64x2:
- IntNo = Intrinsic::wasm_trunc;
+ IntNo = Intrinsic::trunc;
break;
case WebAssembly::BI__builtin_wasm_nearest_f32x4:
case WebAssembly::BI__builtin_wasm_nearest_f64x2:
- IntNo = Intrinsic::wasm_nearest;
+ IntNo = Intrinsic::nearbyint;
break;
default:
llvm_unreachable("unexpected builtin ID");
@@ -16838,94 +17759,37 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
return Builder.CreateCall(Callee, Value);
}
- case WebAssembly::BI__builtin_wasm_swizzle_v8x16: {
+ case WebAssembly::BI__builtin_wasm_swizzle_i8x16: {
Value *Src = EmitScalarExpr(E->getArg(0));
Value *Indices = EmitScalarExpr(E->getArg(1));
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_swizzle);
return Builder.CreateCall(Callee, {Src, Indices});
}
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
- case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_f64x2: {
- llvm::APSInt LaneConst =
- *E->getArg(1)->getIntegerConstantExpr(getContext());
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
- Value *Extract = Builder.CreateExtractElement(Vec, Lane);
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_s_i16x8:
- return Builder.CreateSExt(Extract, ConvertType(E->getType()));
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i8x16:
- case WebAssembly::BI__builtin_wasm_extract_lane_u_i16x8:
- return Builder.CreateZExt(Extract, ConvertType(E->getType()));
- case WebAssembly::BI__builtin_wasm_extract_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_extract_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_extract_lane_f64x2:
- return Extract;
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- }
- case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
- case WebAssembly::BI__builtin_wasm_replace_lane_i16x8:
- case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_f64x2: {
- llvm::APSInt LaneConst =
- *E->getArg(1)->getIntegerConstantExpr(getContext());
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Value *Lane = llvm::ConstantInt::get(getLLVMContext(), LaneConst);
- Value *Val = EmitScalarExpr(E->getArg(2));
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_replace_lane_i8x16:
- case WebAssembly::BI__builtin_wasm_replace_lane_i16x8: {
- llvm::Type *ElemType =
- cast<llvm::VectorType>(ConvertType(E->getType()))->getElementType();
- Value *Trunc = Builder.CreateTrunc(Val, ElemType);
- return Builder.CreateInsertElement(Vec, Trunc, Lane);
- }
- case WebAssembly::BI__builtin_wasm_replace_lane_i32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_i64x2:
- case WebAssembly::BI__builtin_wasm_replace_lane_f32x4:
- case WebAssembly::BI__builtin_wasm_replace_lane_f64x2:
- return Builder.CreateInsertElement(Vec, Val, Lane);
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- }
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8: {
+ case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8: {
unsigned IntNo;
switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_s_i16x8:
+ case WebAssembly::BI__builtin_wasm_add_sat_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_sat_s_i16x8:
IntNo = Intrinsic::sadd_sat;
break;
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_add_saturate_u_i16x8:
+ case WebAssembly::BI__builtin_wasm_add_sat_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_add_sat_u_i16x8:
IntNo = Intrinsic::uadd_sat;
break;
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_s_i16x8:
- IntNo = Intrinsic::wasm_sub_saturate_signed;
+ case WebAssembly::BI__builtin_wasm_sub_sat_s_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_sat_s_i16x8:
+ IntNo = Intrinsic::wasm_sub_sat_signed;
break;
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i8x16:
- case WebAssembly::BI__builtin_wasm_sub_saturate_u_i16x8:
- IntNo = Intrinsic::wasm_sub_saturate_unsigned;
+ case WebAssembly::BI__builtin_wasm_sub_sat_u_i8x16:
+ case WebAssembly::BI__builtin_wasm_sub_sat_u_i16x8:
+ IntNo = Intrinsic::wasm_sub_sat_unsigned;
break;
default:
llvm_unreachable("unexpected builtin ID");
@@ -16937,7 +17801,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_abs_i8x16:
case WebAssembly::BI__builtin_wasm_abs_i16x8:
- case WebAssembly::BI__builtin_wasm_abs_i32x4: {
+ case WebAssembly::BI__builtin_wasm_abs_i32x4:
+ case WebAssembly::BI__builtin_wasm_abs_i64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
Value *Neg = Builder.CreateNeg(Vec, "neg");
Constant *Zero = llvm::Constant::getNullValue(Vec->getType());
@@ -16993,54 +17858,10 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
ConvertType(E->getType()));
return Builder.CreateCall(Callee, {LHS, RHS});
}
- case WebAssembly::BI__builtin_wasm_q15mulr_saturate_s_i16x8: {
+ case WebAssembly::BI__builtin_wasm_q15mulr_sat_s_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee =
- CGM.getIntrinsic(Intrinsic::wasm_q15mulr_saturate_signed);
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
- case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
- case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
- case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
- case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2: {
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_s_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_s_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_s_i64x2:
- IntNo = Intrinsic::wasm_extmul_low_signed;
- break;
- case WebAssembly::BI__builtin_wasm_extmul_low_i8x16_u_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_low_i16x8_u_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_low_i32x4_u_i64x2:
- IntNo = Intrinsic::wasm_extmul_low_unsigned;
- break;
- case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_s_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_s_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_s_i64x2:
- IntNo = Intrinsic::wasm_extmul_high_signed;
- break;
- case WebAssembly::BI__builtin_wasm_extmul_high_i8x16_u_i16x8:
- case WebAssembly::BI__builtin_wasm_extmul_high_i16x8_u_i32x4:
- case WebAssembly::BI__builtin_wasm_extmul_high_i32x4_u_i64x2:
- IntNo = Intrinsic::wasm_extmul_high_unsigned;
- break;
- default:
- llvm_unreachable("unexptected builtin ID");
- }
-
- Function *Callee = CGM.getIntrinsic(IntNo, ConvertType(E->getType()));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_q15mulr_sat_signed);
return Builder.CreateCall(Callee, {LHS, RHS});
}
case WebAssembly::BI__builtin_wasm_extadd_pairwise_i8x16_s_i16x8:
@@ -17073,17 +17894,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::wasm_bitselect, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {V1, V2, C});
}
- case WebAssembly::BI__builtin_wasm_signselect_i8x16:
- case WebAssembly::BI__builtin_wasm_signselect_i16x8:
- case WebAssembly::BI__builtin_wasm_signselect_i32x4:
- case WebAssembly::BI__builtin_wasm_signselect_i64x2: {
- Value *V1 = EmitScalarExpr(E->getArg(0));
- Value *V2 = EmitScalarExpr(E->getArg(1));
- Value *C = EmitScalarExpr(E->getArg(2));
- Function *Callee =
- CGM.getIntrinsic(Intrinsic::wasm_signselect, ConvertType(E->getType()));
- return Builder.CreateCall(Callee, {V1, V2, C});
- }
case WebAssembly::BI__builtin_wasm_dot_s_i32x4_i16x8: {
Value *LHS = EmitScalarExpr(E->getArg(0));
Value *RHS = EmitScalarExpr(E->getArg(1));
@@ -17092,29 +17902,18 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_popcnt_i8x16: {
Value *Vec = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_popcnt);
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::ctpop, ConvertType(E->getType()));
return Builder.CreateCall(Callee, {Vec});
}
- case WebAssembly::BI__builtin_wasm_eq_i64x2: {
- Value *LHS = EmitScalarExpr(E->getArg(0));
- Value *RHS = EmitScalarExpr(E->getArg(1));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_eq);
- return Builder.CreateCall(Callee, {LHS, RHS});
- }
- case WebAssembly::BI__builtin_wasm_any_true_i8x16:
- case WebAssembly::BI__builtin_wasm_any_true_i16x8:
- case WebAssembly::BI__builtin_wasm_any_true_i32x4:
- case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+ case WebAssembly::BI__builtin_wasm_any_true_v128:
case WebAssembly::BI__builtin_wasm_all_true_i8x16:
case WebAssembly::BI__builtin_wasm_all_true_i16x8:
case WebAssembly::BI__builtin_wasm_all_true_i32x4:
case WebAssembly::BI__builtin_wasm_all_true_i64x2: {
unsigned IntNo;
switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_any_true_i8x16:
- case WebAssembly::BI__builtin_wasm_any_true_i16x8:
- case WebAssembly::BI__builtin_wasm_any_true_i32x4:
- case WebAssembly::BI__builtin_wasm_any_true_i64x2:
+ case WebAssembly::BI__builtin_wasm_any_true_v128:
IntNo = Intrinsic::wasm_anytrue;
break;
case WebAssembly::BI__builtin_wasm_all_true_i8x16:
@@ -17151,29 +17950,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::sqrt, Vec->getType());
return Builder.CreateCall(Callee, {Vec});
}
- case WebAssembly::BI__builtin_wasm_qfma_f32x4:
- case WebAssembly::BI__builtin_wasm_qfms_f32x4:
- case WebAssembly::BI__builtin_wasm_qfma_f64x2:
- case WebAssembly::BI__builtin_wasm_qfms_f64x2: {
- Value *A = EmitScalarExpr(E->getArg(0));
- Value *B = EmitScalarExpr(E->getArg(1));
- Value *C = EmitScalarExpr(E->getArg(2));
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_qfma_f32x4:
- case WebAssembly::BI__builtin_wasm_qfma_f64x2:
- IntNo = Intrinsic::wasm_qfma;
- break;
- case WebAssembly::BI__builtin_wasm_qfms_f32x4:
- case WebAssembly::BI__builtin_wasm_qfms_f64x2:
- IntNo = Intrinsic::wasm_qfms;
- break;
- default:
- llvm_unreachable("unexpected builtin ID");
- }
- Function *Callee = CGM.getIntrinsic(IntNo, A->getType());
- return Builder.CreateCall(Callee, {A, B, C});
- }
case WebAssembly::BI__builtin_wasm_narrow_s_i8x16_i16x8:
case WebAssembly::BI__builtin_wasm_narrow_u_i8x16_i16x8:
case WebAssembly::BI__builtin_wasm_narrow_s_i16x8_i32x4:
@@ -17197,126 +17973,32 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
- case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
- case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
- case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
- case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2: {
- Value *Vec = EmitScalarExpr(E->getArg(0));
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_widen_low_s_i32x4_i64x2:
- IntNo = Intrinsic::wasm_widen_low_signed;
- break;
- case WebAssembly::BI__builtin_wasm_widen_high_s_i32x4_i64x2:
- IntNo = Intrinsic::wasm_widen_high_signed;
- break;
- case WebAssembly::BI__builtin_wasm_widen_low_u_i32x4_i64x2:
- IntNo = Intrinsic::wasm_widen_low_unsigned;
- break;
- case WebAssembly::BI__builtin_wasm_widen_high_u_i32x4_i64x2:
- IntNo = Intrinsic::wasm_widen_high_unsigned;
- break;
- }
- Function *Callee = CGM.getIntrinsic(IntNo);
- return Builder.CreateCall(Callee, Vec);
- }
- case WebAssembly::BI__builtin_wasm_convert_low_s_i32x4_f64x2:
- case WebAssembly::BI__builtin_wasm_convert_low_u_i32x4_f64x2: {
- Value *Vec = EmitScalarExpr(E->getArg(0));
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_convert_low_s_i32x4_f64x2:
- IntNo = Intrinsic::wasm_convert_low_signed;
- break;
- case WebAssembly::BI__builtin_wasm_convert_low_u_i32x4_f64x2:
- IntNo = Intrinsic::wasm_convert_low_unsigned;
- break;
- }
- Function *Callee = CGM.getIntrinsic(IntNo);
- return Builder.CreateCall(Callee, Vec);
- }
- case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
- case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4: {
+ case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_s_f64x2_i32x4:
- IntNo = Intrinsic::wasm_trunc_saturate_zero_signed;
- break;
- case WebAssembly::BI__builtin_wasm_trunc_saturate_zero_u_f64x2_i32x4:
- IntNo = Intrinsic::wasm_trunc_saturate_zero_unsigned;
- break;
- }
- Function *Callee = CGM.getIntrinsic(IntNo);
- return Builder.CreateCall(Callee, Vec);
- }
- case WebAssembly::BI__builtin_wasm_demote_zero_f64x2_f32x4: {
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_demote_zero);
- return Builder.CreateCall(Callee, Vec);
- }
- case WebAssembly::BI__builtin_wasm_promote_low_f32x4_f64x2: {
- Value *Vec = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_promote_low);
- return Builder.CreateCall(Callee, Vec);
- }
- case WebAssembly::BI__builtin_wasm_load32_zero: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load32_zero);
- return Builder.CreateCall(Callee, {Ptr});
- }
- case WebAssembly::BI__builtin_wasm_load64_zero: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
- return Builder.CreateCall(Callee, {Ptr});
- }
- case WebAssembly::BI__builtin_wasm_load8_lane:
- case WebAssembly::BI__builtin_wasm_load16_lane:
- case WebAssembly::BI__builtin_wasm_load32_lane:
- case WebAssembly::BI__builtin_wasm_load64_lane:
- case WebAssembly::BI__builtin_wasm_store8_lane:
- case WebAssembly::BI__builtin_wasm_store16_lane:
- case WebAssembly::BI__builtin_wasm_store32_lane:
- case WebAssembly::BI__builtin_wasm_store64_lane: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Value *Vec = EmitScalarExpr(E->getArg(1));
- Optional<llvm::APSInt> LaneIdxConst =
- E->getArg(2)->getIntegerConstantExpr(getContext());
- assert(LaneIdxConst && "Constant arg isn't actually constant?");
- Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
- unsigned IntNo;
- switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_load8_lane:
- IntNo = Intrinsic::wasm_load8_lane;
- break;
- case WebAssembly::BI__builtin_wasm_load16_lane:
- IntNo = Intrinsic::wasm_load16_lane;
+ case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
+ IntNo = Intrinsic::fptosi_sat;
break;
- case WebAssembly::BI__builtin_wasm_load32_lane:
- IntNo = Intrinsic::wasm_load32_lane;
- break;
- case WebAssembly::BI__builtin_wasm_load64_lane:
- IntNo = Intrinsic::wasm_load64_lane;
- break;
- case WebAssembly::BI__builtin_wasm_store8_lane:
- IntNo = Intrinsic::wasm_store8_lane;
- break;
- case WebAssembly::BI__builtin_wasm_store16_lane:
- IntNo = Intrinsic::wasm_store16_lane;
- break;
- case WebAssembly::BI__builtin_wasm_store32_lane:
- IntNo = Intrinsic::wasm_store32_lane;
- break;
- case WebAssembly::BI__builtin_wasm_store64_lane:
- IntNo = Intrinsic::wasm_store64_lane;
+ case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
+ IntNo = Intrinsic::fptoui_sat;
break;
default:
llvm_unreachable("unexpected builtin ID");
}
- Function *Callee = CGM.getIntrinsic(IntNo);
- return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
- }
- case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
+ llvm::Type *SrcT = Vec->getType();
+ llvm::Type *TruncT =
+ SrcT->getWithNewType(llvm::IntegerType::get(getLLVMContext(), 32));
+ Function *Callee = CGM.getIntrinsic(IntNo, {TruncT, SrcT});
+ Value *Trunc = Builder.CreateCall(Callee, Vec);
+ Value *Splat = Builder.CreateVectorSplat(2, Builder.getInt32(0));
+ Value *ConcatMask =
+ llvm::ConstantVector::get({Builder.getInt32(0), Builder.getInt32(1),
+ Builder.getInt32(2), Builder.getInt32(3)});
+ return Builder.CreateShuffleVector(Trunc, Splat, ConcatMask);
+ }
+ case WebAssembly::BI__builtin_wasm_shuffle_i8x16: {
Value *Ops[18];
size_t OpIdx = 0;
Ops[OpIdx++] = EmitScalarExpr(E->getArg(0));
@@ -17330,16 +18012,6 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
return Builder.CreateCall(Callee, Ops);
}
- case WebAssembly::BI__builtin_wasm_prefetch_t: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_t);
- return Builder.CreateCall(Callee, Ptr);
- }
- case WebAssembly::BI__builtin_wasm_prefetch_nt: {
- Value *Ptr = EmitScalarExpr(E->getArg(0));
- Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_nt);
- return Builder.CreateCall(Callee, Ptr);
- }
default:
return nullptr;
}
@@ -17588,3 +18260,147 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
return nullptr;
}
+
+Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ SmallVector<Value *, 4> Ops;
+ llvm::Type *ResultType = ConvertType(E->getType());
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ unsigned NF = 1;
+
+ // Required for overloaded intrinsics.
+ llvm::SmallVector<llvm::Type *, 2> IntrinsicTypes;
+ switch (BuiltinID) {
+ default: llvm_unreachable("unexpected builtin ID");
+ case RISCV::BI__builtin_riscv_orc_b_32:
+ case RISCV::BI__builtin_riscv_orc_b_64:
+ case RISCV::BI__builtin_riscv_clmul:
+ case RISCV::BI__builtin_riscv_clmulh:
+ case RISCV::BI__builtin_riscv_clmulr:
+ case RISCV::BI__builtin_riscv_bcompress_32:
+ case RISCV::BI__builtin_riscv_bcompress_64:
+ case RISCV::BI__builtin_riscv_bdecompress_32:
+ case RISCV::BI__builtin_riscv_bdecompress_64:
+ case RISCV::BI__builtin_riscv_grev_32:
+ case RISCV::BI__builtin_riscv_grev_64:
+ case RISCV::BI__builtin_riscv_gorc_32:
+ case RISCV::BI__builtin_riscv_gorc_64:
+ case RISCV::BI__builtin_riscv_shfl_32:
+ case RISCV::BI__builtin_riscv_shfl_64:
+ case RISCV::BI__builtin_riscv_unshfl_32:
+ case RISCV::BI__builtin_riscv_unshfl_64:
+ case RISCV::BI__builtin_riscv_xperm_n:
+ case RISCV::BI__builtin_riscv_xperm_b:
+ case RISCV::BI__builtin_riscv_xperm_h:
+ case RISCV::BI__builtin_riscv_xperm_w:
+ case RISCV::BI__builtin_riscv_crc32_b:
+ case RISCV::BI__builtin_riscv_crc32_h:
+ case RISCV::BI__builtin_riscv_crc32_w:
+ case RISCV::BI__builtin_riscv_crc32_d:
+ case RISCV::BI__builtin_riscv_crc32c_b:
+ case RISCV::BI__builtin_riscv_crc32c_h:
+ case RISCV::BI__builtin_riscv_crc32c_w:
+ case RISCV::BI__builtin_riscv_crc32c_d: {
+ switch (BuiltinID) {
+ default: llvm_unreachable("unexpected builtin ID");
+ // Zbb
+ case RISCV::BI__builtin_riscv_orc_b_32:
+ case RISCV::BI__builtin_riscv_orc_b_64:
+ ID = Intrinsic::riscv_orc_b;
+ break;
+
+ // Zbc
+ case RISCV::BI__builtin_riscv_clmul:
+ ID = Intrinsic::riscv_clmul;
+ break;
+ case RISCV::BI__builtin_riscv_clmulh:
+ ID = Intrinsic::riscv_clmulh;
+ break;
+ case RISCV::BI__builtin_riscv_clmulr:
+ ID = Intrinsic::riscv_clmulr;
+ break;
+
+ // Zbe
+ case RISCV::BI__builtin_riscv_bcompress_32:
+ case RISCV::BI__builtin_riscv_bcompress_64:
+ ID = Intrinsic::riscv_bcompress;
+ break;
+ case RISCV::BI__builtin_riscv_bdecompress_32:
+ case RISCV::BI__builtin_riscv_bdecompress_64:
+ ID = Intrinsic::riscv_bdecompress;
+ break;
+
+ // Zbp
+ case RISCV::BI__builtin_riscv_grev_32:
+ case RISCV::BI__builtin_riscv_grev_64:
+ ID = Intrinsic::riscv_grev;
+ break;
+ case RISCV::BI__builtin_riscv_gorc_32:
+ case RISCV::BI__builtin_riscv_gorc_64:
+ ID = Intrinsic::riscv_gorc;
+ break;
+ case RISCV::BI__builtin_riscv_shfl_32:
+ case RISCV::BI__builtin_riscv_shfl_64:
+ ID = Intrinsic::riscv_shfl;
+ break;
+ case RISCV::BI__builtin_riscv_unshfl_32:
+ case RISCV::BI__builtin_riscv_unshfl_64:
+ ID = Intrinsic::riscv_unshfl;
+ break;
+ case RISCV::BI__builtin_riscv_xperm_n:
+ ID = Intrinsic::riscv_xperm_n;
+ break;
+ case RISCV::BI__builtin_riscv_xperm_b:
+ ID = Intrinsic::riscv_xperm_b;
+ break;
+ case RISCV::BI__builtin_riscv_xperm_h:
+ ID = Intrinsic::riscv_xperm_h;
+ break;
+ case RISCV::BI__builtin_riscv_xperm_w:
+ ID = Intrinsic::riscv_xperm_w;
+ break;
+
+ // Zbr
+ case RISCV::BI__builtin_riscv_crc32_b:
+ ID = Intrinsic::riscv_crc32_b;
+ break;
+ case RISCV::BI__builtin_riscv_crc32_h:
+ ID = Intrinsic::riscv_crc32_h;
+ break;
+ case RISCV::BI__builtin_riscv_crc32_w:
+ ID = Intrinsic::riscv_crc32_w;
+ break;
+ case RISCV::BI__builtin_riscv_crc32_d:
+ ID = Intrinsic::riscv_crc32_d;
+ break;
+ case RISCV::BI__builtin_riscv_crc32c_b:
+ ID = Intrinsic::riscv_crc32c_b;
+ break;
+ case RISCV::BI__builtin_riscv_crc32c_h:
+ ID = Intrinsic::riscv_crc32c_h;
+ break;
+ case RISCV::BI__builtin_riscv_crc32c_w:
+ ID = Intrinsic::riscv_crc32c_w;
+ break;
+ case RISCV::BI__builtin_riscv_crc32c_d:
+ ID = Intrinsic::riscv_crc32c_d;
+ break;
+ }
+
+ IntrinsicTypes = {ResultType};
+ break;
+ }
+ // Vector builtins are handled from here.
+#include "clang/Basic/riscv_vector_builtin_cg.inc"
+ }
+
+ assert(ID != Intrinsic::not_intrinsic);
+
+ llvm::Function *F = CGM.getIntrinsic(ID, IntrinsicTypes);
+ return Builder.CreateCall(F, Ops, "");
+}
diff --git a/clang/lib/CodeGen/CGCUDANV.cpp b/clang/lib/CodeGen/CGCUDANV.cpp
index 33a2d6f4483e..88030fee501b 100644
--- a/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/clang/lib/CodeGen/CGCUDANV.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/Decl.h"
@@ -42,12 +43,18 @@ private:
llvm::LLVMContext &Context;
/// Convenience reference to the current module
llvm::Module &TheModule;
- /// Keeps track of kernel launch stubs emitted in this module
+ /// Keeps track of kernel launch stubs and handles emitted in this module
struct KernelInfo {
- llvm::Function *Kernel;
+ llvm::Function *Kernel; // stub function to help launch kernel
const Decl *D;
};
llvm::SmallVector<KernelInfo, 16> EmittedKernels;
+  // Map a device stub function to a symbol for identifying the kernel in host
+ // For CUDA, the symbol for identifying the kernel is the same as the device
+ // stub function. For HIP, they are different.
+ llvm::DenseMap<llvm::Function *, llvm::GlobalValue *> KernelHandles;
+ // Map a kernel handle to the kernel stub.
+ llvm::DenseMap<llvm::GlobalValue *, llvm::Function *> KernelStubs;
struct VarInfo {
llvm::GlobalVariable *Var;
const VarDecl *D;
@@ -120,12 +127,8 @@ private:
void emitDeviceStubBodyNew(CodeGenFunction &CGF, FunctionArgList &Args);
std::string getDeviceSideName(const NamedDecl *ND) override;
-public:
- CGNVCUDARuntime(CodeGenModule &CGM);
-
- void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
- bool Extern, bool Constant) override {
+ bool Extern, bool Constant) {
DeviceVars.push_back({&Var,
VD,
{DeviceVarFlags::Variable, Extern, Constant,
@@ -133,7 +136,7 @@ public:
/*Normalized*/ false, 0}});
}
void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
- bool Extern, int Type) override {
+ bool Extern, int Type) {
DeviceVars.push_back({&Var,
VD,
{DeviceVarFlags::Surface, Extern, /*Constant*/ false,
@@ -141,7 +144,7 @@ public:
/*Normalized*/ false, Type}});
}
void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
- bool Extern, int Type, bool Normalized) override {
+ bool Extern, int Type, bool Normalized) {
DeviceVars.push_back({&Var,
VD,
{DeviceVarFlags::Texture, Extern, /*Constant*/ false,
@@ -149,9 +152,29 @@ public:
}
/// Creates module constructor function
- llvm::Function *makeModuleCtorFunction() override;
+ llvm::Function *makeModuleCtorFunction();
/// Creates module destructor function
- llvm::Function *makeModuleDtorFunction() override;
+ llvm::Function *makeModuleDtorFunction();
+ /// Transform managed variables for device compilation.
+ void transformManagedVars();
+
+public:
+ CGNVCUDARuntime(CodeGenModule &CGM);
+
+ llvm::GlobalValue *getKernelHandle(llvm::Function *F, GlobalDecl GD) override;
+ llvm::Function *getKernelStub(llvm::GlobalValue *Handle) override {
+ auto Loc = KernelStubs.find(Handle);
+ assert(Loc != KernelStubs.end());
+ return Loc->second;
+ }
+ void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) override;
+ void handleVarRegistration(const VarDecl *VD,
+ llvm::GlobalVariable &Var) override;
+ void
+ internalizeDeviceSideVar(const VarDecl *D,
+ llvm::GlobalValue::LinkageTypes &Linkage) override;
+
+ llvm::Function *finalizeModule() override;
};
}
@@ -168,12 +191,27 @@ CGNVCUDARuntime::addUnderscoredPrefixToName(StringRef FuncName) const {
return ((Twine("__cuda") + Twine(FuncName)).str());
}
+static std::unique_ptr<MangleContext> InitDeviceMC(CodeGenModule &CGM) {
+ // If the host and device have different C++ ABIs, mark it as the device
+ // mangle context so that the mangling needs to retrieve the additional
+ // device lambda mangling number instead of the regular host one.
+ if (CGM.getContext().getAuxTargetInfo() &&
+ CGM.getContext().getTargetInfo().getCXXABI().isMicrosoft() &&
+ CGM.getContext().getAuxTargetInfo()->getCXXABI().isItaniumFamily()) {
+ return std::unique_ptr<MangleContext>(
+ CGM.getContext().createDeviceMangleContext(
+ *CGM.getContext().getAuxTargetInfo()));
+ }
+
+ return std::unique_ptr<MangleContext>(CGM.getContext().createMangleContext(
+ CGM.getContext().getAuxTargetInfo()));
+}
+
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
- DeviceMC(CGM.getContext().createMangleContext(
- CGM.getContext().getAuxTargetInfo())) {
+ DeviceMC(InitDeviceMC(CGM)) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -230,19 +268,39 @@ std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
else
GD = GlobalDecl(ND);
std::string DeviceSideName;
- if (DeviceMC->shouldMangleDeclName(ND)) {
+ MangleContext *MC;
+ if (CGM.getLangOpts().CUDAIsDevice)
+ MC = &CGM.getCXXABI().getMangleContext();
+ else
+ MC = DeviceMC.get();
+ if (MC->shouldMangleDeclName(ND)) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
- DeviceMC->mangleName(GD, Out);
+ MC->mangleName(GD, Out);
DeviceSideName = std::string(Out.str());
} else
DeviceSideName = std::string(ND->getIdentifier()->getName());
+
+  // Make a unique name for the device-side static file-scope variable for HIP.
+ if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
+ CGM.getLangOpts().GPURelocatableDeviceCode &&
+ !CGM.getLangOpts().CUID.empty()) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << DeviceSideName;
+ CGM.printPostfixForExternalizedStaticVar(Out);
+ DeviceSideName = std::string(Out.str());
+ }
return DeviceSideName;
}
void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
FunctionArgList &Args) {
EmittedKernels.push_back({CGF.CurFn, CGF.CurFuncDecl});
+ if (auto *GV = dyn_cast<llvm::GlobalVariable>(KernelHandles[CGF.CurFn])) {
+ GV->setLinkage(CGF.CurFn->getLinkage());
+ GV->setInitializer(CGF.CurFn);
+ }
if (CudaFeatureEnabled(CGM.getTarget().getSDKVersion(),
CudaFeature::CUDA_USES_NEW_LAUNCH) ||
(CGF.getLangOpts().HIP && CGF.getLangOpts().HIPUseNewLaunchAPI))
@@ -268,7 +326,8 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
llvm::Value* VarPtr = CGF.GetAddrOfLocalVar(Args[i]).getPointer();
llvm::Value *VoidVarPtr = CGF.Builder.CreatePointerCast(VarPtr, VoidPtrTy);
CGF.Builder.CreateDefaultAlignedStore(
- VoidVarPtr, CGF.Builder.CreateConstGEP1_32(KernelArgs.getPointer(), i));
+ VoidVarPtr,
+ CGF.Builder.CreateConstGEP1_32(VoidPtrTy, KernelArgs.getPointer(), i));
}
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
@@ -286,7 +345,7 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
IdentifierInfo &cudaLaunchKernelII =
CGM.getContext().Idents.get(LaunchKernelName);
FunctionDecl *cudaLaunchKernelFD = nullptr;
- for (const auto &Result : DC->lookup(&cudaLaunchKernelII)) {
+ for (auto *Result : DC->lookup(&cudaLaunchKernelII)) {
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Result))
cudaLaunchKernelFD = FD;
}
@@ -321,7 +380,8 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
ShmemSize.getPointer(), Stream.getPointer()});
// Emit the call to cudaLaunch
- llvm::Value *Kernel = CGF.Builder.CreatePointerCast(CGF.CurFn, VoidPtrTy);
+ llvm::Value *Kernel =
+ CGF.Builder.CreatePointerCast(KernelHandles[CGF.CurFn], VoidPtrTy);
CallArgList LaunchKernelArgs;
LaunchKernelArgs.add(RValue::get(Kernel),
cudaLaunchKernelFD->getParamDecl(0)->getType());
@@ -376,7 +436,8 @@ void CGNVCUDARuntime::emitDeviceStubBodyLegacy(CodeGenFunction &CGF,
// Emit the call to cudaLaunch
llvm::FunctionCallee cudaLaunchFn = getLaunchFn();
- llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
+ llvm::Value *Arg =
+ CGF.Builder.CreatePointerCast(KernelHandles[CGF.CurFn], CharPtrTy);
CGF.EmitRuntimeCallOrInvoke(cudaLaunchFn, Arg);
CGF.EmitBranch(EndBlock);
@@ -470,7 +531,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
llvm::Constant *NullPtr = llvm::ConstantPointerNull::get(VoidPtrTy);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
- Builder.CreateBitCast(I.Kernel, VoidPtrTy),
+ Builder.CreateBitCast(KernelHandles[I.Kernel], VoidPtrTy),
KernelName,
KernelName,
llvm::ConstantInt::get(IntTy, -1),
@@ -520,6 +581,9 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
addUnderscoredPrefixToName("RegisterTexture"));
for (auto &&Info : DeviceVars) {
llvm::GlobalVariable *Var = Info.Var;
+ assert((!Var->isDeclaration() || Info.Flags.isManaged()) &&
+ "External variables should not show up here, except HIP managed "
+ "variables");
llvm::Constant *VarName = makeConstantString(getDeviceSideName(Info.D));
switch (Info.Flags.getKind()) {
case DeviceVarFlags::Variable: {
@@ -529,9 +593,16 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
auto ManagedVar = new llvm::GlobalVariable(
CGM.getModule(), Var->getType(),
/*isConstant=*/false, Var->getLinkage(),
- /*Init=*/llvm::ConstantPointerNull::get(Var->getType()),
- Twine(Var->getName() + ".managed"), /*InsertBefore=*/nullptr,
+ /*Init=*/Var->isDeclaration()
+ ? nullptr
+ : llvm::ConstantPointerNull::get(Var->getType()),
+ /*Name=*/"", /*InsertBefore=*/nullptr,
llvm::GlobalVariable::NotThreadLocal);
+ ManagedVar->setDSOLocal(Var->isDSOLocal());
+ ManagedVar->setVisibility(Var->getVisibility());
+ ManagedVar->setExternallyInitialized(true);
+ ManagedVar->takeName(Var);
+ Var->setName(Twine(ManagedVar->getName() + ".managed"));
replaceManagedVar(Var, ManagedVar);
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
@@ -540,7 +611,8 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
VarName,
llvm::ConstantInt::get(VarSizeTy, VarSize),
llvm::ConstantInt::get(IntTy, Var->getAlignment())};
- Builder.CreateCall(RegisterManagedVar, Args);
+ if (!Var->isDeclaration())
+ Builder.CreateCall(RegisterManagedVar, Args);
} else {
llvm::Value *Args[] = {
&GpuBinaryHandlePtr,
@@ -915,3 +987,168 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
return new CGNVCUDARuntime(CGM);
}
+
+void CGNVCUDARuntime::internalizeDeviceSideVar(
+ const VarDecl *D, llvm::GlobalValue::LinkageTypes &Linkage) {
+ // For -fno-gpu-rdc, host-side shadows of external declarations of device-side
+ // global variables become internal definitions. These have to be internal in
+ // order to prevent name conflicts with global host variables with the same
+  // name in different TUs.
+ //
+ // For -fgpu-rdc, the shadow variables should not be internalized because
+  // they may be accessed by a different TU.
+ if (CGM.getLangOpts().GPURelocatableDeviceCode)
+ return;
+
+ // __shared__ variables are odd. Shadows do get created, but
+ // they are not registered with the CUDA runtime, so they
+ // can't really be used to access their device-side
+ // counterparts. It's not clear yet whether it's nvcc's bug or
+ // a feature, but we've got to do the same for compatibility.
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
+ D->hasAttr<CUDASharedAttr>() ||
+ D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType()) {
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ }
+}
+
+void CGNVCUDARuntime::handleVarRegistration(const VarDecl *D,
+ llvm::GlobalVariable &GV) {
+ if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
+ // Shadow variables and their properties must be registered with CUDA
+ // runtime. Skip Extern global variables, which will be registered in
+ // the TU where they are defined.
+ //
+ // Don't register a C++17 inline variable. The local symbol can be
+ // discarded and referencing a discarded local symbol from outside the
+ // comdat (__cuda_register_globals) is disallowed by the ELF spec.
+ //
+ // HIP managed variables need to be always recorded in device and host
+ // compilations for transformation.
+ //
+ // HIP managed variables and variables in CUDADeviceVarODRUsedByHost are
+ // added to llvm.compiler-used, therefore they are safe to be registered.
+ if ((!D->hasExternalStorage() && !D->isInline()) ||
+ CGM.getContext().CUDADeviceVarODRUsedByHost.contains(D) ||
+ D->hasAttr<HIPManagedAttr>()) {
+ registerDeviceVar(D, GV, !D->hasDefinition(),
+ D->hasAttr<CUDAConstantAttr>());
+ }
+ } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ D->getType()->isCUDADeviceBuiltinTextureType()) {
+ // Builtin surfaces and textures and their template arguments are
+ // also registered with CUDA runtime.
+ const auto *TD = cast<ClassTemplateSpecializationDecl>(
+ D->getType()->castAs<RecordType>()->getDecl());
+ const TemplateArgumentList &Args = TD->getTemplateArgs();
+ if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
+ assert(Args.size() == 2 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin surface type.");
+ auto SurfType = Args[1].getAsIntegral();
+ if (!D->hasExternalStorage())
+ registerDeviceSurf(D, GV, !D->hasDefinition(), SurfType.getSExtValue());
+ } else {
+ assert(Args.size() == 3 &&
+ "Unexpected number of template arguments of CUDA device "
+ "builtin texture type.");
+ auto TexType = Args[1].getAsIntegral();
+ auto Normalized = Args[2].getAsIntegral();
+ if (!D->hasExternalStorage())
+ registerDeviceTex(D, GV, !D->hasDefinition(), TexType.getSExtValue(),
+ Normalized.getZExtValue());
+ }
+ }
+}
+
+// Transform managed variables to pointers to managed variables in device code.
+// Each use of the original managed variable is replaced by a load from the
+// transformed managed variable. The transformed managed variable contains
+// the address of managed memory which will be allocated by the runtime.
+void CGNVCUDARuntime::transformManagedVars() {
+ for (auto &&Info : DeviceVars) {
+ llvm::GlobalVariable *Var = Info.Var;
+ if (Info.Flags.getKind() == DeviceVarFlags::Variable &&
+ Info.Flags.isManaged()) {
+ auto ManagedVar = new llvm::GlobalVariable(
+ CGM.getModule(), Var->getType(),
+ /*isConstant=*/false, Var->getLinkage(),
+ /*Init=*/Var->isDeclaration()
+ ? nullptr
+ : llvm::ConstantPointerNull::get(Var->getType()),
+ /*Name=*/"", /*InsertBefore=*/nullptr,
+ llvm::GlobalVariable::NotThreadLocal,
+ CGM.getContext().getTargetAddressSpace(LangAS::cuda_device));
+ ManagedVar->setDSOLocal(Var->isDSOLocal());
+ ManagedVar->setVisibility(Var->getVisibility());
+ ManagedVar->setExternallyInitialized(true);
+ replaceManagedVar(Var, ManagedVar);
+ ManagedVar->takeName(Var);
+ Var->setName(Twine(ManagedVar->getName()) + ".managed");
+ // Keep managed variables even if they are not used in device code since
+ // they need to be allocated by the runtime.
+ if (!Var->isDeclaration()) {
+ assert(!ManagedVar->isDeclaration());
+ CGM.addCompilerUsedGlobal(Var);
+ CGM.addCompilerUsedGlobal(ManagedVar);
+ }
+ }
+ }
+}
+
+// Returns module constructor to be added.
+llvm::Function *CGNVCUDARuntime::finalizeModule() {
+ if (CGM.getLangOpts().CUDAIsDevice) {
+ transformManagedVars();
+
+    // Mark ODR-used device variables as compiler used to prevent them from
+ // eliminated by optimization. This is necessary for device variables
+ // ODR-used by host functions. Sema correctly marks them as ODR-used no
+ // matter whether they are ODR-used by device or host functions.
+ //
+ // We do not need to do this if the variable has used attribute since it
+ // has already been added.
+ //
+ // Static device variables have been externalized at this point, therefore
+ // variables with LLVM private or internal linkage need not be added.
+ for (auto &&Info : DeviceVars) {
+ auto Kind = Info.Flags.getKind();
+ if (!Info.Var->isDeclaration() &&
+ !llvm::GlobalValue::isLocalLinkage(Info.Var->getLinkage()) &&
+ (Kind == DeviceVarFlags::Variable ||
+ Kind == DeviceVarFlags::Surface ||
+ Kind == DeviceVarFlags::Texture) &&
+ Info.D->isUsed() && !Info.D->hasAttr<UsedAttr>()) {
+ CGM.addCompilerUsedGlobal(Info.Var);
+ }
+ }
+ return nullptr;
+ }
+ return makeModuleCtorFunction();
+}
+
+llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
+ GlobalDecl GD) {
+ auto Loc = KernelHandles.find(F);
+ if (Loc != KernelHandles.end())
+ return Loc->second;
+
+ if (!CGM.getLangOpts().HIP) {
+ KernelHandles[F] = F;
+ KernelStubs[F] = F;
+ return F;
+ }
+
+ auto *Var = new llvm::GlobalVariable(
+ TheModule, F->getType(), /*isConstant=*/true, F->getLinkage(),
+ /*Initializer=*/nullptr,
+ CGM.getMangledName(
+ GD.getWithKernelReferenceKind(KernelReferenceKind::Kernel)));
+ Var->setAlignment(CGM.getPointerAlign().getAsAlign());
+ Var->setDSOLocal(F->isDSOLocal());
+ Var->setVisibility(F->getVisibility());
+ KernelHandles[F] = Var;
+ KernelStubs[Var] = F;
+ return Var;
+}
diff --git a/clang/lib/CodeGen/CGCUDARuntime.h b/clang/lib/CodeGen/CGCUDARuntime.h
index ba3404ead368..1c119dc77fd4 100644
--- a/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/clang/lib/CodeGen/CGCUDARuntime.h
@@ -15,7 +15,9 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
+#include "clang/AST/GlobalDecl.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/IR/GlobalValue.h"
namespace llvm {
class Function;
@@ -80,24 +82,30 @@ public:
/// Emits a kernel launch stub.
virtual void emitDeviceStub(CodeGenFunction &CGF, FunctionArgList &Args) = 0;
- virtual void registerDeviceVar(const VarDecl *VD, llvm::GlobalVariable &Var,
- bool Extern, bool Constant) = 0;
- virtual void registerDeviceSurf(const VarDecl *VD, llvm::GlobalVariable &Var,
- bool Extern, int Type) = 0;
- virtual void registerDeviceTex(const VarDecl *VD, llvm::GlobalVariable &Var,
- bool Extern, int Type, bool Normalized) = 0;
- /// Constructs and returns a module initialization function or nullptr if it's
- /// not needed. Must be called after all kernels have been emitted.
- virtual llvm::Function *makeModuleCtorFunction() = 0;
+ /// Check whether a variable is a device variable and register it if true.
+ virtual void handleVarRegistration(const VarDecl *VD,
+ llvm::GlobalVariable &Var) = 0;
- /// Returns a module cleanup function or nullptr if it's not needed.
- /// Must be called after ModuleCtorFunction
- virtual llvm::Function *makeModuleDtorFunction() = 0;
+ /// Finalize generated LLVM module. Returns a module constructor function
+ /// to be added or a null pointer.
+ virtual llvm::Function *finalizeModule() = 0;
/// Returns function or variable name on device side even if the current
/// compilation is for host.
virtual std::string getDeviceSideName(const NamedDecl *ND) = 0;
+
+ /// Get kernel handle by stub function.
+ virtual llvm::GlobalValue *getKernelHandle(llvm::Function *Stub,
+ GlobalDecl GD) = 0;
+
+ /// Get kernel stub by kernel handle.
+ virtual llvm::Function *getKernelStub(llvm::GlobalValue *Handle) = 0;
+
+ /// Adjust linkage of shadow variables in host compilation.
+ virtual void
+ internalizeDeviceSideVar(const VarDecl *D,
+ llvm::GlobalValue::LinkageTypes &Linkage) = 0;
};
/// Creates an instance of a CUDA runtime class.
diff --git a/clang/lib/CodeGen/CGCXX.cpp b/clang/lib/CodeGen/CGCXX.cpp
index a4bd2c6d5da0..86f548191d65 100644
--- a/clang/lib/CodeGen/CGCXX.cpp
+++ b/clang/lib/CodeGen/CGCXX.cpp
@@ -252,8 +252,8 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
"No kext in Microsoft ABI");
CodeGenModule &CGM = CGF.CGM;
llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
- Ty = Ty->getPointerTo()->getPointerTo();
- VTable = CGF.Builder.CreateBitCast(VTable, Ty);
+ Ty = Ty->getPointerTo();
+ VTable = CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo());
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
const VTableLayout &VTLayout = CGM.getItaniumVTableContext().getVTableLayout(RD);
@@ -262,9 +262,9 @@ static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
VTableIndex += VTLayout.getVTableOffset(AddressPoint.VTableIndex) +
AddressPoint.AddressPointIndex;
llvm::Value *VFuncPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ CGF.Builder.CreateConstInBoundsGEP1_64(Ty, VTable, VTableIndex, "vfnkxt");
llvm::Value *VFunc = CGF.Builder.CreateAlignedLoad(
- VFuncPtr, llvm::Align(CGF.PointerAlignInBytes));
+ Ty, VFuncPtr, llvm::Align(CGF.PointerAlignInBytes));
CGCallee Callee(GD, VFunc);
return Callee;
}
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 42801372189b..47a4ed35be85 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -66,6 +66,7 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
case CC_Swift: return llvm::CallingConv::Swift;
+ case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
}
}
@@ -773,7 +774,7 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
// Force target independent argument handling for the host visible
// kernel functions.
computeSPIRKernelABIInfo(CGM, *FI);
- } else if (info.getCC() == CC_Swift) {
+ } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
swiftcall::computeABIInfo(CGM, *FI);
} else {
getABIInfo().computeInfo(*FI);
@@ -1012,8 +1013,8 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
for (int i = 0, n = CAE->NumElts; i < n; i++) {
- llvm::Value *EltAddr =
- CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
+ llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
+ BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
Fn(Address(EltAddr, EltAlign));
}
}
@@ -1732,6 +1733,18 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
+bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
+ QualType ReturnType) {
+ // We can't just discard the return value for a record type with a
+ // complex destructor or a non-trivially copyable type.
+ if (const RecordType *RT =
+ ReturnType.getCanonicalType()->getAs<RecordType>()) {
+ if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return ClassDecl->hasTrivialDestructor();
+ }
+ return ReturnType.isTriviallyCopyableType(Context);
+}
+
void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
bool HasOptnone,
bool AttrOnCallSite,
@@ -1753,8 +1766,7 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (AttrOnCallSite) {
// Attributes that should go on the call site only.
- if (!CodeGenOpts.SimplifyLibCalls ||
- CodeGenOpts.isNoBuiltinFunc(Name.data()))
+ if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
if (!CodeGenOpts.TrapFuncName.empty())
FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
@@ -1773,8 +1785,8 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
}
FuncAttrs.addAttribute("frame-pointer", FpKind);
- FuncAttrs.addAttribute("less-precise-fpmad",
- llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
+ if (CodeGenOpts.LessPreciseFPMAD)
+ FuncAttrs.addAttribute("less-precise-fpmad", "true");
if (CodeGenOpts.NullPointerIsValid)
FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
@@ -1788,9 +1800,8 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
CodeGenOpts.FP32DenormalMode.str());
}
- FuncAttrs.addAttribute("no-trapping-math",
- llvm::toStringRef(LangOpts.getFPExceptionMode() ==
- LangOptions::FPE_Ignore));
+ if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
+ FuncAttrs.addAttribute("no-trapping-math", "true");
// Strict (compliant) code is the default, so only add this attribute to
// indicate that we are trying to workaround a problem case.
@@ -1799,18 +1810,18 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
// TODO: Are these all needed?
// unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
- FuncAttrs.addAttribute("no-infs-fp-math",
- llvm::toStringRef(LangOpts.NoHonorInfs));
- FuncAttrs.addAttribute("no-nans-fp-math",
- llvm::toStringRef(LangOpts.NoHonorNaNs));
- FuncAttrs.addAttribute("unsafe-fp-math",
- llvm::toStringRef(LangOpts.UnsafeFPMath));
- FuncAttrs.addAttribute("use-soft-float",
- llvm::toStringRef(CodeGenOpts.SoftFloat));
+ if (LangOpts.NoHonorInfs)
+ FuncAttrs.addAttribute("no-infs-fp-math", "true");
+ if (LangOpts.NoHonorNaNs)
+ FuncAttrs.addAttribute("no-nans-fp-math", "true");
+ if (LangOpts.UnsafeFPMath)
+ FuncAttrs.addAttribute("unsafe-fp-math", "true");
+ if (CodeGenOpts.SoftFloat)
+ FuncAttrs.addAttribute("use-soft-float", "true");
FuncAttrs.addAttribute("stack-protector-buffer-size",
llvm::utostr(CodeGenOpts.SSPBufferSize));
- FuncAttrs.addAttribute("no-signed-zeros-fp-math",
- llvm::toStringRef(LangOpts.NoSignedZero));
+ if (LangOpts.NoSignedZero)
+ FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
// TODO: Reciprocal estimate codegen options should apply to instructions?
const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
@@ -1906,6 +1917,55 @@ static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
}
+static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
+ const llvm::DataLayout &DL, const ABIArgInfo &AI,
+ bool CheckCoerce = true) {
+ llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
+ if (AI.getKind() == ABIArgInfo::Indirect)
+ return true;
+ if (AI.getKind() == ABIArgInfo::Extend)
+ return true;
+ if (!DL.typeSizeEqualsStoreSize(Ty))
+ // TODO: This will result in a modest amount of values not marked noundef
+ // when they could be. We care about values that *invisibly* contain undef
+ // bits from the perspective of LLVM IR.
+ return false;
+ if (CheckCoerce && AI.canHaveCoerceToType()) {
+ llvm::Type *CoerceTy = AI.getCoerceToType();
+ if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
+ DL.getTypeSizeInBits(Ty)))
+ // If we're coercing to a type with a greater size than the canonical one,
+ // we're introducing new undef bits.
+ // Coercing to a type of smaller or equal size is ok, as we know that
+ // there's no internal padding (typeSizeEqualsStoreSize).
+ return false;
+ }
+ if (QTy->isExtIntType())
+ return true;
+ if (QTy->isReferenceType())
+ return true;
+ if (QTy->isNullPtrType())
+ return false;
+ if (QTy->isMemberPointerType())
+ // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
+ // now, never mark them.
+ return false;
+ if (QTy->isScalarType()) {
+ if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
+ return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
+ return true;
+ }
+ if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
+ return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
+ if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
+ return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
+ if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
+ return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
+
+ // TODO: Some structs may be `noundef`, in specific situations.
+ return false;
+}
+
/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
@@ -1923,9 +1983,12 @@ static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
/// attributes that restrict how the frontend generates code must be
/// added here rather than getDefaultFunctionAttributes.
///
-void CodeGenModule::ConstructAttributeList(
- StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
- llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
+void CodeGenModule::ConstructAttributeList(StringRef Name,
+ const CGFunctionInfo &FI,
+ CGCalleeInfo CalleeInfo,
+ llvm::AttributeList &AttrList,
+ unsigned &CallingConv,
+ bool AttrOnCallSite, bool IsThunk) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
@@ -1989,15 +2052,38 @@ void CodeGenModule::ConstructAttributeList(
// allows it to work on indirect virtual function calls.
if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
+
+ // Add known guaranteed alignment for allocation functions.
+ if (unsigned BuiltinID = Fn->getBuiltinID()) {
+ switch (BuiltinID) {
+ case Builtin::BIaligned_alloc:
+ case Builtin::BIcalloc:
+ case Builtin::BImalloc:
+ case Builtin::BImemalign:
+ case Builtin::BIrealloc:
+ case Builtin::BIstrdup:
+ case Builtin::BIstrndup:
+ RetAttrs.addAlignmentAttr(Context.getTargetInfo().getNewAlign() /
+ Context.getTargetInfo().getCharWidth());
+ break;
+ default:
+ break;
+ }
+ }
}
// 'const', 'pure' and 'noalias' attributed functions are also nounwind.
if (TargetDecl->hasAttr<ConstAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ // gcc specifies that 'const' functions have greater restrictions than
+ // 'pure' functions, so they also cannot have infinite loops.
+ FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
} else if (TargetDecl->hasAttr<PureAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+ // gcc specifies that 'pure' functions cannot have infinite loops.
+ FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
} else if (TargetDecl->hasAttr<NoAliasAttr>()) {
FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
@@ -2085,6 +2171,17 @@ void CodeGenModule::ConstructAttributeList(
}
}
+ // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
+ // functions with -funique-internal-linkage-names.
+ if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
+ if (isa<FunctionDecl>(TargetDecl)) {
+ if (this->getFunctionLinkage(CalleeInfo.getCalleeDecl()) ==
+ llvm::GlobalValue::InternalLinkage)
+ FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
+ "selected");
+ }
+ }
+
// Collect non-call-site function IR attributes from declaration-specific
// information.
if (!AttrOnCallSite) {
@@ -2112,8 +2209,8 @@ void CodeGenModule::ConstructAttributeList(
return false;
};
- FuncAttrs.addAttribute("disable-tail-calls",
- llvm::toStringRef(shouldDisableTailCalls()));
+ if (shouldDisableTailCalls())
+ FuncAttrs.addAttribute("disable-tail-calls", "true");
// CPU/feature overrides. addDefaultFunctionDefinitionAttributes
// handles these separately to set them based on the global defaults.
@@ -2125,6 +2222,34 @@ void CodeGenModule::ConstructAttributeList(
QualType RetTy = FI.getReturnType();
const ABIArgInfo &RetAI = FI.getReturnInfo();
+ const llvm::DataLayout &DL = getDataLayout();
+
+ // C++ explicitly makes returning undefined values UB. C's rule only applies
+ // to used values, so we never mark them noundef for now.
+ bool HasStrictReturn = getLangOpts().CPlusPlus;
+ if (TargetDecl) {
+ if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
+ HasStrictReturn &= !FDecl->isExternC();
+ else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
+ // Function pointer
+ HasStrictReturn &= !VDecl->isExternC();
+ }
+
+ // We don't want to be too aggressive with the return checking, unless
+ // it's explicit in the code opts or we're using an appropriate sanitizer.
+ // Try to respect what the programmer intended.
+ HasStrictReturn &= getCodeGenOpts().StrictReturn ||
+ !MayDropFunctionReturn(getContext(), RetTy) ||
+ getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
+ getLangOpts().Sanitize.has(SanitizerKind::Return);
+
+ // Determine if the return type could be partially undef
+ if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
+ if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
+ DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
+ RetAttrs.addAttribute(llvm::Attribute::NoUndef);
+ }
+
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
if (RetAI.isSignExt())
@@ -2155,18 +2280,21 @@ void CodeGenModule::ConstructAttributeList(
llvm_unreachable("Invalid ABI kind for return argument");
}
- if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
- QualType PTy = RefTy->getPointeeType();
- if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
- RetAttrs.addDereferenceableAttr(
- getMinimumObjectSize(PTy).getQuantity());
- if (getContext().getTargetAddressSpace(PTy) == 0 &&
- !CodeGenOpts.NullPointerIsValid)
- RetAttrs.addAttribute(llvm::Attribute::NonNull);
- if (PTy->isObjectType()) {
- llvm::Align Alignment =
- getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
- RetAttrs.addAlignmentAttr(Alignment);
+ if (!IsThunk) {
+ // FIXME: fix this properly, https://reviews.llvm.org/D100388
+ if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
+ QualType PTy = RefTy->getPointeeType();
+ if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
+ RetAttrs.addDereferenceableAttr(
+ getMinimumObjectSize(PTy).getQuantity());
+ if (getContext().getTargetAddressSpace(PTy) == 0 &&
+ !CodeGenOpts.NullPointerIsValid)
+ RetAttrs.addAttribute(llvm::Attribute::NonNull);
+ if (PTy->isObjectType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
+ RetAttrs.addAlignmentAttr(Alignment);
+ }
}
}
@@ -2188,27 +2316,29 @@ void CodeGenModule::ConstructAttributeList(
// Attach attributes to inalloca argument.
if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
- Attrs.addAttribute(llvm::Attribute::InAlloca);
+ Attrs.addInAllocaAttr(FI.getArgStruct());
ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
llvm::AttributeSet::get(getLLVMContext(), Attrs);
}
- // Apply `nonnull` and `dereferencable(N)` to the `this` argument.
+  // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
+ // unless this is a thunk function.
+ // FIXME: fix this properly, https://reviews.llvm.org/D100388
if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
- !FI.arg_begin()->type->isVoidPointerType()) {
+ !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
auto IRArgs = IRFunctionArgs.getIRArgs(0);
assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
llvm::AttrBuilder Attrs;
+ QualType ThisTy =
+ FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
+
if (!CodeGenOpts.NullPointerIsValid &&
getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
Attrs.addAttribute(llvm::Attribute::NonNull);
- Attrs.addDereferenceableAttr(
- getMinimumObjectSize(
- FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
- .getQuantity());
+ Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
} else {
// FIXME dereferenceable should be correct here, regardless of
// NullPointerIsValid. However, dereferenceable currently does not always
@@ -2220,6 +2350,12 @@ void CodeGenModule::ConstructAttributeList(
.getQuantity());
}
+ llvm::Align Alignment =
+ getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
+ /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
+ .getAsAlign();
+ Attrs.addAlignmentAttr(Alignment);
+
ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
}
@@ -2241,6 +2377,11 @@ void CodeGenModule::ConstructAttributeList(
}
}
+ // Decide whether the argument we're handling could be partially undef
+ bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
+ if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
+ Attrs.addAttribute(llvm::Attribute::NoUndef);
+
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
// have the corresponding parameter variable. It doesn't make
// sense to do it here because parameters are so messed up.
@@ -2256,6 +2397,7 @@ void CodeGenModule::ConstructAttributeList(
Attrs.addAttribute(llvm::Attribute::Nest);
else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
+ Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
break;
case ABIArgInfo::Indirect: {
@@ -2365,6 +2507,10 @@ void CodeGenModule::ConstructAttributeList(
case ParameterABI::SwiftContext:
Attrs.addAttribute(llvm::Attribute::SwiftSelf);
break;
+
+ case ParameterABI::SwiftAsyncContext:
+ Attrs.addAttribute(llvm::Attribute::SwiftAsync);
+ break;
}
if (FI.getExtParameterInfo(ArgNo).isNoEscape())
@@ -3292,8 +3438,11 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
--EI;
llvm::Value *ArgStruct = &*EI;
llvm::Value *SRet = Builder.CreateStructGEP(
- nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
- RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
+ EI->getType()->getPointerElementType(), ArgStruct,
+ RetAI.getInAllocaFieldIndex());
+ llvm::Type *Ty =
+ cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
+ RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
}
break;
@@ -3570,7 +3719,7 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
}
// Deactivate the cleanup for the callee-destructed param that was pushed.
- if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
+ if (type->isRecordType() && !CurFuncIsThunk &&
type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
param->needsDestruction(getContext())) {
EHScopeStack::stable_iterator cleanup =
@@ -3881,11 +4030,11 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
// later, so we can't check it directly.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
ArrayRef<QualType> ArgTypes) {
- // The Swift calling convention doesn't go through the target-specific
- // argument classification, so it never uses inalloca.
+  // The Swift calling conventions don't go through the target-specific
+  // argument classification, so they never use inalloca.
// TODO: Consider limiting inalloca use to only calling conventions supported
// by MSVC.
- if (ExplicitCC == CC_Swift)
+ if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
return false;
if (!CGM.getTarget().getCXXABI().isMicrosoft())
return false;
@@ -4141,7 +4290,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
// In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
// However, we still have to push an EH-only cleanup in case we unwind before
// we make it to the call.
- if (HasAggregateEvalKind &&
+ if (type->isRecordType() &&
type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
// If we're using inalloca, use the argument memory. Otherwise, use a
// temporary.
@@ -4338,7 +4487,8 @@ llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
llvm::Value *New) {
- DeferredReplacements.push_back(std::make_pair(Old, New));
+ DeferredReplacements.push_back(
+ std::make_pair(llvm::WeakTrackingVH(Old), New));
}
namespace {
@@ -4449,7 +4599,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- llvm::CallBase **callOrInvoke,
+ llvm::CallBase **callOrInvoke, bool IsMustTail,
SourceLocation Loc) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
@@ -4536,7 +4686,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
} else {
SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
if (HaveInsertPoint() && ReturnValue.isUnused()) {
- uint64_t size =
+ llvm::TypeSize size =
CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
}
@@ -4697,7 +4847,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
IRCallArgs[FirstIRArg] = AI.getPointer();
// Emit lifetime markers for the temporary alloca.
- uint64_t ByvalTempElementSize =
+ llvm::TypeSize ByvalTempElementSize =
CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
llvm::Value *LifetimeSize =
EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
@@ -5011,7 +5161,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::AttributeList Attrs;
CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
Callee.getAbstractInfo(), Attrs, CallingConv,
- /*AttrOnCallSite=*/true);
+ /*AttrOnCallSite=*/true,
+ /*IsThunk=*/false);
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
if (FD->hasAttr<StrictFPAttr>())
@@ -5142,10 +5293,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CGM.getLangOpts().ObjCAutoRefCount)
AddObjCARCExceptionMetadata(CI);
- // Suppress tail calls if requested.
+ // Set tail call kind if necessary.
if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
+ else if (IsMustTail)
+ Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
}
// Add metadata for calls to MSAllocator functions
@@ -5197,6 +5350,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return GetUndefRValue(RetTy);
}
+ // If this is a musttail call, return immediately. We do not branch to the
+ // epilogue in this case.
+ if (IsMustTail) {
+ for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
+ ++it) {
+ EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
+ if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
+ CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
+ }
+ if (CI->getType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(CI);
+ Builder.ClearInsertionPoint();
+ EnsureInsertPoint();
+ return GetUndefRValue(RetTy);
+ }
+
// Perform the swifterror writeback.
if (swiftErrorTemp.isValid()) {
llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index ba221dbbc83b..9895a23b7093 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -272,7 +272,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, Address addr,
llvm::Value *ptr = addr.getPointer();
unsigned AddrSpace = ptr->getType()->getPointerAddressSpace();
ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8Ty->getPointerTo(AddrSpace));
- ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
+ ptr = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, ptr, baseOffset, "add.ptr");
// If we have a virtual component, the alignment of the result will
// be relative only to the known alignment of that vbase.
@@ -434,8 +434,8 @@ CodeGenFunction::GetAddressOfDerivedClass(Address BaseAddr,
// Apply the offset.
llvm::Value *Value = Builder.CreateBitCast(BaseAddr.getPointer(), Int8PtrTy);
- Value = Builder.CreateInBoundsGEP(Value, Builder.CreateNeg(NonVirtualOffset),
- "sub.ptr");
+ Value = Builder.CreateInBoundsGEP(
+ Int8Ty, Value, Builder.CreateNeg(NonVirtualOffset), "sub.ptr");
// Just cast.
Value = Builder.CreateBitCast(Value, DerivedPtrTy);
@@ -467,8 +467,6 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
- llvm::Value *VTT;
-
uint64_t SubVTTIndex;
if (Delegating) {
@@ -494,15 +492,14 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
// A VTT parameter was passed to the constructor, use it.
- VTT = LoadCXXVTT();
- VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ llvm::Value *VTT = LoadCXXVTT();
+ return Builder.CreateConstInBoundsGEP1_64(VoidPtrTy, VTT, SubVTTIndex);
} else {
// We're the complete constructor, so get the VTT by name.
- VTT = CGM.getVTables().GetAddrOfVTT(RD);
- VTT = Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ llvm::GlobalValue *VTT = CGM.getVTables().GetAddrOfVTT(RD);
+ return Builder.CreateConstInBoundsGEP2_64(
+ VTT->getValueType(), VTT, 0, SubVTTIndex);
}
-
- return VTT;
}
namespace {
@@ -1744,6 +1741,7 @@ namespace {
llvm::ConstantInt::get(CGF.SizeTy, PoisonStart.getQuantity());
llvm::Value *OffsetPtr = CGF.Builder.CreateGEP(
+ CGF.Int8Ty,
CGF.Builder.CreateBitCast(CGF.LoadCXXThis(), CGF.Int8PtrTy),
OffsetSizePtr);
@@ -1963,9 +1961,10 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
// Find the end of the array.
+ llvm::Type *elementType = arrayBase.getElementType();
llvm::Value *arrayBegin = arrayBase.getPointer();
- llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
- "arrayctor.end");
+ llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(
+ elementType, arrayBegin, numElements, "arrayctor.end");
// Enter the loop, setting up a phi for the current location to initialize.
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
@@ -2023,9 +2022,8 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
}
// Go to the next element.
- llvm::Value *next =
- Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
- "arrayctor.next");
+ llvm::Value *next = Builder.CreateInBoundsGEP(
+ elementType, cur, llvm::ConstantInt::get(SizeTy, 1), "arrayctor.next");
cur->addIncoming(next, Builder.GetInsertBlock());
// Check whether that's the end of the loop.
@@ -2182,7 +2180,7 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
CGCallee Callee = CGCallee::forDirect(CalleePtr, GlobalDecl(D, Type));
- EmitCall(Info, Callee, ReturnValueSlot(), Args, nullptr, Loc);
+ EmitCall(Info, Callee, ReturnValueSlot(), Args, nullptr, false, Loc);
// Generate vtable assumptions if we're constructing a complete object
// with a vtable. We don't do this for base subobjects for two reasons:
@@ -2518,8 +2516,10 @@ void CodeGenFunction::InitializeVTablePointer(const VPtr &Vptr) {
llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
->getPointerTo(ProgAS)
->getPointerTo(GlobalsAS);
+  // The vtable field is derived from the `this` pointer, so it should be in
+  // the default address space.
VTableField = Builder.CreatePointerBitCastOrAddrSpaceCast(
- VTableField, VTablePtrTy->getPointerTo(GlobalsAS));
+ VTableField, VTablePtrTy->getPointerTo());
VTableAddressPoint = Builder.CreatePointerBitCastOrAddrSpaceCast(
VTableAddressPoint, VTablePtrTy);
@@ -2776,7 +2776,7 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
}
std::string TypeName = RD->getQualifiedNameAsString();
- if (getContext().getSanitizerBlacklist().isBlacklistedType(M, TypeName))
+ if (getContext().getNoSanitizeList().containsType(M, TypeName))
return;
SanitizerScope SanScope(this);
@@ -2829,8 +2829,8 @@ bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
return false;
std::string TypeName = RD->getQualifiedNameAsString();
- return !getContext().getSanitizerBlacklist().isBlacklistedType(
- SanitizerKind::CFIVCall, TypeName);
+ return !getContext().getNoSanitizeList().containsType(SanitizerKind::CFIVCall,
+ TypeName);
}
llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
@@ -2852,8 +2852,8 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
std::string TypeName = RD->getQualifiedNameAsString();
if (SanOpts.has(SanitizerKind::CFIVCall) &&
- !getContext().getSanitizerBlacklist().isBlacklistedType(
- SanitizerKind::CFIVCall, TypeName)) {
+ !getContext().getNoSanitizeList().containsType(SanitizerKind::CFIVCall,
+ TypeName)) {
EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIVCall),
SanitizerHandler::CFICheckFail, {}, {});
}
diff --git a/clang/lib/CodeGen/CGCleanup.cpp b/clang/lib/CodeGen/CGCleanup.cpp
index ad543ef86c1a..b9364fcd2231 100644
--- a/clang/lib/CodeGen/CGCleanup.cpp
+++ b/clang/lib/CodeGen/CGCleanup.cpp
@@ -194,6 +194,11 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
if (IsLifetimeMarker)
Scope->setLifetimeMarker();
+  // With Windows -EHa, invoke llvm.seh.scope.begin() for each EHCleanup
+ if (CGF->getLangOpts().EHAsynch && IsEHCleanup && !IsLifetimeMarker &&
+ CGF->getTarget().getCXXABI().isMicrosoft())
+ CGF->EmitSehCppScopeBegin();
+
return Scope->getCleanupBuffer();
}
@@ -759,14 +764,31 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
if (Scope.isEHCleanup())
cleanupFlags.setIsEHCleanupKind();
+ // Under -EHa, invoke seh.scope.end() to mark scope end before dtor
+ bool IsEHa = getLangOpts().EHAsynch && !Scope.isLifetimeMarker();
+ const EHPersonality &Personality = EHPersonality::get(*this);
if (!RequiresNormalCleanup) {
+    // Mark the C++ scope end for a passed-by-value argument temporary,
+    // which per the Windows ABI is "normally" cleaned up in the callee.
+ if (IsEHa && getInvokeDest()) {
+ if (Personality.isMSVCXXPersonality())
+ EmitSehCppScopeEnd();
+ }
destroyOptimisticNormalEntry(*this, Scope);
EHStack.popCleanup();
} else {
// If we have a fallthrough and no other need for the cleanup,
// emit it directly.
- if (HasFallthrough && !HasPrebranchedFallthrough &&
- !HasFixups && !HasExistingBranches) {
+ if (HasFallthrough && !HasPrebranchedFallthrough && !HasFixups &&
+ !HasExistingBranches) {
+
+ // mark SEH scope end for fall-through flow
+ if (IsEHa && getInvokeDest()) {
+ if (Personality.isMSVCXXPersonality())
+ EmitSehCppScopeEnd();
+ else
+ EmitSehTryScopeEnd();
+ }
destroyOptimisticNormalEntry(*this, Scope);
EHStack.popCleanup();
@@ -801,6 +823,14 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// should already be branched to it.
EmitBlock(NormalEntry);
+ // intercept normal cleanup to mark SEH scope end
+ if (IsEHa) {
+ if (Personality.isMSVCXXPersonality())
+ EmitSehCppScopeEnd();
+ else
+ EmitSehTryScopeEnd();
+ }
+
// III. Figure out where we're going and build the cleanup
// epilogue.
@@ -1248,11 +1278,17 @@ void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
// to the current RunCleanupsScope.
if (C == EHStack.stable_begin() &&
CurrentCleanupScopeDepth.strictlyEncloses(C)) {
- // If it's a normal cleanup, we need to pretend that the
- // fallthrough is unreachable.
- CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- PopCleanupBlock();
- Builder.restoreIP(SavedIP);
+ // Per comment below, checking EHAsynch is not really necessary
+ // it's there to assure zero-impact w/o EHAsynch option
+ if (!Scope.isNormalCleanup() && getLangOpts().EHAsynch) {
+ PopCleanupBlock();
+ } else {
+ // If it's a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ }
return;
}
@@ -1276,3 +1312,59 @@ void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
/*useEHCleanup*/ true);
}
+
+// Need to set "funclet" in OperandBundle properly for noThrow
+// intrinsic (see CGCall.cpp)
+static void EmitSehScope(CodeGenFunction &CGF,
+ llvm::FunctionCallee &SehCppScope) {
+ llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
+ assert(CGF.Builder.GetInsertBlock() && InvokeDest);
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
+ SmallVector<llvm::OperandBundleDef, 1> BundleList =
+ CGF.getBundlesForFunclet(SehCppScope.getCallee());
+ if (CGF.CurrentFuncletPad)
+ BundleList.emplace_back("funclet", CGF.CurrentFuncletPad);
+ CGF.Builder.CreateInvoke(SehCppScope, Cont, InvokeDest, None, BundleList);
+ CGF.EmitBlock(Cont);
+}
+
+// Invoke a llvm.seh.scope.begin at the beginning of a CPP scope for -EHa
+void CodeGenFunction::EmitSehCppScopeBegin() {
+ assert(getLangOpts().EHAsynch);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::FunctionCallee SehCppScope =
+ CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.begin");
+ EmitSehScope(*this, SehCppScope);
+}
+
+// Invoke a llvm.seh.scope.end at the end of a CPP scope for -EHa
+// llvm.seh.scope.end is emitted before popCleanup, so it's "invoked"
+void CodeGenFunction::EmitSehCppScopeEnd() {
+ assert(getLangOpts().EHAsynch);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::FunctionCallee SehCppScope =
+ CGM.CreateRuntimeFunction(FTy, "llvm.seh.scope.end");
+ EmitSehScope(*this, SehCppScope);
+}
+
+// Invoke a llvm.seh.try.begin at the beginning of a SEH scope for -EHa
+void CodeGenFunction::EmitSehTryScopeBegin() {
+ assert(getLangOpts().EHAsynch);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::FunctionCallee SehCppScope =
+ CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.begin");
+ EmitSehScope(*this, SehCppScope);
+}
+
+// Invoke a llvm.seh.try.end at the end of a SEH scope for -EHa
+void CodeGenFunction::EmitSehTryScopeEnd() {
+ assert(getLangOpts().EHAsynch);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ llvm::FunctionCallee SehCppScope =
+ CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.end");
+ EmitSehScope(*this, SehCppScope);
+}
diff --git a/clang/lib/CodeGen/CGCoroutine.cpp b/clang/lib/CodeGen/CGCoroutine.cpp
index 5c57ad0685d5..ca071d3d2e80 100644
--- a/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/clang/lib/CodeGen/CGCoroutine.cpp
@@ -556,6 +556,8 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
{Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr});
createCoroData(*this, CurCoro, CoroId);
CurCoro.Data->SuspendBB = RetBB;
+ assert(ShouldEmitLifetimeMarkers &&
+ "Must emit lifetime intrinsics for coroutines");
// Backend is allowed to elide memory allocations, to help it, emit
// auto mem = coro.alloc() ? 0 : ... allocation code ...;
@@ -600,10 +602,21 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB);
{
+ CGDebugInfo *DI = getDebugInfo();
ParamReferenceReplacerRAII ParamReplacer(LocalDeclMap);
CodeGenFunction::RunCleanupsScope ResumeScope(*this);
EHStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, S.getDeallocate());
+ // Create mapping between parameters and copy-params for coroutine function.
+ auto ParamMoves = S.getParamMoves();
+ assert(
+ (ParamMoves.size() == 0 || (ParamMoves.size() == FnArgs.size())) &&
+ "ParamMoves and FnArgs should be the same size for coroutine function");
+ if (ParamMoves.size() == FnArgs.size() && DI)
+ for (const auto Pair : llvm::zip(FnArgs, ParamMoves))
+ DI->getCoroutineParameterMappings().insert(
+ {std::get<0>(Pair), std::get<1>(Pair)});
+
// Create parameter copies. We do it before creating a promise, since an
// evolution of coroutine TS may allow promise constructor to observe
// parameter copies.
diff --git a/clang/lib/CodeGen/CGDebugInfo.cpp b/clang/lib/CodeGen/CGDebugInfo.cpp
index 99944afaad14..81c910f40bf8 100644
--- a/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -249,26 +249,7 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
}
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
- assert(FD && "Invalid FunctionDecl!");
- IdentifierInfo *FII = FD->getIdentifier();
- FunctionTemplateSpecializationInfo *Info =
- FD->getTemplateSpecializationInfo();
-
- if (!Info && FII)
- return FII->getName();
-
- SmallString<128> NS;
- llvm::raw_svector_ostream OS(NS);
- FD->printName(OS);
-
- // Add any template specialization args.
- if (Info) {
- const TemplateArgumentList *TArgs = Info->TemplateArguments;
- printTemplateArgumentList(OS, TArgs->asArray(), getPrintingPolicy());
- }
-
- // Copy this name on the side and use its reference.
- return internString(OS.str());
+ return internString(GetName(FD));
}
StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
@@ -301,15 +282,8 @@ StringRef CGDebugInfo::getSelectorName(Selector S) {
StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
if (isa<ClassTemplateSpecializationDecl>(RD)) {
- SmallString<128> Name;
- llvm::raw_svector_ostream OS(Name);
- PrintingPolicy PP = getPrintingPolicy();
- PP.PrintCanonicalTypes = true;
- RD->getNameForDiagnostic(OS, PP,
- /*Qualified*/ false);
-
// Copy this name on the side and use its reference.
- return internString(Name);
+ return internString(GetName(RD));
}
// quick optimization to avoid having to intern strings that are already
@@ -317,8 +291,9 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
if (const IdentifierInfo *II = RD->getIdentifier())
return II->getName();
- // The CodeView printer in LLVM wants to see the names of unnamed types: it is
- // used to reconstruct the fully qualified type names.
+ // The CodeView printer in LLVM wants to see the names of unnamed types
+ // because they need to have a unique identifier.
+ // These names are used to reconstruct the fully qualified type names.
if (CGM.getCodeGenOpts().EmitCodeView) {
if (const TypedefNameDecl *D = RD->getTypedefNameForAnonDecl()) {
assert(RD->getDeclContext() == D->getDeclContext() &&
@@ -342,6 +317,12 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
// associate typedef mangled in if they have one.
Name = TND->getName();
+ // Give lambdas a display name based on their name mangling.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->isLambda())
+ return internString(
+ CGM.getCXXABI().getMangleContext().getLambdaString(CXXRD));
+
if (!Name.empty()) {
SmallString<256> UnnamedType("<unnamed-type-");
UnnamedType += Name;
@@ -560,14 +541,19 @@ void CGDebugInfo::CreateCompileUnit() {
if (LO.CPlusPlus) {
if (LO.ObjC)
LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
- else if (LO.CPlusPlus14)
+ else if (LO.CPlusPlus14 && (!CGM.getCodeGenOpts().DebugStrictDwarf ||
+ CGM.getCodeGenOpts().DwarfVersion >= 5))
LangTag = llvm::dwarf::DW_LANG_C_plus_plus_14;
- else if (LO.CPlusPlus11)
+ else if (LO.CPlusPlus11 && (!CGM.getCodeGenOpts().DebugStrictDwarf ||
+ CGM.getCodeGenOpts().DwarfVersion >= 5))
LangTag = llvm::dwarf::DW_LANG_C_plus_plus_11;
else
LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
} else if (LO.ObjC) {
LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.OpenCL && (!CGM.getCodeGenOpts().DebugStrictDwarf ||
+ CGM.getCodeGenOpts().DwarfVersion >= 5)) {
+ LangTag = llvm::dwarf::DW_LANG_OpenCL;
} else if (LO.RenderScript) {
LangTag = llvm::dwarf::DW_LANG_GOOGLE_RenderScript;
} else if (LO.C99) {
@@ -752,6 +738,59 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
#include "clang/Basic/PPCTypes.def"
return CreateType(cast<const BuiltinType>(CGM.getContext().IntTy));
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+ {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ CGM.getContext().getBuiltinVectorTypeInfo(BT);
+
+ unsigned ElementCount = Info.EC.getKnownMinValue();
+ unsigned SEW = CGM.getContext().getTypeSize(Info.ElementType);
+
+ bool Fractional = false;
+ unsigned LMUL;
+ unsigned FixedSize = ElementCount * SEW;
+ if (Info.ElementType == CGM.getContext().BoolTy) {
+ // Mask type only occupies one vector register.
+ LMUL = 1;
+ } else if (FixedSize < 64) {
+ // In RVV scalable vector types, we encode 64 bits in the fixed part.
+ Fractional = true;
+ LMUL = 64 / FixedSize;
+ } else {
+ LMUL = FixedSize / 64;
+ }
+
+ // Element count = (VLENB / SEW) x LMUL
+ SmallVector<int64_t, 9> Expr(
+ // The DW_OP_bregx operation has two operands: a register which is
+ // specified by an unsigned LEB128 number, followed by a signed LEB128
+ // offset.
+ {llvm::dwarf::DW_OP_bregx, // Read the contents of a register.
+ 4096 + 0xC22, // RISC-V VLENB CSR register.
+ 0, // Offset for DW_OP_bregx. It is dummy here.
+ llvm::dwarf::DW_OP_constu,
+ SEW / 8, // SEW is in bits.
+ llvm::dwarf::DW_OP_div, llvm::dwarf::DW_OP_constu, LMUL});
+ if (Fractional)
+ Expr.push_back(llvm::dwarf::DW_OP_div);
+ else
+ Expr.push_back(llvm::dwarf::DW_OP_mul);
+
+ auto *LowerBound =
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(
+ llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0));
+ auto *UpperBound = DBuilder.createExpression(Expr);
+ llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(
+ /*count*/ nullptr, LowerBound, UpperBound, /*stride*/ nullptr);
+ llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
+ llvm::DIType *ElemTy =
+ getOrCreateType(Info.ElementType, TheCU->getFile());
+
+ auto Align = getTypeAlignIfRequired(BT, CGM.getContext());
+ return DBuilder.createVectorType(/*Size=*/0, Align, ElemTy,
+ SubscriptArray);
+ }
case BuiltinType::UChar:
case BuiltinType::Char_U:
Encoding = llvm::dwarf::DW_ATE_unsigned_char;
@@ -1248,6 +1287,9 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_OpenCLKernel;
case CC_Swift:
return llvm::dwarf::DW_CC_LLVM_Swift;
+ case CC_SwiftAsync:
+ // [FIXME: swiftasynccc] Update to SwiftAsync once LLVM support lands.
+ return llvm::dwarf::DW_CC_LLVM_Swift;
case CC_PreserveMost:
return llvm::dwarf::DW_CC_LLVM_PreserveMost;
case CC_PreserveAll:
@@ -1722,6 +1764,8 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
Flags |= llvm::DINode::FlagLValueReference;
if (Method->getRefQualifier() == RQ_RValue)
Flags |= llvm::DINode::FlagRValueReference;
+ if (!Method->isExternallyVisible())
+ SPFlags |= llvm::DISubprogram::SPFlagLocalToUnit;
if (CGM.getLangOpts().Optimize)
SPFlags |= llvm::DISubprogram::SPFlagOptimized;
@@ -2323,7 +2367,8 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
if (DebugKind == codegenoptions::DebugLineTablesOnly)
return true;
- if (DebugKind > codegenoptions::LimitedDebugInfo)
+ if (DebugKind > codegenoptions::LimitedDebugInfo ||
+ RD->hasAttr<StandaloneDebugAttr>())
return false;
if (!LangOpts.CPlusPlus)
@@ -2677,16 +2722,26 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
EltTys.push_back(PropertyNode);
};
{
- llvm::SmallPtrSet<const IdentifierInfo *, 16> PropertySet;
+ // Use 'char' for the isClassProperty bit as DenseSet requires space for
+ // empty/tombstone keys in the data type (and bool is too small for that).
+ typedef std::pair<char, const IdentifierInfo *> IsClassAndIdent;
+ /// List of already emitted properties. Two distinct class and instance
+ /// properties can share the same identifier (but not two instance
+ /// properties or two class properties).
+ llvm::DenseSet<IsClassAndIdent> PropertySet;
+ /// Returns the IsClassAndIdent key for the given property.
+ auto GetIsClassAndIdent = [](const ObjCPropertyDecl *PD) {
+ return std::make_pair(PD->isClassProperty(), PD->getIdentifier());
+ };
for (const ObjCCategoryDecl *ClassExt : ID->known_extensions())
for (auto *PD : ClassExt->properties()) {
- PropertySet.insert(PD->getIdentifier());
+ PropertySet.insert(GetIsClassAndIdent(PD));
AddProperty(PD);
}
for (const auto *PD : ID->properties()) {
// Don't emit duplicate metadata for properties that were already in a
// class extension.
- if (!PropertySet.insert(PD->getIdentifier()).second)
+ if (!PropertySet.insert(GetIsClassAndIdent(PD)).second)
continue;
AddProperty(PD);
}
@@ -2915,8 +2970,13 @@ llvm::DIType *CGDebugInfo::CreateType(const LValueReferenceType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIFile *Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty,
- Ty->getPointeeType(), Unit);
+ llvm::dwarf::Tag Tag = llvm::dwarf::DW_TAG_rvalue_reference_type;
+ // DW_TAG_rvalue_reference_type was introduced in DWARF 4.
+ if (CGM.getCodeGenOpts().DebugStrictDwarf &&
+ CGM.getCodeGenOpts().DwarfVersion < 4)
+ Tag = llvm::dwarf::DW_TAG_reference_type;
+
+ return CreatePointerLikeType(Tag, Ty, Ty->getPointeeType(), Unit);
}
llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
@@ -3024,15 +3084,11 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
- // Create elements for each enumerator.
SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
- bool IsSigned = ED->getIntegerType()->isSignedIntegerType();
for (const auto *Enum : ED->enumerators()) {
- const auto &InitVal = Enum->getInitVal();
- auto Value = IsSigned ? InitVal.getSExtValue() : InitVal.getZExtValue();
Enumerators.push_back(
- DBuilder.createEnumerator(Enum->getName(), Value, !IsSigned));
+ DBuilder.createEnumerator(Enum->getName(), Enum->getInitVal()));
}
// Return a CompositeType for the enum itself.
@@ -3459,19 +3515,20 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
llvm::DIScope *&FDContext,
llvm::DINodeArray &TParamsArray,
llvm::DINode::DIFlags &Flags) {
- const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ const auto *FD = cast<FunctionDecl>(GD.getCanonicalDecl().getDecl());
Name = getFunctionName(FD);
// Use mangled name as linkage name for C/C++ functions.
- if (FD->hasPrototype()) {
+ if (FD->getType()->getAs<FunctionProtoType>())
LinkageName = CGM.getMangledName(GD);
+ if (FD->hasPrototype())
Flags |= llvm::DINode::FlagPrototyped;
- }
// No need to replicate the linkage name if it isn't different from the
// subprogram name, no need to have it at all unless coverage is enabled or
// debug is set to more than just line tables or extra debug info is needed.
if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs &&
!CGM.getCodeGenOpts().EmitGcovNotes &&
!CGM.getCodeGenOpts().DebugInfoForProfiling &&
+ !CGM.getCodeGenOpts().PseudoProbeForProfiling &&
DebugKind <= codegenoptions::DebugLineTablesOnly))
LinkageName = StringRef();
@@ -3915,12 +3972,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
return;
llvm::TimeTraceScope TimeScope("DebugFunction", [&]() {
- std::string Name;
- llvm::raw_string_ostream OS(Name);
- if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
- ND->getNameForDiagnostic(OS, getPrintingPolicy(),
- /*Qualified=*/true);
- return Name;
+ return GetName(D, true);
});
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
@@ -3984,9 +4036,9 @@ void CGDebugInfo::EmitFuncDeclForCallSite(llvm::CallBase *CallOrInvoke,
if (CalleeDecl->getBuiltinID() != 0 || CalleeDecl->hasAttr<NoDebugAttr>() ||
getCallSiteRelatedAttrs() == llvm::DINode::FlagZero)
return;
- if (const auto *Id = CalleeDecl->getIdentifier())
- if (Id->isReservedName())
- return;
+ if (CalleeDecl->isReserved(CGM.getLangOpts()) !=
+ ReservedIdentifierStatus::NotReserved)
+ return;
// If there is no DISubprogram attached to the function being called,
// create the one describing the function in order to have complete
@@ -4229,7 +4281,9 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
StringRef Name = VD->getName();
if (!Name.empty()) {
- if (VD->hasAttr<BlocksAttr>()) {
+ // __block vars are stored on the heap if they are captured by a block that
+ // can escape the local scope.
+ if (VD->isEscapingByref()) {
// Here, we need an offset *into* the alloca.
CharUnits offset = CharUnits::fromQuantity(32);
Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
@@ -4290,13 +4344,53 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
}
// Create the descriptor for the variable.
- auto *D = ArgNo ? DBuilder.createParameterVariable(
- Scope, Name, *ArgNo, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags)
- : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize,
- Flags, Align);
-
+ llvm::DILocalVariable *D = nullptr;
+ if (ArgNo) {
+ D = DBuilder.createParameterVariable(Scope, Name, *ArgNo, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags);
+ } else {
+ // For a normal local variable, we will try to find out whether 'VD' is the
+ // copy parameter of coroutine.
+ // If yes, we are going to use the DIVariable of the original parameter instead
+ // of creating the new one.
+ // If no, it might be a normal alloc, we just create a new one for it.
+
+ // Check whether VD is a move parameter.
+ auto RemapCoroArgToLocalVar = [&]() -> llvm::DILocalVariable * {
+ // The scope of parameter and move-parameter should be distinct
+ // DISubprogram.
+ if (!isa<llvm::DISubprogram>(Scope) || !Scope->isDistinct())
+ return nullptr;
+
+ auto Iter = llvm::find_if(CoroutineParameterMappings, [&](auto &Pair) {
+ Stmt *StmtPtr = const_cast<Stmt *>(Pair.second);
+ if (DeclStmt *DeclStmtPtr = dyn_cast<DeclStmt>(StmtPtr)) {
+ DeclGroupRef DeclGroup = DeclStmtPtr->getDeclGroup();
+ Decl *Decl = DeclGroup.getSingleDecl();
+ if (VD == dyn_cast_or_null<VarDecl>(Decl))
+ return true;
+ }
+ return false;
+ });
+
+ if (Iter != CoroutineParameterMappings.end()) {
+ ParmVarDecl *PD = const_cast<ParmVarDecl *>(Iter->first);
+ auto Iter2 = llvm::find_if(ParamDbgMappings, [&](auto &DbgPair) {
+ return DbgPair.first == PD && DbgPair.second->getScope() == Scope;
+ });
+ if (Iter2 != ParamDbgMappings.end())
+ return const_cast<llvm::DILocalVariable *>(Iter2->second);
+ }
+ return nullptr;
+ };
+
+ // If we couldn't find a move param DIVariable, create a new one.
+ D = RemapCoroArgToLocalVar();
+ // Otherwise create a new DIVariable for this Decl, since D does not exist.
+ if (!D)
+ D = DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags, Align);
+ }
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
llvm::DILocation::get(CGM.getLLVMContext(), Line,
@@ -4421,11 +4515,11 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
DBuilder.insertDeclare(Storage, D, Expr, DL, Builder.GetInsertBlock());
}
-void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
- unsigned ArgNo,
- CGBuilderTy &Builder) {
+llvm::DILocalVariable *
+CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ unsigned ArgNo, CGBuilderTy &Builder) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
- EmitDeclare(VD, AI, ArgNo, Builder);
+ return EmitDeclare(VD, AI, ArgNo, Builder);
}
namespace {
@@ -4646,6 +4740,18 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
return GVE;
}
+std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
+ std::string Name;
+ llvm::raw_string_ostream OS(Name);
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ PrintingPolicy PP = getPrintingPolicy();
+ PP.PrintCanonicalTypes = true;
+ PP.SuppressInlineNamespace = false;
+ ND->getNameForDiagnostic(OS, PP, Qualified);
+ }
+ return Name;
+}
+
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
@@ -4653,11 +4759,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
return;
llvm::TimeTraceScope TimeScope("DebugGlobalVariable", [&]() {
- std::string Name;
- llvm::raw_string_ostream OS(Name);
- D->getNameForDiagnostic(OS, getPrintingPolicy(),
- /*Qualified=*/true);
- return Name;
+ return GetName(D, true);
});
// If we already created a DIGlobalVariable for this declaration, just attach
@@ -4721,11 +4823,7 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
if (VD->hasAttr<NoDebugAttr>())
return;
llvm::TimeTraceScope TimeScope("DebugConstGlobalVariable", [&]() {
- std::string Name;
- llvm::raw_string_ostream OS(Name);
- VD->getNameForDiagnostic(OS, getPrintingPolicy(),
- /*Qualified=*/true);
- return Name;
+ return GetName(VD, true);
});
auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
@@ -4846,24 +4944,7 @@ void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
}
}
-void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
- if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
- return;
- assert(UD.shadow_size() &&
- "We shouldn't be codegening an invalid UsingDecl containing no decls");
- // Emitting one decl is sufficient - debuggers can detect that this is an
- // overloaded name & provide lookup for all the overloads.
- const UsingShadowDecl &USD = **UD.shadow_begin();
-
- // FIXME: Skip functions with undeduced auto return type for now since we
- // don't currently have the plumbing for separate declarations & definitions
- // of free functions and mismatched types (auto in the declaration, concrete
- // return type in the definition)
- if (const auto *FD = dyn_cast<FunctionDecl>(USD.getUnderlyingDecl()))
- if (const auto *AT =
- FD->getType()->castAs<FunctionProtoType>()->getContainedAutoType())
- if (AT->getDeducedType().isNull())
- return;
+void CGDebugInfo::EmitUsingShadowDecl(const UsingShadowDecl &USD) {
if (llvm::DINode *Target =
getDeclarationOrDefinition(USD.getUnderlyingDecl())) {
auto Loc = USD.getLocation();
@@ -4873,6 +4954,42 @@ void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
}
}
+void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
+ if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
+ return;
+ assert(UD.shadow_size() &&
+ "We shouldn't be codegening an invalid UsingDecl containing no decls");
+
+ for (const auto *USD : UD.shadows()) {
+ // FIXME: Skip functions with undeduced auto return type for now since we
+ // don't currently have the plumbing for separate declarations & definitions
+ // of free functions and mismatched types (auto in the declaration, concrete
+ // return type in the definition)
+ if (const auto *FD = dyn_cast<FunctionDecl>(USD->getUnderlyingDecl()))
+ if (const auto *AT = FD->getType()
+ ->castAs<FunctionProtoType>()
+ ->getContainedAutoType())
+ if (AT->getDeducedType().isNull())
+ continue;
+
+ EmitUsingShadowDecl(*USD);
+ // Emitting one decl is sufficient - debuggers can detect that this is an
+ // overloaded name & provide lookup for all the overloads.
+ break;
+ }
+}
+
+void CGDebugInfo::EmitUsingEnumDecl(const UsingEnumDecl &UD) {
+ if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
+ return;
+ assert(UD.shadow_size() &&
+ "We shouldn't be codegening an invalid UsingEnumDecl"
+ " containing no decls");
+
+ for (const auto *USD : UD.shadows())
+ EmitUsingShadowDecl(*USD);
+}
+
void CGDebugInfo::EmitImportDecl(const ImportDecl &ID) {
if (CGM.getCodeGenOpts().getDebuggerTuning() != llvm::DebuggerKind::LLDB)
return;
diff --git a/clang/lib/CodeGen/CGDebugInfo.h b/clang/lib/CodeGen/CGDebugInfo.h
index afd5b50c182a..b01165f85a6c 100644
--- a/clang/lib/CodeGen/CGDebugInfo.h
+++ b/clang/lib/CodeGen/CGDebugInfo.h
@@ -161,6 +161,15 @@ class CGDebugInfo {
llvm::DenseMap<const Decl *, llvm::TypedTrackingMDRef<llvm::DIDerivedType>>
StaticDataMemberCache;
+ using ParamDecl2StmtTy = llvm::DenseMap<const ParmVarDecl *, const Stmt *>;
+ using Param2DILocTy =
+ llvm::DenseMap<const ParmVarDecl *, llvm::DILocalVariable *>;
+
+ /// The key is coroutine real parameters, value is coroutine move parameters.
+ ParamDecl2StmtTy CoroutineParameterMappings;
+ /// The key is coroutine real parameters, value is DIVariable in LLVM IR.
+ Param2DILocTy ParamDbgMappings;
+
/// Helper functions for getOrCreateType.
/// @{
/// Currently the checksum of an interface includes the number of
@@ -463,8 +472,10 @@ public:
/// Emit call to \c llvm.dbg.declare for an argument variable
/// declaration.
- void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
- unsigned ArgNo, CGBuilderTy &Builder);
+ llvm::DILocalVariable *EmitDeclareOfArgVariable(const VarDecl *Decl,
+ llvm::Value *AI,
+ unsigned ArgNo,
+ CGBuilderTy &Builder);
/// Emit call to \c llvm.dbg.declare for the block-literal argument
/// to a block invocation function.
@@ -491,9 +502,15 @@ public:
/// Emit the type even if it might not be used.
void EmitAndRetainType(QualType Ty);
+ /// Emit a shadow decl brought in by a using or using-enum
+ void EmitUsingShadowDecl(const UsingShadowDecl &USD);
+
/// Emit C++ using declaration.
void EmitUsingDecl(const UsingDecl &UD);
+ /// Emit C++ using-enum declaration.
+ void EmitUsingEnumDecl(const UsingEnumDecl &UD);
+
/// Emit an @import declaration.
void EmitImportDecl(const ImportDecl &ID);
@@ -533,6 +550,11 @@ public:
SourceLocation LineLoc,
SourceLocation FileLoc);
+ Param2DILocTy &getParamDbgMappings() { return ParamDbgMappings; }
+ ParamDecl2StmtTy &getCoroutineParameterMappings() {
+ return CoroutineParameterMappings;
+ }
+
private:
/// Emit call to llvm.dbg.declare for a variable declaration.
/// Returns a pointer to the DILocalVariable associated with the
@@ -549,6 +571,8 @@ private:
llvm::DIType *WrappedType;
};
+ std::string GetName(const Decl*, bool Qualified = false) const;
+
/// Build up structure info for the byref. See \a BuildByRefType.
BlockByRefType EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *OffSet);
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index a01638f0b67b..5b3d39f20b41 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -99,6 +99,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ConstructorUsingShadow:
case Decl::ObjCTypeParam:
case Decl::Binding:
+ case Decl::UnresolvedUsingIfExists:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Record: // struct/union/class X;
case Decl::CXXRecord: // struct/union/class X; [C++]
@@ -137,6 +138,10 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(D));
return;
+ case Decl::UsingEnum: // using enum X; [C++]
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(D));
+ return;
case Decl::UsingPack:
for (auto *Using : cast<UsingPackDecl>(D).expansions())
EmitDecl(*Using);
@@ -441,8 +446,10 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
if (const SectionAttr *SA = D.getAttr<SectionAttr>())
var->setSection(SA->getName());
- if (D.hasAttr<UsedAttr>())
+ if (D.hasAttr<RetainAttr>())
CGM.addUsedGlobal(var);
+ else if (D.hasAttr<UsedAttr>())
+ CGM.addUsedOrCompilerUsedGlobal(var);
// We may have to cast the constant because of the initializer
// mismatch above.
@@ -548,6 +555,7 @@ namespace {
struct CallStackRestore final : EHScopeStack::Cleanup {
Address Stack;
CallStackRestore(Address Stack) : Stack(Stack) {}
+ bool isRedundantBeforeReturn() override { return true; }
void Emit(CodeGenFunction &CGF, Flags flags) override {
llvm::Value *V = CGF.Builder.CreateLoad(Stack);
llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
@@ -706,10 +714,10 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
}
// If it was an l-value, use objc_copyWeak.
- if (srcExpr->getValueKind() == VK_LValue) {
+ if (srcExpr->isLValue()) {
CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
} else {
- assert(srcExpr->getValueKind() == VK_XValue);
+ assert(srcExpr->isXValue());
CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
}
return true;
@@ -769,9 +777,10 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
// If we're emitting a value with lifetime, we have to do the
// initialization *before* we leave the cleanup scopes.
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(init))
- init = EWC->getSubExpr();
- CodeGenFunction::RunCleanupsScope Scope(*this);
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(init)) {
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+ return EmitScalarInit(EWC->getSubExpr(), D, lvalue, capturedByInit);
+ }
// We have to maintain the illusion that the variable is
// zero-initialized. If the variable might be accessed in its
@@ -1119,7 +1128,7 @@ Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
bool isConstant = true;
llvm::GlobalVariable *InsertBefore = nullptr;
unsigned AS =
- getContext().getTargetAddressSpace(getStringLiteralAddressSpace());
+ getContext().getTargetAddressSpace(GetGlobalConstantAddressSpace());
std::string Name;
if (D.hasGlobalStorage())
Name = getMangledName(&D).str() + ".const";
@@ -1313,7 +1322,7 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
-llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
+llvm::Value *CodeGenFunction::EmitLifetimeStart(llvm::TypeSize Size,
llvm::Value *Addr) {
if (!ShouldEmitLifetimeMarkers)
return nullptr;
@@ -1321,7 +1330,8 @@ llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
assert(Addr->getType()->getPointerAddressSpace() ==
CGM.getDataLayout().getAllocaAddrSpace() &&
"Pointer should be in alloca address space");
- llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
+ llvm::Value *SizeV = llvm::ConstantInt::get(
+ Int64Ty, Size.isScalable() ? -1 : Size.getFixedValue());
Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
llvm::CallInst *C =
Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
@@ -1544,12 +1554,9 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// is rare.
if (!Bypasses.IsBypassed(&D) &&
!(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
- llvm::TypeSize size =
- CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ llvm::TypeSize Size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
emission.SizeForLifetimeMarkers =
- size.isScalable() ? EmitLifetimeStart(-1, AllocaAddr.getPointer())
- : EmitLifetimeStart(size.getFixedSize(),
- AllocaAddr.getPointer());
+ EmitLifetimeStart(Size, AllocaAddr.getPointer());
}
} else {
assert(!emission.useLifetimeMarkers());
@@ -1765,8 +1772,8 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
llvm::Value *BaseSizeInChars =
llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
- llvm::Value *End =
- Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
+ llvm::Value *End = Builder.CreateInBoundsGEP(
+ Begin.getElementType(), Begin.getPointer(), SizeVal, "vla.end");
llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
EmitBlock(LoopBB);
llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
@@ -2194,7 +2201,8 @@ void CodeGenFunction::emitDestroy(Address addr, QualType type,
}
llvm::Value *begin = addr.getPointer();
- llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
+ llvm::Value *end =
+ Builder.CreateInBoundsGEP(addr.getElementType(), begin, length);
emitArrayDestroy(begin, end, type, elementAlign, destroyer,
checkZeroLength, useEHCleanupForArray);
}
@@ -2238,8 +2246,9 @@ void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
// Shift the address back by one element.
llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
- llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
- "arraydestroy.element");
+ llvm::Value *element = Builder.CreateInBoundsGEP(
+ elementPast->getType()->getPointerElementType(), elementPast, negativeOne,
+ "arraydestroy.element");
if (useEHCleanup)
pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
@@ -2279,8 +2288,11 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
- begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
- end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
+ llvm::Type *elemTy = begin->getType()->getPointerElementType();
+ begin = CGF.Builder.CreateInBoundsGEP(
+ elemTy, begin, gepIndices, "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(
+ elemTy, end, gepIndices, "pad.arrayend");
}
// Destroy the array. We don't ever need an EH cleanup because we
@@ -2468,7 +2480,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Push a destructor cleanup for this parameter if the ABI requires it.
// Don't push a cleanup in a thunk for a method that will also emit a
// cleanup.
- if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
+ if (Ty->isRecordType() && !CurFuncIsThunk &&
Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
if (QualType::DestructionKind DtorKind =
D.needsDestruction(getContext())) {
@@ -2566,7 +2578,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Emit debug info for param declarations in non-thunk functions.
if (CGDebugInfo *DI = getDebugInfo()) {
if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
- DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
+ llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
+ &D, DeclPtr.getPointer(), ArgNo, Builder);
+ if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
+ DI->getParamDbgMappings().insert({Var, DILocalVar});
}
}
@@ -2605,3 +2620,57 @@ void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
getOpenMPRuntime().processRequiresDirective(D);
}
+
+void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
+ for (const Expr *E : D->varlists()) {
+ const auto *DE = cast<DeclRefExpr>(E);
+ const auto *VD = cast<VarDecl>(DE->getDecl());
+
+ // Skip all but globals.
+ if (!VD->hasGlobalStorage())
+ continue;
+
+ // Check if the global has been materialized yet or not. If not, we are done
+ // as any later generation will utilize the OMPAllocateDeclAttr. However, if
+ // we already emitted the global we might have done so before the
+ // OMPAllocateDeclAttr was attached, leading to the wrong address space
+ // (potentially). While not pretty, common practise is to remove the old IR
+ // global and generate a new one, so we do that here too. Uses are replaced
+ // properly.
+ StringRef MangledName = getMangledName(VD);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (!Entry)
+ continue;
+
+ // We can also keep the existing global if the address space is what we
+ // expect it to be, if not, it is replaced.
+ QualType ASTTy = VD->getType();
+ clang::LangAS GVAS = GetGlobalVarAddressSpace(VD);
+ auto TargetAS = getContext().getTargetAddressSpace(GVAS);
+ if (Entry->getType()->getAddressSpace() == TargetAS)
+ continue;
+
+ // Make a new global with the correct type / address space.
+ llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy);
+ llvm::PointerType *PTy = llvm::PointerType::get(Ty, TargetAS);
+
+ // Replace all uses of the old global with a cast. Since we mutate the type
+ // in place we need an intermediate that takes the spot of the old entry
+ // until we can create the cast.
+ llvm::GlobalVariable *DummyGV = new llvm::GlobalVariable(
+ getModule(), Entry->getValueType(), false,
+ llvm::GlobalValue::CommonLinkage, nullptr, "dummy", nullptr,
+ llvm::GlobalVariable::NotThreadLocal, Entry->getAddressSpace());
+ Entry->replaceAllUsesWith(DummyGV);
+
+ Entry->mutateType(PTy);
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ Entry, DummyGV->getType());
+
+ // Now we have a casted version of the changed global, the dummy can be
+ // replaced and deleted.
+ DummyGV->replaceAllUsesWith(NewPtrForOldDecl);
+ DummyGV->eraseFromParent();
+ }
+}
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index 3dbf4cc7cb97..d43fb99550a8 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -262,6 +262,58 @@ llvm::Function *CodeGenFunction::createAtExitStub(const VarDecl &VD,
return fn;
}
+/// Create a stub function, suitable for being passed to __pt_atexit_np,
+/// which passes the given address to the given destructor function.
+llvm::Function *CodeGenFunction::createTLSAtExitStub(
+ const VarDecl &D, llvm::FunctionCallee Dtor, llvm::Constant *Addr,
+ llvm::FunctionCallee &AtExit) {
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&D, Out);
+ }
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeLLVMFunctionInfo(
+ getContext().IntTy, /*instanceMethod=*/false, /*chainCall=*/false,
+ {getContext().IntTy}, FunctionType::ExtInfo(), {}, RequiredArgs::All);
+
+ // Get the stub function type, int(*)(int,...).
+ llvm::FunctionType *StubTy =
+ llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy}, true);
+
+ llvm::Function *DtorStub = CGM.CreateGlobalInitOrCleanUpFunction(
+ StubTy, FnName.str(), FI, D.getLocation());
+
+ CodeGenFunction CGF(CGM);
+
+ FunctionArgList Args;
+ ImplicitParamDecl IPD(CGM.getContext(), CGM.getContext().IntTy,
+ ImplicitParamDecl::Other);
+ Args.push_back(&IPD);
+ QualType ResTy = CGM.getContext().IntTy;
+
+ CGF.StartFunction(GlobalDecl(&D, DynamicInitKind::AtExit), ResTy, DtorStub,
+ FI, Args, D.getLocation(), D.getInit()->getExprLoc());
+
+ // Emit an artificial location for this function.
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
+
+ llvm::CallInst *call = CGF.Builder.CreateCall(Dtor, Addr);
+
+ // Make sure the call and the callee agree on calling convention.
+ if (auto *DtorFn = dyn_cast<llvm::Function>(
+ Dtor.getCallee()->stripPointerCastsAndAliases()))
+ call->setCallingConv(DtorFn->getCallingConv());
+
+ // Return 0 from function
+ CGF.Builder.CreateStore(llvm::Constant::getNullValue(CGM.IntTy),
+ CGF.ReturnValue);
+
+ CGF.FinishFunction();
+
+ return DtorStub;
+}
+
/// Register a global destructor using the C atexit runtime function.
void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::FunctionCallee dtor,
@@ -388,43 +440,43 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
Fn->setDoesNotThrow();
if (getLangOpts().Sanitize.has(SanitizerKind::Address) &&
- !isInSanitizerBlacklist(SanitizerKind::Address, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::Address, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
if (getLangOpts().Sanitize.has(SanitizerKind::KernelAddress) &&
- !isInSanitizerBlacklist(SanitizerKind::KernelAddress, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::KernelAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
if (getLangOpts().Sanitize.has(SanitizerKind::HWAddress) &&
- !isInSanitizerBlacklist(SanitizerKind::HWAddress, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::HWAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
if (getLangOpts().Sanitize.has(SanitizerKind::KernelHWAddress) &&
- !isInSanitizerBlacklist(SanitizerKind::KernelHWAddress, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
- !isInSanitizerBlacklist(SanitizerKind::MemTag, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::MemTag, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
- !isInSanitizerBlacklist(SanitizerKind::Thread, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::Thread, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
if (getLangOpts().Sanitize.has(SanitizerKind::Memory) &&
- !isInSanitizerBlacklist(SanitizerKind::Memory, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::Memory, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
if (getLangOpts().Sanitize.has(SanitizerKind::KernelMemory) &&
- !isInSanitizerBlacklist(SanitizerKind::KernelMemory, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::KernelMemory, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
if (getLangOpts().Sanitize.has(SanitizerKind::SafeStack) &&
- !isInSanitizerBlacklist(SanitizerKind::SafeStack, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::SafeStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SafeStack);
if (getLangOpts().Sanitize.has(SanitizerKind::ShadowCallStack) &&
- !isInSanitizerBlacklist(SanitizerKind::ShadowCallStack, Fn, Loc))
+ !isInNoSanitizeList(SanitizerKind::ShadowCallStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
return Fn;
@@ -499,7 +551,8 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
} else if (PerformInit && ISA) {
EmitPointerToInitFunc(D, Addr, Fn, ISA);
} else if (auto *IPA = D->getAttr<InitPriorityAttr>()) {
- OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
+ OrderGlobalInitsOrStermFinalizers Key(IPA->getPriority(),
+ PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
} else if (isTemplateInstantiation(D->getTemplateSpecializationKind()) ||
getContext().GetGVALinkageForVariable(D) == GVA_DiscardableODR) {
@@ -566,6 +619,17 @@ static SmallString<128> getTransformedFileName(llvm::Module &M) {
return FileName;
}
+static std::string getPrioritySuffix(unsigned int Priority) {
+ assert(Priority <= 65535 && "Priority should always be <= 65535.");
+
+ // Compute the function suffix from priority. Prepend with zeroes to make
+ // sure the function names are also ordered as priorities.
+ std::string PrioritySuffix = llvm::utostr(Priority);
+ PrioritySuffix = std::string(6 - PrioritySuffix.size(), '0') + PrioritySuffix;
+
+ return PrioritySuffix;
+}
+
void
CodeGenModule::EmitCXXGlobalInitFunc() {
while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
@@ -577,12 +641,8 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
- const bool UseSinitAndSterm = getCXXABI().useSinitAndSterm();
// Create our global prioritized initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
- assert(!UseSinitAndSterm && "Prioritized sinit and sterm functions are not"
- " supported yet.");
-
SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
@@ -596,14 +656,10 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
PrioE = std::upper_bound(I + 1, E, *I, GlobalInitPriorityCmp());
LocalCXXGlobalInits.clear();
- unsigned Priority = I->first.priority;
- // Compute the function suffix from priority. Prepend with zeroes to make
- // sure the function names are also ordered as priorities.
- std::string PrioritySuffix = llvm::utostr(Priority);
- // Priority is always <= 65535 (enforced by sema).
- PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
+
+ unsigned int Priority = I->first.priority;
llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
- FTy, "_GLOBAL__I_" + PrioritySuffix, FI);
+ FTy, "_GLOBAL__I_" + getPrioritySuffix(Priority), FI);
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
@@ -614,7 +670,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
PrioritizedCXXGlobalInits.clear();
}
- if (UseSinitAndSterm && CXXGlobalInits.empty())
+ if (getCXXABI().useSinitAndSterm() && CXXGlobalInits.empty())
return;
// Include the filename in the symbol name. Including "sub_" matches gcc
@@ -640,7 +696,9 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
}
- if (getLangOpts().HIP) {
+ assert(!getLangOpts().CUDA || !getLangOpts().CUDAIsDevice ||
+ getLangOpts().GPUAllowDeviceInit);
+ if (getLangOpts().HIP && getLangOpts().CUDAIsDevice) {
Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
Fn->addFnAttr("device-init");
}
@@ -649,12 +707,50 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
}
void CodeGenModule::EmitCXXGlobalCleanUpFunc() {
- if (CXXGlobalDtorsOrStermFinalizers.empty())
+ if (CXXGlobalDtorsOrStermFinalizers.empty() &&
+ PrioritizedCXXStermFinalizers.empty())
return;
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
const CGFunctionInfo &FI = getTypes().arrangeNullaryFunction();
+ // Create our global prioritized cleanup function.
+ if (!PrioritizedCXXStermFinalizers.empty()) {
+ SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8> LocalCXXStermFinalizers;
+ llvm::array_pod_sort(PrioritizedCXXStermFinalizers.begin(),
+ PrioritizedCXXStermFinalizers.end());
+ // Iterate over "chunks" of dtors with the same priority and emit each
+ // chunk into a separate function. Note - everything is sorted first by
+ // priority, second - by lex order, so we emit dtor functions in order.
+ for (SmallVectorImpl<StermFinalizerData>::iterator
+ I = PrioritizedCXXStermFinalizers.begin(),
+ E = PrioritizedCXXStermFinalizers.end();
+ I != E;) {
+ SmallVectorImpl<StermFinalizerData>::iterator PrioE =
+ std::upper_bound(I + 1, E, *I, StermFinalizerPriorityCmp());
+
+ LocalCXXStermFinalizers.clear();
+
+ unsigned int Priority = I->first.priority;
+ llvm::Function *Fn = CreateGlobalInitOrCleanUpFunction(
+ FTy, "_GLOBAL__a_" + getPrioritySuffix(Priority), FI);
+
+ for (; I < PrioE; ++I) {
+ llvm::FunctionCallee DtorFn = I->second;
+ LocalCXXStermFinalizers.emplace_back(DtorFn.getFunctionType(),
+ DtorFn.getCallee(), nullptr);
+ }
+
+ CodeGenFunction(*this).GenerateCXXGlobalCleanUpFunc(
+ Fn, LocalCXXStermFinalizers);
+ AddGlobalDtor(Fn, Priority);
+ }
+ PrioritizedCXXStermFinalizers.clear();
+ }
+
+ if (CXXGlobalDtorsOrStermFinalizers.empty())
+ return;
+
// Create our global cleanup function.
llvm::Function *Fn =
CreateGlobalInitOrCleanUpFunction(FTy, "_GLOBAL__D_a", FI);
@@ -761,8 +857,9 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
- const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsOrStermFinalizers) {
+ ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
+ llvm::Constant *>>
+ DtorsOrStermFinalizers) {
{
auto NL = ApplyDebugLocation::CreateEmpty(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index 7a64963183bc..9f65e9eb120c 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -39,6 +39,18 @@ static llvm::FunctionCallee getFreeExceptionFn(CodeGenModule &CGM) {
return CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
}
+static llvm::FunctionCallee getSehTryBeginFn(CodeGenModule &CGM) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.begin");
+}
+
+static llvm::FunctionCallee getSehTryEndFn(CodeGenModule &CGM) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ return CGM.CreateRuntimeFunction(FTy, "llvm.seh.try.end");
+}
+
static llvm::FunctionCallee getUnexpectedFn(CodeGenModule &CGM) {
// void __cxa_call_unexpected(void *thrown_exception);
@@ -467,15 +479,16 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot) {
// noexcept functions are simple terminate scopes.
- EHStack.pushTerminate();
+ if (!getLangOpts().EHAsynch) // -EHa: HW exceptions can still occur
+ EHStack.pushTerminate();
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
// encode these in an object file but MSVC doesn't do anything with it.
if (getTarget().getCXXABI().isMicrosoft())
return;
- // In wasm we currently treat 'throw()' in the same way as 'noexcept'. In
+ // In Wasm EH we currently treat 'throw()' in the same way as 'noexcept'. In
// case of throw with types, we ignore it and print a warning for now.
- // TODO Correctly handle exception specification in wasm
+ // TODO Correctly handle exception specification in Wasm EH
if (CGM.getLangOpts().hasWasmExceptions()) {
if (EST == EST_DynamicNone)
EHStack.pushTerminate();
@@ -485,6 +498,19 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
<< FD->getExceptionSpecSourceRange();
return;
}
+ // Currently Emscripten EH only handles 'throw()' but not 'throw' with
+ // types. 'throw()' handling will be done in JS glue code so we don't need
+ // to do anything in that case. Just print a warning message in case of
+ // throw with types.
+ // TODO Correctly handle exception specification in Emscripten EH
+ if (getTarget().getCXXABI() == TargetCXXABI::WebAssembly &&
+ CGM.getLangOpts().getExceptionHandling() ==
+ LangOptions::ExceptionHandlingKind::None &&
+ EST == EST_Dynamic)
+ CGM.getDiags().Report(D->getLocation(),
+ diag::warn_wasm_dynamic_exception_spec_ignored)
+ << FD->getExceptionSpecSourceRange();
+
unsigned NumExceptions = Proto->getNumExceptions();
EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
@@ -544,7 +570,7 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (!FD) {
// Check if CapturedDecl is nothrow and pop terminate scope for it.
if (const CapturedDecl* CD = dyn_cast_or_null<CapturedDecl>(D)) {
- if (CD->isNothrow())
+ if (CD->isNothrow() && !EHStack.empty())
EHStack.popTerminate();
}
return;
@@ -554,7 +580,8 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
return;
ExceptionSpecificationType EST = Proto->getExceptionSpecType();
- if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot) {
+ if (isNoexceptExceptionSpec(EST) && Proto->canThrow() == CT_Cannot &&
+ !EHStack.empty() /* possibly empty under async exceptions */) {
EHStack.popTerminate();
} else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
// TODO: Revisit exception specifications for the MS ABI. There is a way to
@@ -610,6 +637,10 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
} else {
// No exception decl indicates '...', a catch-all.
CatchScope->setHandler(I, CGM.getCXXABI().getCatchAllTypeInfo(), Handler);
+ // Under async exceptions, catch(...) needs to catch HW exceptions too
+ // Mark scope with SehTryBegin as a SEH __try scope
+ if (getLangOpts().EHAsynch)
+ EmitRuntimeCallOrInvoke(getSehTryBeginFn(CGM));
}
}
}
@@ -724,7 +755,7 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
// If exceptions are disabled/ignored and SEH is not in use, then there is no
// invoke destination. SEH "works" even if exceptions are off. In practice,
// this means that C++ destructors and other EH cleanups don't run, which is
- // consistent with MSVC's behavior.
+ // consistent with MSVC's behavior, except in the presence of -EHa
const LangOptions &LO = CGM.getLangOpts();
if (!LO.Exceptions || LO.IgnoreExceptions) {
if (!LO.Borland && !LO.MicrosoftExt)
@@ -1343,7 +1374,8 @@ namespace {
CGF.EmitBlock(RethrowBB);
if (SavedExnVar) {
CGF.EmitRuntimeCallOrInvoke(RethrowFn,
- CGF.Builder.CreateAlignedLoad(SavedExnVar, CGF.getPointerAlign()));
+ CGF.Builder.CreateAlignedLoad(CGF.Int8PtrTy, SavedExnVar,
+ CGF.getPointerAlign()));
} else {
CGF.EmitRuntimeCallOrInvoke(RethrowFn);
}
@@ -1552,17 +1584,8 @@ llvm::BasicBlock *CodeGenFunction::getTerminateFunclet() {
CurrentFuncletPad = Builder.CreateCleanupPad(ParentPad);
// Emit the __std_terminate call.
- llvm::Value *Exn = nullptr;
- // In case of wasm personality, we need to pass the exception value to
- // __clang_call_terminate function.
- if (getLangOpts().CPlusPlus &&
- EHPersonality::get(*this).isWasmPersonality()) {
- llvm::Function *GetExnFn =
- CGM.getIntrinsic(llvm::Intrinsic::wasm_get_exception);
- Exn = Builder.CreateCall(GetExnFn, CurrentFuncletPad);
- }
llvm::CallInst *terminateCall =
- CGM.getCXXABI().emitTerminateForUnexpectedException(*this, Exn);
+ CGM.getCXXABI().emitTerminateForUnexpectedException(*this, nullptr);
terminateCall->setDoesNotReturn();
Builder.CreateUnreachable();
@@ -1614,7 +1637,23 @@ void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
JumpDest TryExit = getJumpDestInCurrentScope("__try.__leave");
SEHTryEpilogueStack.push_back(&TryExit);
+
+ llvm::BasicBlock *TryBB = nullptr;
+ // IsEHa: emit an invoke to _seh_try_begin() runtime for -EHa
+ if (getLangOpts().EHAsynch) {
+ EmitRuntimeCallOrInvoke(getSehTryBeginFn(CGM));
+ if (SEHTryEpilogueStack.size() == 1) // outermost only
+ TryBB = Builder.GetInsertBlock();
+ }
+
EmitStmt(S.getTryBlock());
+
+ // Volatilize all blocks in the __try, up to the current insert point
+ if (TryBB) {
+ llvm::SmallPtrSet<llvm::BasicBlock *, 10> Visited;
+ VolatilizeTryBlocks(TryBB, Visited);
+ }
+
SEHTryEpilogueStack.pop_back();
if (!TryExit.getBlock()->use_empty())
@@ -1625,6 +1664,35 @@ void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
ExitSEHTryStmt(S);
}
+// Recursively walk through blocks in a _try
+// and make all memory instructions volatile
+void CodeGenFunction::VolatilizeTryBlocks(
+ llvm::BasicBlock *BB, llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V) {
+ if (BB == SEHTryEpilogueStack.back()->getBlock() /* end of Try */ ||
+ !V.insert(BB).second /* already visited */ ||
+ !BB->getParent() /* not emitted */ || BB->empty())
+ return;
+
+ if (!BB->isEHPad()) {
+ for (llvm::BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE;
+ ++J) {
+ if (auto LI = dyn_cast<llvm::LoadInst>(J)) {
+ LI->setVolatile(true);
+ } else if (auto SI = dyn_cast<llvm::StoreInst>(J)) {
+ SI->setVolatile(true);
+ } else if (auto* MCI = dyn_cast<llvm::MemIntrinsic>(J)) {
+ MCI->setVolatile(llvm::ConstantInt::get(Builder.getInt1Ty(), 1));
+ }
+ }
+ }
+ const llvm::Instruction *TI = BB->getTerminator();
+ if (TI) {
+ unsigned N = TI->getNumSuccessors();
+ for (unsigned I = 0; I < N; I++)
+ VolatilizeTryBlocks(TI->getSuccessor(I), V);
+ }
+}
+
namespace {
struct PerformSEHFinally final : EHScopeStack::Cleanup {
llvm::Function *OutlinedFinally;
@@ -1702,10 +1770,8 @@ struct CaptureFinder : ConstStmtVisitor<CaptureFinder> {
void VisitDeclRefExpr(const DeclRefExpr *E) {
// If this is already a capture, just make sure we capture 'this'.
- if (E->refersToEnclosingVariableOrCapture()) {
+ if (E->refersToEnclosingVariableOrCapture())
Captures.insert(ParentThis);
- return;
- }
const auto *D = dyn_cast<VarDecl>(E->getDecl());
if (D && D->isLocalVarDeclOrParm() && D->hasLocalStorage())
@@ -1865,11 +1931,6 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
// Create llvm.localrecover calls for all captures.
for (const VarDecl *VD : Finder.Captures) {
- if (isa<ImplicitParamDecl>(VD)) {
- CGM.ErrorUnsupported(VD, "'this' captured by SEH");
- CXXThisValue = llvm::UndefValue::get(ConvertTypeForMem(VD->getType()));
- continue;
- }
if (VD->getType()->isVariablyModifiedType()) {
CGM.ErrorUnsupported(VD, "VLA captured by SEH");
continue;
@@ -1877,6 +1938,12 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
assert((isa<ImplicitParamDecl>(VD) || VD->isLocalVarDeclOrParm()) &&
"captured non-local variable");
+ auto L = ParentCGF.LambdaCaptureFields.find(VD);
+ if (L != ParentCGF.LambdaCaptureFields.end()) {
+ LambdaCaptureFields[VD] = L->second;
+ continue;
+ }
+
// If this decl hasn't been declared yet, it will be declared in the
// OutlinedStmt.
auto I = ParentCGF.LocalDeclMap.find(VD);
@@ -1884,8 +1951,30 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
continue;
Address ParentVar = I->second;
- setAddrOfLocalVar(
- VD, recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP));
+ Address Recovered =
+ recoverAddrOfEscapedLocal(ParentCGF, ParentVar, ParentFP);
+ setAddrOfLocalVar(VD, Recovered);
+
+ if (isa<ImplicitParamDecl>(VD)) {
+ CXXABIThisAlignment = ParentCGF.CXXABIThisAlignment;
+ CXXThisAlignment = ParentCGF.CXXThisAlignment;
+ CXXABIThisValue = Builder.CreateLoad(Recovered, "this");
+ if (ParentCGF.LambdaThisCaptureField) {
+ LambdaThisCaptureField = ParentCGF.LambdaThisCaptureField;
+ // We are in a lambda function where "this" is captured, so
+ // CXXThisValue needs to be loaded from the lambda capture
+ LValue ThisFieldLValue =
+ EmitLValueForLambdaField(LambdaThisCaptureField);
+ if (!LambdaThisCaptureField->getType()->isPointerType()) {
+ CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
+ } else {
+ CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
+ .getScalarVal();
+ }
+ } else {
+ CXXThisValue = CXXABIThisValue;
+ }
+ }
}
if (Finder.SEHCodeSlot.isValid()) {
@@ -2025,8 +2114,8 @@ void CodeGenFunction::EmitSEHExceptionCodeSave(CodeGenFunction &ParentCGF,
llvm::Type *PtrsTy = llvm::StructType::get(RecordTy, CGM.VoidPtrTy);
llvm::Value *Ptrs = Builder.CreateBitCast(SEHInfo, PtrsTy->getPointerTo());
llvm::Value *Rec = Builder.CreateStructGEP(PtrsTy, Ptrs, 0);
- Rec = Builder.CreateAlignedLoad(Rec, getPointerAlign());
- llvm::Value *Code = Builder.CreateAlignedLoad(Rec, getIntAlign());
+ Rec = Builder.CreateAlignedLoad(RecordTy, Rec, getPointerAlign());
+ llvm::Value *Code = Builder.CreateAlignedLoad(Int32Ty, Rec, getIntAlign());
assert(!SEHCodeSlotStack.empty() && "emitting EH code outside of __except");
Builder.CreateStore(Code, SEHCodeSlotStack.back());
}
@@ -2105,6 +2194,12 @@ void CodeGenFunction::ExitSEHTryStmt(const SEHTryStmt &S) {
return;
}
+ // IsEHa: emit an invoke of _seh_try_end() to mark the end of the FT flow
+ if (getLangOpts().EHAsynch && Builder.GetInsertBlock()) {
+ llvm::FunctionCallee SehTryEnd = getSehTryEndFn(CGM);
+ EmitRuntimeCallOrInvoke(SehTryEnd);
+ }
+
// Otherwise, we must have an __except block.
const SEHExceptStmt *Except = S.getExceptHandler();
assert(Except && "__try must have __finally xor __except");
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index a3f90449bb4c..bf514aab8851 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGCleanup.h"
@@ -37,6 +38,7 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
#include <string>
@@ -198,7 +200,7 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
/// EmitIgnoredExpr - Emit code to compute the specified expression,
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
- if (E->isRValue())
+ if (E->isPRValue())
return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
// Just emit it as an l-value and drop the result.
@@ -402,24 +404,22 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
(Ty->isArrayType() || Ty->isRecordType()) &&
CGF.CGM.isTypeConstant(Ty, true))
if (auto Init = ConstantEmitter(CGF).tryEmitAbstract(Inner, Ty)) {
- if (auto AddrSpace = CGF.getTarget().getConstantAddressSpace()) {
- auto AS = AddrSpace.getValue();
- auto *GV = new llvm::GlobalVariable(
- CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
- llvm::GlobalValue::NotThreadLocal,
- CGF.getContext().getTargetAddressSpace(AS));
- CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
- GV->setAlignment(alignment.getAsAlign());
- llvm::Constant *C = GV;
- if (AS != LangAS::Default)
- C = TCG.performAddrSpaceCast(
- CGF.CGM, GV, AS, LangAS::Default,
- GV->getValueType()->getPointerTo(
- CGF.getContext().getTargetAddressSpace(LangAS::Default)));
- // FIXME: Should we put the new global into a COMDAT?
- return Address(C, alignment);
- }
+ auto AS = CGF.CGM.GetGlobalConstantAddressSpace();
+ auto *GV = new llvm::GlobalVariable(
+ CGF.CGM.getModule(), Init->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Init, ".ref.tmp", nullptr,
+ llvm::GlobalValue::NotThreadLocal,
+ CGF.getContext().getTargetAddressSpace(AS));
+ CharUnits alignment = CGF.getContext().getTypeAlignInChars(Ty);
+ GV->setAlignment(alignment.getAsAlign());
+ llvm::Constant *C = GV;
+ if (AS != LangAS::Default)
+ C = TCG.performAddrSpaceCast(
+ CGF.CGM, GV, AS, LangAS::Default,
+ GV->getValueType()->getPointerTo(
+ CGF.getContext().getTargetAddressSpace(LangAS::Default)));
+ // FIXME: Should we put the new global into a COMDAT?
+ return Address(C, alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
@@ -826,9 +826,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty.getUnqualifiedType(),
Out);
- // Blacklist based on the mangled type.
- if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
- SanitizerKind::Vptr, Out.str())) {
+ // Contained in NoSanitizeList based on the mangled type.
+ if (!CGM.getContext().getNoSanitizeList().containsType(SanitizerKind::Vptr,
+ Out.str())) {
llvm::hash_code TypeHash = hash_value(Out.str());
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
@@ -850,9 +850,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(IntPtrTy,
CacheSize-1));
llvm::Value *Indices[] = { Builder.getInt32(0), Slot };
- llvm::Value *CacheVal =
- Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(Cache, Indices),
- getPointerAlign());
+ llvm::Value *CacheVal = Builder.CreateAlignedLoad(
+ IntPtrTy, Builder.CreateInBoundsGEP(HashTable, Cache, Indices),
+ getPointerAlign());
// If the hash isn't in the cache, call a runtime handler to perform the
// hard work of checking whether the vptr is for an object of the right
@@ -2832,8 +2832,21 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return LV;
}
- if (const auto *FD = dyn_cast<FunctionDecl>(ND))
- return EmitFunctionDeclLValue(*this, E, FD);
+ if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
+ LValue LV = EmitFunctionDeclLValue(*this, E, FD);
+
+ // Emit debuginfo for the function declaration if the target wants to.
+ if (getContext().getTargetInfo().allowDebugInfoForExternalRef()) {
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo()) {
+ auto *Fn =
+ cast<llvm::Function>(LV.getPointer(*this)->stripPointerCasts());
+ if (!Fn->getSubprogram())
+ DI->EmitFunctionDecl(FD, FD->getLocation(), T, Fn);
+ }
+ }
+
+ return LV;
+ }
// FIXME: While we're emitting a binding from an enclosing scope, all other
// DeclRefExprs we see should be implicitly treated as if they also refer to
@@ -3382,14 +3395,14 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::FunctionType::get(VoidTy, {VoidPtrTy, VoidPtrTy}, false),
llvm::GlobalValue::WeakODRLinkage, "__cfi_check_fail", &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
F->setVisibility(llvm::GlobalValue::HiddenVisibility);
StartFunction(GlobalDecl(), CGM.getContext().VoidTy, F, FI, Args,
SourceLocation());
- // This function should not be affected by blacklist. This function does
+ // This function is not affected by NoSanitizeList. This function does
// not have a source location, but "src:*" would still apply. Revert any
// changes to SanOpts made in StartFunction.
SanOpts = CGM.getLangOpts().Sanitize;
@@ -3567,6 +3580,7 @@ static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
}
static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
+ llvm::Type *elemType,
llvm::Value *ptr,
ArrayRef<llvm::Value*> indices,
bool inbounds,
@@ -3578,7 +3592,7 @@ static llvm::Value *emitArraySubscriptGEP(CodeGenFunction &CGF,
CodeGenFunction::NotSubtraction, loc,
name);
} else {
- return CGF.Builder.CreateGEP(ptr, indices, name);
+ return CGF.Builder.CreateGEP(elemType, ptr, indices, name);
}
}
@@ -3672,8 +3686,8 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
if (!LastIndex ||
(!CGF.IsInPreservedAIRegion && !IsPreserveAIArrayBase(CGF, Base))) {
eltPtr = emitArraySubscriptGEP(
- CGF, addr.getPointer(), indices, inbounds, signedIndices,
- loc, name);
+ CGF, addr.getElementType(), addr.getPointer(), indices, inbounds,
+ signedIndices, loc, name);
} else {
// Remember the original array subscript for bpf target
unsigned idx = LastIndex->getZExtValue();
@@ -3798,8 +3812,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
CharUnits EltAlign =
getArrayElementAlign(Addr.getAlignment(), Idx, InterfaceSize);
llvm::Value *EltPtr =
- emitArraySubscriptGEP(*this, Addr.getPointer(), ScaledIdx, false,
- SignedIndices, E->getExprLoc());
+ emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
+ ScaledIdx, false, SignedIndices, E->getExprLoc());
Addr = Address(EltPtr, EltAlign);
// Cast back.
@@ -4171,8 +4185,10 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
/// Given that we are currently emitting a lambda, emit an l-value for
/// one of its members.
LValue CodeGenFunction::EmitLValueForLambdaField(const FieldDecl *Field) {
- assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
- assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
+ if (CurCodeDecl) {
+ assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent()->isLambda());
+ assert(cast<CXXMethodDecl>(CurCodeDecl)->getParent() == Field->getParent());
+ }
QualType LambdaTagType =
getContext().getTagDeclType(Field->getParent());
LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue, LambdaTagType);
@@ -4643,6 +4659,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -4871,8 +4888,12 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
return CGCallee::forBuiltin(builtinID, FD);
}
- llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
- return CGCallee::forDirect(calleePtr, GD);
+ llvm::Constant *CalleePtr = EmitFunctionDeclPointer(CGF.CGM, GD);
+ if (CGF.CGM.getLangOpts().CUDA && !CGF.CGM.getLangOpts().CUDAIsDevice &&
+ FD->hasAttr<CUDAGlobalAttr>())
+ CalleePtr = CGF.CGM.getCUDARuntime().getKernelStub(
+ cast<llvm::GlobalValue>(CalleePtr->stripPointerCasts()));
+ return CGCallee::forDirect(CalleePtr, GD);
}
CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
@@ -5123,9 +5144,9 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
getContext().getFunctionTypeWithExceptionSpec(PointeeType, EST_None);
llvm::Constant *FTRTTIConst =
CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
- llvm::Type *PrefixStructTyElems[] = {PrefixSig->getType(), Int32Ty};
+ llvm::Type *PrefixSigType = PrefixSig->getType();
llvm::StructType *PrefixStructTy = llvm::StructType::get(
- CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
+ CGM.getLLVMContext(), {PrefixSigType, Int32Ty}, /*isPacked=*/true);
llvm::Value *CalleePtr = Callee.getFunctionPointer();
@@ -5134,7 +5155,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
llvm::Value *CalleeSigPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
llvm::Value *CalleeSig =
- Builder.CreateAlignedLoad(CalleeSigPtr, getIntAlign());
+ Builder.CreateAlignedLoad(PrefixSigType, CalleeSigPtr, getIntAlign());
llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
llvm::BasicBlock *Cont = createBasicBlock("cont");
@@ -5145,7 +5166,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
llvm::Value *CalleeRTTIPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 1);
llvm::Value *CalleeRTTIEncoded =
- Builder.CreateAlignedLoad(CalleeRTTIPtr, getPointerAlign());
+ Builder.CreateAlignedLoad(Int32Ty, CalleeRTTIPtr, getPointerAlign());
llvm::Value *CalleeRTTI =
DecodeAddrUsedInPrologue(CalleePtr, CalleeRTTIEncoded);
llvm::Value *CalleeRTTIMatch =
@@ -5266,9 +5287,21 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
Callee.setFunctionPointer(CalleePtr);
}
+ // HIP function pointer contains kernel handle when it is used in triple
+ // chevron. The kernel stub needs to be loaded from kernel handle and used
+ // as callee.
+ if (CGM.getLangOpts().HIP && !CGM.getLangOpts().CUDAIsDevice &&
+ isa<CUDAKernelCallExpr>(E) &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ llvm::Value *Handle = Callee.getFunctionPointer();
+ auto *Cast =
+ Builder.CreateBitCast(Handle, Handle->getType()->getPointerTo());
+ auto *Stub = Builder.CreateLoad(Address(Cast, CGM.getPointerAlign()));
+ Callee.setFunctionPointer(Stub);
+ }
llvm::CallBase *CallOrInvoke = nullptr;
RValue Call = EmitCall(FnInfo, Callee, ReturnValue, Args, &CallOrInvoke,
- E->getExprLoc());
+ E == MustTailCall, E->getExprLoc());
// Generate function declaration DISuprogram in order to be used
// in debug info about call sites.
@@ -5365,7 +5398,7 @@ static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
// directly into the slot.
typedef CodeGenFunction::OpaqueValueMappingData OVMA;
OVMA opaqueData;
- if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ if (ov == resultExpr && ov->isPRValue() && !forLValue &&
CodeGenFunction::hasAggregateEvaluationKind(ov->getType())) {
CGF.EmitAggExpr(ov->getSourceExpr(), slot);
LValue LV = CGF.MakeAddrLValue(slot.getAddress(), ov->getType(),
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 60ea1b2af037..1e81ad9f2dc7 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -276,7 +276,7 @@ void AggExprEmitter::withReturnValueSlot(
RetAddr = Dest.getAddress();
} else {
RetAddr = CGF.CreateMemTemp(RetTy, "tmp", &RetAllocaAddr);
- uint64_t Size =
+ llvm::TypeSize Size =
CGF.CGM.getDataLayout().getTypeAllocSize(CGF.ConvertTypeForMem(RetTy));
LifetimeSizePtr = CGF.EmitLifetimeStart(Size, RetAllocaAddr.getPointer());
if (LifetimeSizePtr) {
@@ -417,8 +417,8 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
llvm::Value *IdxStart[] = { Zero, Zero };
- llvm::Value *ArrayStart =
- Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxStart, "arraystart");
+ llvm::Value *ArrayStart = Builder.CreateInBoundsGEP(
+ ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxStart, "arraystart");
CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
++Field;
@@ -434,8 +434,8 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ArrayType->getElementType())) {
// End pointer.
llvm::Value *IdxEnd[] = { Zero, Size };
- llvm::Value *ArrayEnd =
- Builder.CreateInBoundsGEP(ArrayPtr.getPointer(), IdxEnd, "arrayend");
+ llvm::Value *ArrayEnd = Builder.CreateInBoundsGEP(
+ ArrayPtr.getElementType(), ArrayPtr.getPointer(), IdxEnd, "arrayend");
CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
} else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
// Length.
@@ -484,12 +484,14 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = { zero, zero };
- llvm::Value *begin =
- Builder.CreateInBoundsGEP(DestPtr.getPointer(), indices, "arrayinit.begin");
+ llvm::Value *begin = Builder.CreateInBoundsGEP(
+ DestPtr.getElementType(), DestPtr.getPointer(), indices,
+ "arrayinit.begin");
CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
CharUnits elementAlign =
DestPtr.getAlignment().alignmentOfArrayElement(elementSize);
+ llvm::Type *llvmElementType = begin->getType()->getPointerElementType();
// Consider initializing the array by copying from a global. For this to be
// more efficient than per-element initialization, the size of the elements
@@ -552,7 +554,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
for (uint64_t i = 0; i != NumInitElements; ++i) {
// Advance to the next element.
if (i > 0) {
- element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+ element = Builder.CreateInBoundsGEP(
+ llvmElementType, element, one, "arrayinit.element");
// Tell the cleanup that it needs to destroy up to this
// element. TODO: some of these stores can be trivially
@@ -581,14 +584,15 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
// Advance to the start of the rest of the array.
if (NumInitElements) {
- element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ element = Builder.CreateInBoundsGEP(
+ llvmElementType, element, one, "arrayinit.start");
if (endOfInit.isValid()) Builder.CreateStore(element, endOfInit);
}
// Compute the end of the array.
- llvm::Value *end = Builder.CreateInBoundsGEP(begin,
- llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
- "arrayinit.end");
+ llvm::Value *end = Builder.CreateInBoundsGEP(
+ llvmElementType, begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements), "arrayinit.end");
llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
@@ -616,8 +620,8 @@ void AggExprEmitter::EmitArrayInit(Address DestPtr, llvm::ArrayType *AType,
}
// Move on to the next element.
- llvm::Value *nextElement =
- Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+ llvm::Value *nextElement = Builder.CreateInBoundsGEP(
+ llvmElementType, currentElement, one, "arrayinit.next");
// Tell the EH cleanup that we finished with the last element.
if (endOfInit.isValid()) Builder.CreateStore(nextElement, endOfInit);
@@ -901,6 +905,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLOpaqueType:
+ case CK_MatrixCast:
case CK_IntToOCLSampler:
case CK_FloatingToFixedPoint:
@@ -1422,6 +1427,7 @@ static bool castPreservesZero(const CastExpr *CE) {
case CK_PointerToIntegral:
// Language extensions.
case CK_VectorSplat:
+ case CK_MatrixCast:
case CK_NonAtomicToAtomic:
case CK_AtomicToNonAtomic:
return true;
@@ -1777,8 +1783,9 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// destPtr is an array*. Construct an elementType* by drilling down a level.
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
llvm::Value *indices[] = {zero, zero};
- llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
- "arrayinit.begin");
+ llvm::Value *begin = Builder.CreateInBoundsGEP(
+ destPtr.getElementType(), destPtr.getPointer(), indices,
+ "arrayinit.begin");
// Prepare to special-case multidimensional array initialization: we avoid
// emitting multiple destructor loops in that case.
@@ -1800,7 +1807,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
llvm::PHINode *index =
Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
index->addIncoming(zero, entryBB);
- llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
+ llvm::Value *element = Builder.CreateInBoundsGEP(
+ begin->getType()->getPointerElementType(), begin, index);
// Prepare for a cleanup.
QualType::DestructionKind dtorKind = elementType.isDestructedType();
@@ -2056,7 +2064,7 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
Record->hasTrivialCopyAssignment() ||
Record->hasTrivialMoveConstructor() ||
Record->hasTrivialMoveAssignment() ||
- Record->isUnion()) &&
+ Record->hasAttr<TrivialABIAttr>() || Record->isUnion()) &&
"Trying to aggregate-copy a type without a trivial copy/move "
"constructor or assignment operator");
// Ignore empty classes in C++.
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index e1907a6f0680..96cf977ca290 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -87,6 +87,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
return EmitCall(FnInfo, Callee, ReturnValue, Args, nullptr,
+ CE && CE == MustTailCall,
CE ? CE->getExprLoc() : SourceLocation());
}
@@ -112,7 +113,7 @@ RValue CodeGenFunction::EmitCXXDestructorCall(
commonEmitCXXMemberOrOperatorCall(*this, DtorDecl, This, ImplicitParam,
ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(Dtor), Callee,
- ReturnValueSlot(), Args, nullptr,
+ ReturnValueSlot(), Args, nullptr, CE && CE == MustTailCall,
CE ? CE->getExprLoc() : SourceLocation{});
}
@@ -472,7 +473,8 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
EmitCallArgs(Args, FPT, E->arguments());
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
/*PrefixSize=*/0),
- Callee, ReturnValue, Args, nullptr, E->getExprLoc());
+ Callee, ReturnValue, Args, nullptr, E == MustTailCall,
+ E->getExprLoc());
}
RValue
@@ -1048,7 +1050,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
->getSize().getZExtValue();
CurPtr =
- Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
+ CurPtr.getPointer(),
Builder.getSize(InitListElements),
"string.init.end"),
CurPtr.getAlignment().alignmentAtOffset(InitListElements *
@@ -1107,7 +1110,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
ILE->getInit(i)->getType(), CurPtr,
AggValueSlot::DoesNotOverlap);
- CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
+ CurPtr.getPointer(),
Builder.getSize(1),
"array.exp.next"),
StartAlign.alignmentAtOffset((i + 1) * ElementSize));
@@ -1226,7 +1230,8 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Find the end of the array, hoisted out of the loop.
llvm::Value *EndPtr =
- Builder.CreateInBoundsGEP(BeginPtr.getPointer(), NumElements, "array.end");
+ Builder.CreateInBoundsGEP(BeginPtr.getElementType(), BeginPtr.getPointer(),
+ NumElements, "array.end");
// If the number of elements isn't constant, we have to now check if there is
// anything left to initialize.
@@ -2034,8 +2039,8 @@ static void EmitArrayDelete(CodeGenFunction &CGF,
deletedPtr.getAlignment().alignmentOfArrayElement(elementSize);
llvm::Value *arrayBegin = deletedPtr.getPointer();
- llvm::Value *arrayEnd =
- CGF.Builder.CreateInBoundsGEP(arrayBegin, numElements, "delete.end");
+ llvm::Value *arrayEnd = CGF.Builder.CreateInBoundsGEP(
+ deletedPtr.getElementType(), arrayBegin, numElements, "delete.end");
// Note that it is legal to allocate a zero-length array, and we
// can never fold the check away because the length should always
@@ -2098,7 +2103,8 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
GEP.push_back(Zero);
}
- Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getPointer(), GEP, "del.first"),
+ Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
+ Ptr.getPointer(), GEP, "del.first"),
Ptr.getAlignment());
}
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index 6b6b901e0376..5409e82d437e 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -533,6 +533,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_FixedPointToBoolean:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
+ case CK_MatrixCast:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp
index 497f9c04c9f8..734024149bbb 100644
--- a/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/clang/lib/CodeGen/CGExprConstant.cpp
@@ -1170,6 +1170,7 @@ public:
case CK_FixedPointToIntegral:
case CK_IntegralToFixedPoint:
case CK_ZeroToOCLOpaqueType:
+ case CK_MatrixCast:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index 6f7e8263fa10..418f23bd1a97 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -348,6 +348,9 @@ public:
EmitImplicitIntegerSignChangeChecks(
SanOpts.has(SanitizerKind::ImplicitIntegerSignChange)) {}
};
+ Value *EmitScalarCast(Value *Src, QualType SrcType, QualType DstType,
+ llvm::Type *SrcTy, llvm::Type *DstTy,
+ ScalarConversionOpts Opts);
Value *
EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy,
SourceLocation Loc,
@@ -483,6 +486,8 @@ public:
return CGF.EmitPseudoObjectRValue(E).getScalarVal();
}
+ Value *VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E);
+
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
return EmitLoadOfLValue(CGF.getOrCreateOpaqueLValueMapping(E),
@@ -729,6 +734,7 @@ public:
BO->getLHS()->getType().getCanonicalType());
auto *RHSMatTy = dyn_cast<ConstantMatrixType>(
BO->getRHS()->getType().getCanonicalType());
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
if (LHSMatTy && RHSMatTy)
return MB.CreateMatrixMultiply(Ops.LHS, Ops.RHS, LHSMatTy->getNumRows(),
LHSMatTy->getNumColumns(),
@@ -1191,6 +1197,54 @@ void ScalarExprEmitter::EmitIntegerSignChangeCheck(Value *Src, QualType SrcType,
{Src, Dst});
}
+Value *ScalarExprEmitter::EmitScalarCast(Value *Src, QualType SrcType,
+ QualType DstType, llvm::Type *SrcTy,
+ llvm::Type *DstTy,
+ ScalarConversionOpts Opts) {
+ // The Element types determine the type of cast to perform.
+ llvm::Type *SrcElementTy;
+ llvm::Type *DstElementTy;
+ QualType SrcElementType;
+ QualType DstElementType;
+ if (SrcType->isMatrixType() && DstType->isMatrixType()) {
+ SrcElementTy = cast<llvm::VectorType>(SrcTy)->getElementType();
+ DstElementTy = cast<llvm::VectorType>(DstTy)->getElementType();
+ SrcElementType = SrcType->castAs<MatrixType>()->getElementType();
+ DstElementType = DstType->castAs<MatrixType>()->getElementType();
+ } else {
+ assert(!SrcType->isMatrixType() && !DstType->isMatrixType() &&
+ "cannot cast between matrix and non-matrix types");
+ SrcElementTy = SrcTy;
+ DstElementTy = DstTy;
+ SrcElementType = SrcType;
+ DstElementType = DstType;
+ }
+
+ if (isa<llvm::IntegerType>(SrcElementTy)) {
+ bool InputSigned = SrcElementType->isSignedIntegerOrEnumerationType();
+ if (SrcElementType->isBooleanType() && Opts.TreatBooleanAsSigned) {
+ InputSigned = true;
+ }
+
+ if (isa<llvm::IntegerType>(DstElementTy))
+ return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ if (InputSigned)
+ return Builder.CreateSIToFP(Src, DstTy, "conv");
+ return Builder.CreateUIToFP(Src, DstTy, "conv");
+ }
+
+ if (isa<llvm::IntegerType>(DstElementTy)) {
+ assert(SrcElementTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstElementType->isSignedIntegerOrEnumerationType())
+ return Builder.CreateFPToSI(Src, DstTy, "conv");
+ return Builder.CreateFPToUI(Src, DstTy, "conv");
+ }
+
+ if (DstElementTy->getTypeID() < SrcElementTy->getTypeID())
+ return Builder.CreateFPTrunc(Src, DstTy, "conv");
+ return Builder.CreateFPExt(Src, DstTy, "conv");
+}
+
/// Emit a conversion from the specified type to the specified destination type,
/// both of which are LLVM scalar types.
Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
@@ -1318,6 +1372,9 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
return Builder.CreateVectorSplat(NumElements, Src, "splat");
}
+ if (SrcType->isMatrixType() && DstType->isMatrixType())
+ return EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
+
if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
// Allow bitcast from vector to integer/fp of the same size.
unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
@@ -1384,31 +1441,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
DstTy = CGF.FloatTy;
}
- if (isa<llvm::IntegerType>(SrcTy)) {
- bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
- if (SrcType->isBooleanType() && Opts.TreatBooleanAsSigned) {
- InputSigned = true;
- }
- if (isa<llvm::IntegerType>(DstTy))
- Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
- else if (InputSigned)
- Res = Builder.CreateSIToFP(Src, DstTy, "conv");
- else
- Res = Builder.CreateUIToFP(Src, DstTy, "conv");
- } else if (isa<llvm::IntegerType>(DstTy)) {
- assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
- if (DstType->isSignedIntegerOrEnumerationType())
- Res = Builder.CreateFPToSI(Src, DstTy, "conv");
- else
- Res = Builder.CreateFPToUI(Src, DstTy, "conv");
- } else {
- assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
- "Unknown real conversion");
- if (DstTy->getTypeID() < SrcTy->getTypeID())
- Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
- else
- Res = Builder.CreateFPExt(Src, DstTy, "conv");
- }
+ Res = EmitScalarCast(Src, SrcType, DstType, SrcTy, DstTy, Opts);
if (DstTy != ResTy) {
if (CGF.getContext().getTargetInfo().useFP16ConversionIntrinsics()) {
@@ -1550,6 +1583,25 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) {
return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
}
+Value *
+ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
+ ASTContext &Context = CGF.getContext();
+ llvm::Optional<LangAS> GlobalAS =
+ Context.getTargetInfo().getConstantAddressSpace();
+ llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
+ E->ComputeName(Context), "__usn_str",
+ static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
+
+ unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
+
+ if (GlobalConstStr->getType()->getPointerAddressSpace() == ExprAS)
+ return GlobalConstStr;
+
+ llvm::Type *EltTy = GlobalConstStr->getType()->getPointerElementType();
+ llvm::PointerType *NewPtrTy = llvm::PointerType::get(EltTy, ExprAS);
+ return Builder.CreateAddrSpaceCast(GlobalConstStr, NewPtrTy, "usn_addr_cast");
+}
+
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// Vector Mask Case
if (E->getNumSubExprs() == 2) {
@@ -1729,7 +1781,7 @@ Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
return MB.CreateExtractElement(
Matrix, RowIdx, ColumnIdx,
- E->getBase()->getType()->getAs<ConstantMatrixType>()->getNumRows());
+ E->getBase()->getType()->castAs<ConstantMatrixType>()->getNumRows());
}
static int getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
@@ -1913,7 +1965,7 @@ bool CodeGenFunction::ShouldNullCheckClassCastValue(const CastExpr *CE) {
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
// And that glvalue casts are never null.
- if (ICE->getValueKind() != VK_RValue)
+ if (ICE->isGLValue())
return false;
}
@@ -2043,24 +2095,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
isa<llvm::ScalableVectorType>(DstTy)) ||
(isa<llvm::ScalableVectorType>(SrcTy) &&
isa<llvm::FixedVectorType>(DstTy))) {
- if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
- // Call expressions can't have a scalar return unless the return type
- // is a reference type so an lvalue can't be emitted. Create a temp
- // alloca to store the call, bitcast the address then load.
- QualType RetTy = CE->getCallReturnType(CGF.getContext());
- Address Addr =
- CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-call-rvalue");
- LValue LV = CGF.MakeAddrLValue(Addr, RetTy);
- CGF.EmitStoreOfScalar(Src, LV);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
- "castFixedSve");
- LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
- DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
- return EmitLoadOfLValue(DestLV, CE->getExprLoc());
- }
-
- Address Addr = EmitLValue(E).getAddress(CGF);
- Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy));
+ Address Addr = CGF.CreateDefaultAlignTempAlloca(SrcTy, "saved-value");
+ LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
+ CGF.EmitStoreOfScalar(Src, LV);
+ Addr = Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(DestTy),
+ "castFixedSve");
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2230,6 +2269,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
CGF.EmitIgnoredExpr(E);
return nullptr;
}
+ case CK_MatrixCast: {
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy,
+ CE->getExprLoc());
+ }
case CK_VectorSplat: {
llvm::Type *DstTy = ConvertType(DestTy);
Value *Elt = Visit(const_cast<Expr*>(E));
@@ -2545,7 +2588,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value, numElts, "vla.inc");
+ value = Builder.CreateGEP(value->getType()->getPointerElementType(),
+ value, numElts, "vla.inc");
else
value = CGF.EmitCheckedInBoundsGEP(
value, numElts, /*SignedIndices=*/false, isSubtraction,
@@ -2557,7 +2601,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = CGF.EmitCastToVoidPtr(value);
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value, amt, "incdec.funcptr");
+ value = Builder.CreateGEP(CGF.Int8Ty, value, amt, "incdec.funcptr");
else
value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
isSubtraction, E->getExprLoc(),
@@ -2568,7 +2612,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
} else {
llvm::Value *amt = Builder.getInt32(amount);
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value, amt, "incdec.ptr");
+ value = Builder.CreateGEP(value->getType()->getPointerElementType(),
+ value, amt, "incdec.ptr");
else
value = CGF.EmitCheckedInBoundsGEP(value, amt, /*SignedIndices=*/false,
isSubtraction, E->getExprLoc(),
@@ -2677,7 +2722,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
if (CGF.getLangOpts().isSignedOverflowDefined())
- value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
+ value = Builder.CreateGEP(CGF.Int8Ty, value, sizeValue, "incdec.objptr");
else
value = CGF.EmitCheckedInBoundsGEP(value, sizeValue,
/*SignedIndices=*/false, isSubtraction,
@@ -3125,7 +3170,7 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
llvm::Value *IntMin =
Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
- llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
+ llvm::Value *NegOne = llvm::Constant::getAllOnesValue(Ty);
llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
@@ -3157,12 +3202,30 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
}
+ if (Ops.Ty->isConstantMatrixType()) {
+ llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ // We need to check the types of the operands of the operator to get the
+ // correct matrix dimensions.
+ auto *BO = cast<BinaryOperator>(Ops.E);
+ (void)BO;
+ assert(
+ isa<ConstantMatrixType>(BO->getLHS()->getType().getCanonicalType()) &&
+ "first operand must be a matrix");
+ assert(BO->getRHS()->getType().getCanonicalType()->isArithmeticType() &&
+ "second operand must be an arithmetic type");
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
+ return MB.CreateScalarDiv(Ops.LHS, Ops.RHS,
+ Ops.Ty->hasUnsignedIntegerRepresentation());
+ }
+
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
llvm::Value *Val;
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, Ops.FPFeatures);
Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if (CGF.getLangOpts().OpenCL &&
- !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
+ if ((CGF.getLangOpts().OpenCL &&
+ !CGF.CGM.getCodeGenOpts().OpenCLCorrectlyRoundedDivSqrt) ||
+ (CGF.getLangOpts().HIP && CGF.getLangOpts().CUDAIsDevice &&
+ !CGF.CGM.getCodeGenOpts().HIPCorrectlyRoundedDivSqrt)) {
// OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
// OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
// build option allows an application to specify that single precision
@@ -3384,7 +3447,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
index = CGF.Builder.CreateMul(index, objectSize);
Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
- result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
@@ -3400,7 +3463,9 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// multiply. We suppress this if overflow is not undefined behavior.
if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
- pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ pointer = CGF.Builder.CreateGEP(
+ pointer->getType()->getPointerElementType(), pointer, index,
+ "add.ptr");
} else {
index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
pointer =
@@ -3415,12 +3480,13 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// future proof.
if (elementType->isVoidType() || elementType->isFunctionType()) {
Value *result = CGF.EmitCastToVoidPtr(pointer);
- result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ result = CGF.Builder.CreateGEP(CGF.Int8Ty, result, index, "add.ptr");
return CGF.Builder.CreateBitCast(result, pointer->getType());
}
if (CGF.getLangOpts().isSignedOverflowDefined())
- return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ return CGF.Builder.CreateGEP(
+ pointer->getType()->getPointerElementType(), pointer, index, "add.ptr");
return CGF.EmitCheckedInBoundsGEP(pointer, index, isSigned, isSubtraction,
op.E->getExprLoc(), "add.ptr");
@@ -3532,6 +3598,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (op.Ty->isConstantMatrixType()) {
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
return MB.CreateAdd(op.LHS, op.RHS);
}
@@ -3681,6 +3748,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
if (op.Ty->isConstantMatrixType()) {
llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
return MB.CreateSub(op.LHS, op.RHS);
}
@@ -4778,7 +4846,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
Expr *BaseExpr = E->getBase();
Address Addr = Address::invalid();
- if (BaseExpr->isRValue()) {
+ if (BaseExpr->isPRValue()) {
Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
} else {
Addr = EmitLValue(BaseExpr).getAddress(*this);
@@ -4950,14 +5018,14 @@ Value *
CodeGenFunction::EmitCheckedInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
bool SignedIndices, bool IsSubtraction,
SourceLocation Loc, const Twine &Name) {
- Value *GEPVal = Builder.CreateInBoundsGEP(Ptr, IdxList, Name);
+ llvm::Type *PtrTy = Ptr->getType();
+ Value *GEPVal = Builder.CreateInBoundsGEP(
+ PtrTy->getPointerElementType(), Ptr, IdxList, Name);
// If the pointer overflow sanitizer isn't enabled, do nothing.
if (!SanOpts.has(SanitizerKind::PointerOverflow))
return GEPVal;
- llvm::Type *PtrTy = Ptr->getType();
-
// Perform nullptr-and-offset check unless the nullptr is defined.
bool PerformNullCheck = !NullPointerIsDefined(
Builder.GetInsertBlock()->getParent(), PtrTy->getPointerAddressSpace());
diff --git a/clang/lib/CodeGen/CGLoopInfo.cpp b/clang/lib/CodeGen/CGLoopInfo.cpp
index 8ba40599cfaf..12a6cd8da603 100644
--- a/clang/lib/CodeGen/CGLoopInfo.cpp
+++ b/clang/lib/CodeGen/CGLoopInfo.cpp
@@ -250,12 +250,10 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
Args.push_back(nullptr);
Args.append(LoopProperties.begin(), LoopProperties.end());
- // Setting vectorize.predicate
+ // Setting vectorize.predicate when it has been specified and vectorization
+ // has not been disabled.
bool IsVectorPredicateEnabled = false;
- if (Attrs.VectorizePredicateEnable != LoopAttributes::Unspecified &&
- Attrs.VectorizeEnable != LoopAttributes::Disable &&
- Attrs.VectorizeWidth < 1) {
-
+ if (Attrs.VectorizePredicateEnable != LoopAttributes::Unspecified) {
IsVectorPredicateEnabled =
(Attrs.VectorizePredicateEnable == LoopAttributes::Enable);
@@ -303,7 +301,8 @@ LoopInfo::createLoopVectorizeMetadata(const LoopAttributes &Attrs,
// explicitly requested fixed-width vectorization, i.e.
// vectorize.scalable.enable is false.
if (Attrs.VectorizeEnable != LoopAttributes::Unspecified ||
- IsVectorPredicateEnabled || Attrs.VectorizeWidth > 1 ||
+ (IsVectorPredicateEnabled && Attrs.VectorizeWidth != 1) ||
+ Attrs.VectorizeWidth > 1 ||
Attrs.VectorizeScalable == LoopAttributes::Enable ||
(Attrs.VectorizeScalable == LoopAttributes::Disable &&
Attrs.VectorizeWidth != 1)) {
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index d134be83a9dc..ad505fc5a0d4 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -368,7 +368,7 @@ template <class Derived> struct GenFuncBase {
CGF.Builder.CreateNUWMul(BaseEltSizeVal, NumElts);
Address BC = CGF.Builder.CreateBitCast(DstAddr, CGF.CGM.Int8PtrTy);
llvm::Value *DstArrayEnd =
- CGF.Builder.CreateInBoundsGEP(BC.getPointer(), SizeInBytes);
+ CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BC.getPointer(), SizeInBytes);
DstArrayEnd = CGF.Builder.CreateBitCast(DstArrayEnd, CGF.CGM.Int8PtrPtrTy,
"dstarray.end");
llvm::BasicBlock *PreheaderBB = CGF.Builder.GetInsertBlock();
@@ -470,16 +470,12 @@ template <class Derived> struct GenFuncBase {
llvm::Function::Create(FuncTy, llvm::GlobalValue::LinkOnceODRLinkage,
FuncName, &CGM.getModule());
F->setVisibility(llvm::GlobalValue::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, F, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(nullptr, F);
- IdentifierInfo *II = &Ctx.Idents.get(FuncName);
- FunctionDecl *FD = FunctionDecl::Create(
- Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- II, Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {}), nullptr,
- SC_PrivateExtern, false, false);
CodeGenFunction NewCGF(CGM);
setCGF(&NewCGF);
- CGF->StartFunction(FD, Ctx.VoidTy, F, FI, Args);
+ CGF->StartFunction(GlobalDecl(), Ctx.VoidTy, F, FI, Args);
+ auto AL = ApplyDebugLocation::CreateArtificial(*CGF);
std::array<Address, N> Addrs =
getParamAddrs<N>(std::make_index_sequence<N>{}, Alignments, Args, CGF);
asDerived().visitStructFields(QT, CharUnits::Zero(), Addrs);
@@ -568,7 +564,7 @@ struct GenBinaryFunc : CopyStructVisitor<Derived, IsMove>,
this->CGF->Builder.CreateBitCast(SrcAddr, PtrTy), FT);
SrcLV = this->CGF->EmitLValueForField(SrcBase, FD);
} else {
- llvm::PointerType *Ty = this->CGF->ConvertType(FT)->getPointerTo();
+ llvm::PointerType *Ty = this->CGF->ConvertTypeForMem(FT)->getPointerTo();
Address DstAddr = this->CGF->Builder.CreateBitCast(Addrs[DstIdx], Ty);
Address SrcAddr = this->CGF->Builder.CreateBitCast(Addrs[SrcIdx], Ty);
DstLV = this->CGF->MakeAddrLValue(DstAddr, FT);
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index 3f930c76fe0a..937a0e8a3b69 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -23,6 +23,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
@@ -759,7 +760,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
if (OMD->isDirectMethod()) {
Fn->setVisibility(llvm::Function::HiddenVisibility);
- CGM.SetLLVMFunctionAttributes(OMD, FI, Fn);
+ CGM.SetLLVMFunctionAttributes(OMD, FI, Fn, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(OMD, Fn);
} else {
CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
@@ -924,10 +925,11 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
IvarSize = TInfo.Width;
IvarAlignment = TInfo.Align;
- // If we have a copy property, we always have to use getProperty/setProperty.
- // TODO: we could actually use setProperty and an expression for non-atomics.
+ // If we have a copy property, we always have to use setProperty.
+ // If the property is atomic we need to use getProperty, but in
+ // the nonatomic case we can just use expression.
if (IsCopy) {
- Kind = GetSetProperty;
+ Kind = IsAtomic ? GetSetProperty : SetPropertyAndExpressionGet;
return;
}
@@ -1521,7 +1523,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
DeclRefExpr self(getContext(), selfDecl, false, selfDecl->getType(),
VK_LValue, SourceLocation());
ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack, selfDecl->getType(),
- CK_LValueToRValue, &self, VK_RValue,
+ CK_LValueToRValue, &self, VK_PRValue,
FPOptionsOverride());
ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
SourceLocation(), SourceLocation(),
@@ -1533,7 +1535,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
SourceLocation());
ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
argType.getUnqualifiedType(), CK_LValueToRValue,
- &arg, VK_RValue, FPOptionsOverride());
+ &arg, VK_PRValue, FPOptionsOverride());
// The property type can differ from the ivar type in some situations with
// Objective-C pointer types, we can always bit cast the RHS in these cases.
@@ -1555,15 +1557,15 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
argCK = CK_BitCast;
}
ImplicitCastExpr argCast(ImplicitCastExpr::OnStack, ivarRef.getType(), argCK,
- &argLoad, VK_RValue, FPOptionsOverride());
+ &argLoad, VK_PRValue, FPOptionsOverride());
Expr *finalArg = &argLoad;
if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
argLoad.getType()))
finalArg = &argCast;
BinaryOperator *assign = BinaryOperator::Create(
- getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(), VK_RValue,
- OK_Ordinary, SourceLocation(), FPOptionsOverride());
+ getContext(), &ivarRef, finalArg, BO_Assign, ivarRef.getType(),
+ VK_PRValue, OK_Ordinary, SourceLocation(), FPOptionsOverride());
EmitStmt(assign);
}
@@ -1818,9 +1820,10 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
llvm::Value *StateMutationsPtr
= Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ llvm::Type *UnsignedLongTy = ConvertType(getContext().UnsignedLongTy);
llvm::Value *initialMutations =
- Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
- "forcoll.initial-mutations");
+ Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr,
+ getPointerAlign(), "forcoll.initial-mutations");
// Start looping. This is the point we return to whenever we have a
// fresh, non-empty batch of objects.
@@ -1842,8 +1845,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// refreshes.
StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
llvm::Value *currentMutations
- = Builder.CreateAlignedLoad(StateMutationsPtr, getPointerAlign(),
- "statemutations");
+ = Builder.CreateAlignedLoad(UnsignedLongTy, StateMutationsPtr,
+ getPointerAlign(), "statemutations");
llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
@@ -1853,9 +1856,9 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// If so, call the enumeration-mutation function.
EmitBlock(WasMutatedBB);
+ llvm::Type *ObjCIdType = ConvertType(getContext().getObjCIdType());
llvm::Value *V =
- Builder.CreateBitCast(Collection,
- ConvertType(getContext().getObjCIdType()));
+ Builder.CreateBitCast(Collection, ObjCIdType);
CallArgList Args2;
Args2.add(RValue::get(V), getContext().getObjCIdType());
// FIXME: We shouldn't need to get the function info here, the runtime already
@@ -1901,10 +1904,11 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
Builder.CreateLoad(StateItemsPtr, "stateitems");
// Fetch the value at the current index from the buffer.
- llvm::Value *CurrentItemPtr =
- Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
+ llvm::Value *CurrentItemPtr = Builder.CreateGEP(
+ EnumStateItems->getType()->getPointerElementType(), EnumStateItems, index,
+ "currentitem.ptr");
llvm::Value *CurrentItem =
- Builder.CreateAlignedLoad(CurrentItemPtr, getPointerAlign());
+ Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign());
if (SanOpts.has(SanitizerKind::ObjCCast)) {
// Before using an item from the collection, check that the implicit cast
@@ -2078,6 +2082,15 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
EmitNounwindRuntimeCall(fn, values);
}
+/// Emit a call to "clang.arc.noop.use", which consumes the result of a call
+/// that has operand bundle "clang.arc.attachedcall".
+void CodeGenFunction::EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values) {
+ llvm::Function *&fn = CGM.getObjCEntrypoints().clang_arc_noop_use;
+ if (!fn)
+ fn = CGM.getIntrinsic(llvm::Intrinsic::objc_clang_arc_noop_use);
+ EmitNounwindRuntimeCall(fn, values);
+}
+
static void setARCRuntimeFunctionLinkage(CodeGenModule &CGM, llvm::Value *RTF) {
if (auto *F = dyn_cast<llvm::Function>(RTF)) {
// If the target runtime doesn't naturally support ARC, emit weak
@@ -2304,10 +2317,11 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
// with this marker yet, so leave a breadcrumb for the ARC
// optimizer to pick up.
} else {
- const char *markerKey = "clang.arc.retainAutoreleasedReturnValueMarker";
- if (!CGF.CGM.getModule().getModuleFlag(markerKey)) {
+ const char *retainRVMarkerKey = llvm::objcarc::getRVMarkerModuleFlagStr();
+ if (!CGF.CGM.getModule().getModuleFlag(retainRVMarkerKey)) {
auto *str = llvm::MDString::get(CGF.getLLVMContext(), assembly);
- CGF.CGM.getModule().addModuleFlag(llvm::Module::Error, markerKey, str);
+ CGF.CGM.getModule().addModuleFlag(llvm::Module::Error,
+ retainRVMarkerKey, str);
}
}
}
@@ -2317,6 +2331,47 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
CGF.Builder.CreateCall(marker, None, CGF.getBundlesForFunclet(marker));
}
+static llvm::Value *emitOptimizedARCReturnCall(llvm::Value *value,
+ bool IsRetainRV,
+ CodeGenFunction &CGF) {
+ emitAutoreleasedReturnValueMarker(CGF);
+
+ // Add operand bundle "clang.arc.attachedcall" to the call instead of emitting
+ // retainRV or claimRV calls in the IR. We currently do this only when the
+ // optimization level isn't -O0 since global-isel, which is currently run at
+ // -O0, doesn't know about the operand bundle.
+
+ // FIXME: Do this when the target isn't aarch64.
+ if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0 &&
+ CGF.CGM.getTarget().getTriple().isAArch64()) {
+ llvm::Value *bundleArgs[] = {llvm::ConstantInt::get(
+ CGF.Int64Ty,
+ llvm::objcarc::getAttachedCallOperandBundleEnum(IsRetainRV))};
+ llvm::OperandBundleDef OB("clang.arc.attachedcall", bundleArgs);
+ auto *oldCall = cast<llvm::CallBase>(value);
+ llvm::CallBase *newCall = llvm::CallBase::addOperandBundle(
+ oldCall, llvm::LLVMContext::OB_clang_arc_attachedcall, OB, oldCall);
+ newCall->copyMetadata(*oldCall);
+ oldCall->replaceAllUsesWith(newCall);
+ oldCall->eraseFromParent();
+ CGF.EmitARCNoopIntrinsicUse(newCall);
+ return newCall;
+ }
+
+ bool isNoTail =
+ CGF.CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail();
+ llvm::CallInst::TailCallKind tailKind =
+ isNoTail ? llvm::CallInst::TCK_NoTail : llvm::CallInst::TCK_None;
+ ObjCEntrypoints &EPs = CGF.CGM.getObjCEntrypoints();
+ llvm::Function *&EP = IsRetainRV
+ ? EPs.objc_retainAutoreleasedReturnValue
+ : EPs.objc_unsafeClaimAutoreleasedReturnValue;
+ llvm::Intrinsic::ID IID =
+ IsRetainRV ? llvm::Intrinsic::objc_retainAutoreleasedReturnValue
+ : llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue;
+ return emitARCValueOperation(CGF, value, nullptr, EP, IID, tailKind);
+}
+
/// Retain the given object which is the result of a function call.
/// call i8* \@objc_retainAutoreleasedReturnValue(i8* %value)
///
@@ -2324,15 +2379,7 @@ static void emitAutoreleasedReturnValueMarker(CodeGenFunction &CGF) {
/// call with completely different semantics.
llvm::Value *
CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
- emitAutoreleasedReturnValueMarker(*this);
- llvm::CallInst::TailCallKind tailKind =
- CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
- ? llvm::CallInst::TCK_NoTail
- : llvm::CallInst::TCK_None;
- return emitARCValueOperation(
- *this, value, nullptr,
- CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue,
- llvm::Intrinsic::objc_retainAutoreleasedReturnValue, tailKind);
+ return emitOptimizedARCReturnCall(value, true, *this);
}
/// Claim a possibly-autoreleased return value at +0. This is only
@@ -2344,15 +2391,7 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
/// call i8* \@objc_unsafeClaimAutoreleasedReturnValue(i8* %value)
llvm::Value *
CodeGenFunction::EmitARCUnsafeClaimAutoreleasedReturnValue(llvm::Value *value) {
- emitAutoreleasedReturnValueMarker(*this);
- llvm::CallInst::TailCallKind tailKind =
- CGM.getTargetCodeGenInfo().markARCOptimizedReturnCallsAsNoTail()
- ? llvm::CallInst::TCK_NoTail
- : llvm::CallInst::TCK_None;
- return emitARCValueOperation(
- *this, value, nullptr,
- CGM.getObjCEntrypoints().objc_unsafeClaimAutoreleasedReturnValue,
- llvm::Intrinsic::objc_unsafeClaimAutoreleasedReturnValue, tailKind);
+ return emitOptimizedARCReturnCall(value, false, *this);
}
/// Release the given object.
@@ -2902,8 +2941,12 @@ static llvm::Value *emitARCOperationAfterCall(CodeGenFunction &CGF,
ValueTransform doAfterCall,
ValueTransform doFallback) {
CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+ auto *callBase = dyn_cast<llvm::CallBase>(value);
- if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ if (callBase && llvm::objcarc::hasAttachedCallOpBundle(callBase)) {
+ // Fall back if the call base has operand bundle "clang.arc.attachedcall".
+ value = doFallback(CGF, value);
+ } else if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
// Place the retain immediately following the call.
CGF.Builder.SetInsertPoint(call->getParent(),
++llvm::BasicBlock::iterator(call));
@@ -3657,12 +3700,18 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
FunctionTy, nullptr, SC_Static, false, false);
FunctionArgList args;
- ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
- ImplicitParamDecl::Other);
- args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
- ImplicitParamDecl::Other);
- args.push_back(&SrcDecl);
+ ParmVarDecl *Params[2];
+ ParmVarDecl *DstDecl = ParmVarDecl::Create(
+ C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy,
+ C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None,
+ /*DefArg=*/nullptr);
+ args.push_back(Params[0] = DstDecl);
+ ParmVarDecl *SrcDecl = ParmVarDecl::Create(
+ C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy,
+ C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None,
+ /*DefArg=*/nullptr);
+ args.push_back(Params[1] = SrcDecl);
+ FD->setParams(Params);
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
@@ -3678,12 +3727,12 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
StartFunction(FD, ReturnTy, Fn, FI, args);
- DeclRefExpr DstExpr(C, &DstDecl, false, DestTy, VK_RValue, SourceLocation());
+ DeclRefExpr DstExpr(C, DstDecl, false, DestTy, VK_PRValue, SourceLocation());
UnaryOperator *DST = UnaryOperator::Create(
C, &DstExpr, UO_Deref, DestTy->getPointeeType(), VK_LValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
- DeclRefExpr SrcExpr(C, &SrcDecl, false, SrcTy, VK_RValue, SourceLocation());
+ DeclRefExpr SrcExpr(C, SrcDecl, false, SrcTy, VK_PRValue, SourceLocation());
UnaryOperator *SRC = UnaryOperator::Create(
C, &SrcExpr, UO_Deref, SrcTy->getPointeeType(), VK_LValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
@@ -3741,12 +3790,18 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
FunctionTy, nullptr, SC_Static, false, false);
FunctionArgList args;
- ImplicitParamDecl DstDecl(C, FD, SourceLocation(), /*Id=*/nullptr, DestTy,
- ImplicitParamDecl::Other);
- args.push_back(&DstDecl);
- ImplicitParamDecl SrcDecl(C, FD, SourceLocation(), /*Id=*/nullptr, SrcTy,
- ImplicitParamDecl::Other);
- args.push_back(&SrcDecl);
+ ParmVarDecl *Params[2];
+ ParmVarDecl *DstDecl = ParmVarDecl::Create(
+ C, FD, SourceLocation(), SourceLocation(), nullptr, DestTy,
+ C.getTrivialTypeSourceInfo(DestTy, SourceLocation()), SC_None,
+ /*DefArg=*/nullptr);
+ args.push_back(Params[0] = DstDecl);
+ ParmVarDecl *SrcDecl = ParmVarDecl::Create(
+ C, FD, SourceLocation(), SourceLocation(), nullptr, SrcTy,
+ C.getTrivialTypeSourceInfo(SrcTy, SourceLocation()), SC_None,
+ /*DefArg=*/nullptr);
+ args.push_back(Params[1] = SrcDecl);
+ FD->setParams(Params);
const CGFunctionInfo &FI =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, args);
@@ -3761,7 +3816,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
StartFunction(FD, ReturnTy, Fn, FI, args);
- DeclRefExpr SrcExpr(getContext(), &SrcDecl, false, SrcTy, VK_RValue,
+ DeclRefExpr SrcExpr(getContext(), SrcDecl, false, SrcTy, VK_PRValue,
SourceLocation());
UnaryOperator *SRC = UnaryOperator::Create(
@@ -3788,7 +3843,7 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
CXXConstExpr->getConstructionKind(),
SourceRange());
- DeclRefExpr DstExpr(getContext(), &DstDecl, false, DestTy, VK_RValue,
+ DeclRefExpr DstExpr(getContext(), DstDecl, false, DestTy, VK_PRValue,
SourceLocation());
RValue DV = EmitAnyExpr(&DstExpr);
diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp
index 9825d7bca18c..3f361f4e7931 100644
--- a/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -739,9 +739,11 @@ class CGObjCGNUstep : public CGObjCGNU {
/// Function to perform atomic copies of C++ objects with nontrivial copy
/// constructors to Objective-C ivars.
LazyRuntimeFunction CxxAtomicObjectSetFn;
- /// Type of an slot structure pointer. This is returned by the various
+ /// Type of a slot structure pointer. This is returned by the various
/// lookup functions.
llvm::Type *SlotTy;
+ /// Type of a slot structure.
+ llvm::Type *SlotStructTy;
public:
llvm::Constant *GetEHType(QualType T) override;
@@ -780,7 +782,8 @@ class CGObjCGNUstep : public CGObjCGNU {
// Load the imp from the slot
llvm::Value *imp = Builder.CreateAlignedLoad(
- Builder.CreateStructGEP(nullptr, slot, 4), CGF.getPointerAlign());
+ IMPTy, Builder.CreateStructGEP(SlotStructTy, slot, 4),
+ CGF.getPointerAlign());
// The lookup function may have changed the receiver, so make sure we use
// the new one.
@@ -798,8 +801,9 @@ class CGObjCGNUstep : public CGObjCGNU {
CGF.EmitNounwindRuntimeCall(SlotLookupSuperFn, lookupArgs);
slot->setOnlyReadsMemory();
- return Builder.CreateAlignedLoad(Builder.CreateStructGEP(nullptr, slot, 4),
- CGF.getPointerAlign());
+ return Builder.CreateAlignedLoad(
+ IMPTy, Builder.CreateStructGEP(SlotStructTy, slot, 4),
+ CGF.getPointerAlign());
}
public:
@@ -809,8 +813,7 @@ class CGObjCGNUstep : public CGObjCGNU {
CGObjCGNU(Mod, ABI, ProtocolABI, ClassABI) {
const ObjCRuntime &R = CGM.getLangOpts().ObjCRuntime;
- llvm::StructType *SlotStructTy =
- llvm::StructType::get(PtrTy, PtrTy, PtrTy, IntTy, IMPTy);
+ SlotStructTy = llvm::StructType::get(PtrTy, PtrTy, PtrTy, IntTy, IMPTy);
SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
// Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
@@ -942,7 +945,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
/// Generate the name of a symbol for a reference to a class. Accesses to
/// classes should be indirected via this.
- typedef std::pair<std::string, std::pair<llvm::Constant*, int>> EarlyInitPair;
+ typedef std::pair<std::string, std::pair<llvm::GlobalVariable*, int>>
+ EarlyInitPair;
std::vector<EarlyInitPair> EarlyInitList;
std::string SymbolForClassRef(StringRef Name, bool isWeak) {
@@ -1093,7 +1097,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
}
}
- auto *ObjCStrGV =
+ llvm::GlobalVariable *ObjCStrGV =
Fields.finishAndCreateGlobal(
isNamed ? StringRef(StringName) : ".objc_string",
Align, false, isNamed ? llvm::GlobalValue::LinkOnceODRLinkage
@@ -1104,7 +1108,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ObjCStrGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
}
if (CGM.getTriple().isOSBinFormatCOFF()) {
- std::pair<llvm::Constant*, int> v{ObjCStrGV, 0};
+ std::pair<llvm::GlobalVariable*, int> v{ObjCStrGV, 0};
EarlyInitList.emplace_back(Sym, v);
}
llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
@@ -1230,7 +1234,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
const ObjCInterfaceDecl *OID = nullptr;
- for (const auto &Result : DC->lookup(&II))
+ for (const auto *Result : DC->lookup(&II))
if ((OID = dyn_cast<ObjCInterfaceDecl>(Result)))
break;
@@ -1328,7 +1332,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
Ref = GV;
}
EmittedProtocolRef = true;
- return CGF.Builder.CreateAlignedLoad(Ref, CGM.getPointerAlign());
+ return CGF.Builder.CreateAlignedLoad(ProtocolPtrTy, Ref,
+ CGM.getPointerAlign());
}
llvm::Constant *GenerateProtocolList(ArrayRef<llvm::Constant*> Protocols) {
@@ -1650,9 +1655,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
for (const auto &lateInit : EarlyInitList) {
auto *global = TheModule.getGlobalVariable(lateInit.first);
if (global) {
+ llvm::GlobalVariable *GV = lateInit.second.first;
b.CreateAlignedStore(
global,
- b.CreateStructGEP(lateInit.second.first, lateInit.second.second),
+ b.CreateStructGEP(GV->getValueType(), GV, lateInit.second.second),
CGM.getPointerAlign().getAsAlign());
}
}
@@ -1689,7 +1695,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
IvarOffsetPointer = new llvm::GlobalVariable(TheModule, IntTy, false,
llvm::GlobalValue::ExternalLinkage, nullptr, Name);
CharUnits Align = CGM.getIntAlign();
- llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(IvarOffsetPointer, Align);
+ llvm::Value *Offset =
+ CGF.Builder.CreateAlignedLoad(IntTy, IvarOffsetPointer, Align);
if (Offset->getType() != PtrDiffTy)
Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
return Offset;
@@ -1933,7 +1940,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// struct objc_property_list *properties
classFields.add(GeneratePropertyList(OID, classDecl));
- auto *classStruct =
+ llvm::GlobalVariable *classStruct =
classFields.finishAndCreateGlobal(SymbolForClass(className),
CGM.getPointerAlign(), false, llvm::GlobalValue::ExternalLinkage);
@@ -1944,12 +1951,12 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
if (IsCOFF) {
// we can't import a class struct.
if (OID->getClassInterface()->hasAttr<DLLExportAttr>()) {
- cast<llvm::GlobalValue>(classStruct)->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ classStruct->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
cast<llvm::GlobalValue>(classRefSymbol)->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
}
if (SuperClass) {
- std::pair<llvm::Constant*, int> v{classStruct, 1};
+ std::pair<llvm::GlobalVariable*, int> v{classStruct, 1};
EarlyInitList.emplace_back(std::string(SuperClass->getName()),
std::move(v));
}
@@ -2315,7 +2322,7 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
const VarDecl *VD = nullptr;
- for (const auto &Result : DC->lookup(&II))
+ for (const auto *Result : DC->lookup(&II))
if ((VD = dyn_cast<VarDecl>(Result)))
break;
@@ -2543,7 +2550,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ReceiverClass = Builder.CreateBitCast(ReceiverClass,
llvm::PointerType::getUnqual(IdTy));
ReceiverClass =
- Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
+ Builder.CreateAlignedLoad(IdTy, ReceiverClass, CGF.getPointerAlign());
}
ReceiverClass = EnforceType(Builder, ReceiverClass, IdTy);
} else {
@@ -2588,7 +2595,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ReceiverClass = Builder.CreateStructGEP(CastTy, ReceiverClass, 1);
// Load the superclass pointer
ReceiverClass =
- Builder.CreateAlignedLoad(ReceiverClass, CGF.getPointerAlign());
+ Builder.CreateAlignedLoad(IdTy, ReceiverClass, CGF.getPointerAlign());
}
// Construct the structure used to look up the IMP
llvm::StructType *ObjCSuperTy =
@@ -4086,6 +4093,7 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
return CGF.Builder.CreateZExtOrBitCast(
CGF.Builder.CreateAlignedLoad(
Int32Ty, CGF.Builder.CreateAlignedLoad(
+ llvm::Type::getInt32PtrTy(VMContext),
ObjCIvarOffsetVariable(Interface, Ivar),
CGF.getPointerAlign(), "ivar"),
CharUnits::fromQuantity(4)),
@@ -4101,7 +4109,7 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
GV->setAlignment(Align.getAsAlign());
Offset = GV;
}
- Offset = CGF.Builder.CreateAlignedLoad(Offset, Align);
+ Offset = CGF.Builder.CreateAlignedLoad(IntTy, Offset, Align);
if (Offset->getType() != PtrDiffTy)
Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
return Offset;
diff --git a/clang/lib/CodeGen/CGObjCMac.cpp b/clang/lib/CodeGen/CGObjCMac.cpp
index 4c4a316308ce..3de67bb4bbc5 100644
--- a/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/clang/lib/CodeGen/CGObjCMac.cpp
@@ -2096,6 +2096,7 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStructGEP(ObjCSuper, 0));
// If this is a class message the metaclass is passed as the target.
+ llvm::Type *ClassTyPtr = llvm::PointerType::getUnqual(ObjCTypes.ClassTy);
llvm::Value *Target;
if (IsClassMessage) {
if (isCategoryImpl) {
@@ -2107,13 +2108,14 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// isa" is the first ivar in a class (which it must be).
Target = EmitClassRef(CGF, Class->getSuperClass());
Target = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, Target, 0);
- Target = CGF.Builder.CreateAlignedLoad(Target, CGF.getPointerAlign());
+ Target = CGF.Builder.CreateAlignedLoad(ClassTyPtr, Target,
+ CGF.getPointerAlign());
} else {
llvm::Constant *MetaClassPtr = EmitMetaClassRef(Class);
llvm::Value *SuperPtr =
CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, MetaClassPtr, 1);
- llvm::Value *Super =
- CGF.Builder.CreateAlignedLoad(SuperPtr, CGF.getPointerAlign());
+ llvm::Value *Super = CGF.Builder.CreateAlignedLoad(ClassTyPtr, SuperPtr,
+ CGF.getPointerAlign());
Target = Super;
}
} else if (isCategoryImpl)
@@ -2121,7 +2123,8 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
else {
llvm::Value *ClassPtr = EmitSuperClassRef(Class);
ClassPtr = CGF.Builder.CreateStructGEP(ObjCTypes.ClassTy, ClassPtr, 1);
- Target = CGF.Builder.CreateAlignedLoad(ClassPtr, CGF.getPointerAlign());
+ Target = CGF.Builder.CreateAlignedLoad(ClassTyPtr, ClassPtr,
+ CGF.getPointerAlign());
}
// FIXME: We shouldn't need to do this cast, rectify the ASTContext and
// ObjCTypes types.
@@ -5285,7 +5288,8 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
CGM.getPointerAlign(), true);
}
- return CGF.Builder.CreateAlignedLoad(Entry, CGF.getPointerAlign());
+ return CGF.Builder.CreateAlignedLoad(Entry->getValueType(), Entry,
+ CGF.getPointerAlign());
}
llvm::Value *CGObjCMac::EmitClassRef(CodeGenFunction &CGF,
@@ -6518,7 +6522,7 @@ static llvm::GlobalValue::DLLStorageClassTypes getStorage(CodeGenModule &CGM,
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
const VarDecl *VD = nullptr;
- for (const auto &Result : DC->lookup(&II))
+ for (const auto *Result : DC->lookup(&II))
if ((VD = dyn_cast<VarDecl>(Result)))
break;
@@ -6682,7 +6686,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
if (PTGV)
- return CGF.Builder.CreateAlignedLoad(PTGV, Align);
+ return CGF.Builder.CreateAlignedLoad(PTGV->getValueType(), PTGV, Align);
PTGV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::WeakAnyLinkage, Init,
ProtocolName);
@@ -6693,7 +6697,7 @@ llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CodeGenFunction &CGF,
if (!CGM.getTriple().isOSBinFormatMachO())
PTGV->setComdat(CGM.getModule().getOrInsertComdat(ProtocolName));
CGM.addUsedGlobal(PTGV);
- return CGF.Builder.CreateAlignedLoad(PTGV, Align);
+ return CGF.Builder.CreateAlignedLoad(PTGV->getValueType(), PTGV, Align);
}
/// GenerateCategory - Build metadata for a category implementation.
@@ -7246,7 +7250,8 @@ CGObjCNonFragileABIMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
} else {
llvm::GlobalVariable *GV = ObjCIvarOffsetVariable(Interface, Ivar);
IvarOffsetValue =
- CGF.Builder.CreateAlignedLoad(GV, CGF.getSizeAlign(), "ivar");
+ CGF.Builder.CreateAlignedLoad(GV->getValueType(), GV,
+ CGF.getSizeAlign(), "ivar");
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
@@ -7486,7 +7491,7 @@ CGObjCNonFragileABIMac::EmitLoadOfClassRef(CodeGenFunction &CGF,
}
CharUnits Align = CGF.getPointerAlign();
- return CGF.Builder.CreateAlignedLoad(Entry, Align);
+ return CGF.Builder.CreateAlignedLoad(Entry->getValueType(), Entry, Align);
}
llvm::Value *
@@ -7578,7 +7583,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
CGM.addCompilerUsedGlobal(Entry);
}
- return CGF.Builder.CreateAlignedLoad(Entry, Align);
+ return CGF.Builder.CreateAlignedLoad(ObjCTypes.ClassnfABIPtrTy, Entry, Align);
}
/// GetClass - Return a reference to the class for the given interface
diff --git a/clang/lib/CodeGen/CGObjCRuntime.cpp b/clang/lib/CodeGen/CGObjCRuntime.cpp
index 9bf4d83f9bc4..108f6fc7ba60 100644
--- a/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -64,7 +64,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
Ivar->getUsageType(ObjectPtrTy).withCVRQualifiers(CVRQualifiers);
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
- V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, V, Offset, "add.ptr");
if (!Ivar->isBitField()) {
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index 57cc2d60e2af..c09797e91b99 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
+#include "clang/AST/APValue.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/OpenMPClause.h"
@@ -409,6 +410,7 @@ class InlinedOpenMPRegionRAII {
llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
FieldDecl *LambdaThisCaptureField = nullptr;
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
+ bool NoInheritance = false;
public:
/// Constructs region for combined constructs.
@@ -416,16 +418,19 @@ public:
/// a list of functions used for code generation of implicitly inlined
/// regions.
InlinedOpenMPRegionRAII(CodeGenFunction &CGF, const RegionCodeGenTy &CodeGen,
- OpenMPDirectiveKind Kind, bool HasCancel)
- : CGF(CGF) {
+ OpenMPDirectiveKind Kind, bool HasCancel,
+ bool NoInheritance = true)
+ : CGF(CGF), NoInheritance(NoInheritance) {
// Start emission for the construct.
CGF.CapturedStmtInfo = new CGOpenMPInlinedRegionInfo(
CGF.CapturedStmtInfo, CodeGen, Kind, HasCancel);
- std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
- LambdaThisCaptureField = CGF.LambdaThisCaptureField;
- CGF.LambdaThisCaptureField = nullptr;
- BlockInfo = CGF.BlockInfo;
- CGF.BlockInfo = nullptr;
+ if (NoInheritance) {
+ std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
+ LambdaThisCaptureField = CGF.LambdaThisCaptureField;
+ CGF.LambdaThisCaptureField = nullptr;
+ BlockInfo = CGF.BlockInfo;
+ CGF.BlockInfo = nullptr;
+ }
}
~InlinedOpenMPRegionRAII() {
@@ -434,15 +439,17 @@ public:
cast<CGOpenMPInlinedRegionInfo>(CGF.CapturedStmtInfo)->getOldCSI();
delete CGF.CapturedStmtInfo;
CGF.CapturedStmtInfo = OldCSI;
- std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
- CGF.LambdaThisCaptureField = LambdaThisCaptureField;
- CGF.BlockInfo = BlockInfo;
+ if (NoInheritance) {
+ std::swap(CGF.LambdaCaptureFields, LambdaCaptureFields);
+ CGF.LambdaThisCaptureField = LambdaThisCaptureField;
+ CGF.BlockInfo = BlockInfo;
+ }
}
};
/// Values for bit flags used in the ident_t to describe the fields.
/// All enumeric elements are named and described in accordance with the code
-/// from https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
+/// from https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
enum OpenMPLocationFlags : unsigned {
/// Use trampoline for internal microtask.
OMP_IDENT_IMD = 0x01,
@@ -497,7 +504,7 @@ enum OpenMPOffloadingReservedDeviceIDs {
/// Describes ident structure that describes a source location.
/// All descriptions are taken from
-/// https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h
+/// https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h
/// Original structure:
/// typedef struct ident {
/// kmp_int32 reserved_1; /**< might be used in Fortran;
@@ -649,11 +656,15 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
InitRVal =
RValue::getComplex(CGF.EmitLoadOfComplex(LV, DRD->getLocation()));
break;
- case TEK_Aggregate:
- InitRVal = RValue::getAggregate(LV.getAddress(CGF));
- break;
+ case TEK_Aggregate: {
+ OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_LValue);
+ CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, LV);
+ CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
+ /*IsInitializer=*/false);
+ return;
}
- OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_RValue);
+ }
+ OpaqueValueExpr OVE(DRD->getLocation(), Ty, VK_PRValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE, InitRVal);
CGF.EmitAnyExprToMem(&OVE, Private, Ty.getQualifiers(),
/*IsInitializer=*/false);
@@ -687,7 +698,8 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
SrcBegin = SrcAddr.getPointer();
llvm::Value *DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- llvm::Value *DestEnd = CGF.Builder.CreateGEP(DestBegin, NumElements);
+ llvm::Value *DestEnd =
+ CGF.Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arrayinit.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arrayinit.done");
@@ -732,13 +744,15 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
if (DRD) {
// Shift the address forward by one element.
llvm::Value *SrcElementNext = CGF.Builder.CreateConstGEP1_32(
- SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
+ "omp.arraycpy.dest.element");
SrcElementPHI->addIncoming(SrcElementNext, CGF.Builder.GetInsertBlock());
}
// Shift the address forward by one element.
llvm::Value *DestElementNext = CGF.Builder.CreateConstGEP1_32(
- DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
+ "omp.arraycpy.dest.element");
// Check whether we've reached the end.
llvm::Value *Done =
CGF.Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
@@ -999,13 +1013,14 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
LValue BaseLValue =
loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
OriginalBaseLValue);
+ Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
- BaseLValue.getPointer(CGF), SharedAddresses[N].first.getPointer(CGF));
+ BaseLValue.getPointer(CGF), SharedAddr.getPointer());
llvm::Value *PrivatePointer =
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivateAddr.getPointer(),
- SharedAddresses[N].first.getAddress(CGF).getType());
- llvm::Value *Ptr = CGF.Builder.CreateGEP(PrivatePointer, Adjustment);
+ PrivateAddr.getPointer(), SharedAddr.getType());
+ llvm::Value *Ptr = CGF.Builder.CreateGEP(
+ SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
OriginalBaseLValue.getAddress(CGF).getType(),
@@ -1028,7 +1043,7 @@ LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
getThreadIDVariable()->getType()->castAs<PointerType>());
}
-void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
+void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt *S) {
if (!CGF.HaveInsertPoint())
return;
// 1.2.2 OpenMP Language Terminology
@@ -1037,6 +1052,8 @@ void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, const Stmt * /*S*/) {
// The point of exit cannot be a branch out of the structured block.
// longjmp() and throw() must not violate the entry/exit criteria.
CGF.EHStack.pushTerminate();
+ if (S)
+ CGF.incrementProfileCounter(S);
CodeGen(CGF);
CGF.EHStack.popTerminate();
}
@@ -1193,7 +1210,7 @@ namespace {
// Builder if one is present.
struct PushAndPopStackRAII {
PushAndPopStackRAII(llvm::OpenMPIRBuilder *OMPBuilder, CodeGenFunction &CGF,
- bool HasCancel)
+ bool HasCancel, llvm::omp::Directive Kind)
: OMPBuilder(OMPBuilder) {
if (!OMPBuilder)
return;
@@ -1222,8 +1239,7 @@ struct PushAndPopStackRAII {
// TODO: Remove this once we emit parallel regions through the
// OpenMPIRBuilder as it can do this setup internally.
- llvm::OpenMPIRBuilder::FinalizationInfo FI(
- {FiniCB, OMPD_parallel, HasCancel});
+ llvm::OpenMPIRBuilder::FinalizationInfo FI({FiniCB, Kind, HasCancel});
OMPBuilder->pushFinalizationCB(std::move(FI));
}
~PushAndPopStackRAII() {
@@ -1264,7 +1280,7 @@ static llvm::Function *emitParallelOrTeamsOutlinedFunction(
// TODO: Temporarily inform the OpenMPIRBuilder, if any, about the new
// parallel region to make cancellation barriers work properly.
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
- PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel);
+ PushAndPopStackRAII PSR(&OMPBuilder, CGF, HasCancel, InnermostKind);
CGOpenMPOutlinedRegionInfo CGInfo(*CS, ThreadIDVar, CodeGen, InnermostKind,
HasCancel, OutlinedHelperName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
@@ -1648,9 +1664,13 @@ static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
assert(PLoc.isValid() && "Source location is expected to be always valid.");
llvm::sys::fs::UniqueID ID;
- if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
- SM.getDiagnostics().Report(diag::err_cannot_open_file)
- << PLoc.getFilename() << EC.message();
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
+ PLoc = SM.getPresumedLoc(Loc, /*UseLineDirectives=*/false);
+ assert(PLoc.isValid() && "Source location is expected to be always valid.");
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
+ SM.getDiagnostics().Report(diag::err_cannot_open_file)
+ << PLoc.getFilename() << EC.message();
+ }
DeviceID = ID.getDevice();
FileID = ID.getFile();
@@ -2263,6 +2283,35 @@ void CGOpenMPRuntime::emitMasterRegion(CodeGenFunction &CGF,
Action.Done(CGF);
}
+void CGOpenMPRuntime::emitMaskedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MaskedOpGen,
+ SourceLocation Loc, const Expr *Filter) {
+ if (!CGF.HaveInsertPoint())
+ return;
+ // if(__kmpc_masked(ident_t *, gtid, filter)) {
+ // MaskedOpGen();
+ // __kmpc_end_masked(ident_t *, gtid);
+ // }
+ // Prepare arguments and build a call to __kmpc_masked
+ llvm::Value *FilterVal = Filter
+ ? CGF.EmitScalarExpr(Filter, CGF.Int32Ty)
+ : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
+ llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ FilterVal};
+ llvm::Value *ArgsEnd[] = {emitUpdateLocation(CGF, Loc),
+ getThreadID(CGF, Loc)};
+ CommonActionTy Action(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_masked),
+ Args,
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_end_masked),
+ ArgsEnd,
+ /*Conditional=*/true);
+ MaskedOpGen.setAction(Action);
+ emitInlinedDirective(CGF, OMPD_masked, MaskedOpGen);
+ Action.Done(CGF);
+}
+
void CGOpenMPRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
if (!CGF.HaveInsertPoint())
@@ -2951,8 +3000,7 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
if (CGM.getLangOpts().OpenMPIsDevice) {
// This could happen if the device compilation is invoked standalone.
if (!hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum))
- initializeTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
- OffloadingEntriesNum);
+ return;
auto &Entry =
OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum];
Entry.setAddress(Addr);
@@ -3023,10 +3071,8 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
if (CGM.getLangOpts().OpenMPIsDevice) {
// This could happen if the device compilation is invoked standalone.
if (!hasDeviceGlobalVarEntryInfo(VarName))
- initializeDeviceGlobalVarEntryInfo(VarName, Flags, OffloadingEntriesNum);
+ return;
auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
- assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
- "Resetting with the new address.");
if (Entry.getAddress() && hasDeviceGlobalVarEntryInfo(VarName)) {
if (Entry.getVarSize().isZero()) {
Entry.setVarSize(VarSize);
@@ -3042,8 +3088,6 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
auto &Entry = OffloadEntriesDeviceGlobalVar[VarName];
assert(Entry.isValid() && Entry.getFlags() == Flags &&
"Entry not initialized!");
- assert((!Entry.getAddress() || Entry.getAddress() == Addr) &&
- "Resetting with the new address.");
if (Entry.getVarSize().isZero()) {
Entry.setVarSize(VarSize);
Entry.setLinkage(Linkage);
@@ -3853,7 +3897,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// Processing for implicitly captured variables.
InlinedOpenMPRegionRAII Region(
CGF, [](CodeGenFunction &, PrePostActionTy &) {}, OMPD_unknown,
- /*HasCancel=*/false);
+ /*HasCancel=*/false, /*NoInheritance=*/true);
SharedRefLValue = CGF.EmitLValue(Pair.second.OriginalRef);
}
if (Type->isArrayType()) {
@@ -4115,8 +4159,9 @@ getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
dyn_cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal =
CGF.EmitOMPArraySectionExpr(ASE, /*IsLowerBound=*/false);
- llvm::Value *UpAddr =
- CGF.Builder.CreateConstGEP1_32(UpAddrLVal.getPointer(CGF), /*Idx0=*/1);
+ Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
+ llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
+ UpAddrAddress.getElementType(), UpAddrAddress.getPointer(), /*Idx0=*/1);
llvm::Value *LowIntPtr = CGF.Builder.CreatePtrToInt(Addr, CGF.SizeTy);
llvm::Value *UpIntPtr = CGF.Builder.CreatePtrToInt(UpAddr, CGF.SizeTy);
SizeVal = CGF.Builder.CreateNUWSub(UpIntPtr, LowIntPtr);
@@ -4249,7 +4294,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
// kmp_routine_entry_t *task_entry);
// Task flags. Format is taken from
- // https://github.com/llvm/llvm-project/blob/master/openmp/runtime/src/kmp.h,
+ // https://github.com/llvm/llvm-project/blob/main/openmp/runtime/src/kmp.h,
// description of kmp_tasking_flags struct.
enum {
TiedFlag = 0x1,
@@ -4358,7 +4403,7 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
OpaqueValueExpr OVE(
Loc,
C.getIntTypeForBitwidth(C.getTypeSize(C.getSizeType()), /*Signed=*/0),
- VK_RValue);
+ VK_PRValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
RValue::get(NumOfElements));
KmpTaskAffinityInfoArrayTy =
@@ -4431,7 +4476,8 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
std::tie(Addr, Size) = getPointerAndSize(CGF, E);
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
LValue Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(AffinitiesArray.getPointer(), Idx),
+ Address(CGF.Builder.CreateGEP(AffinitiesArray.getElementType(),
+ AffinitiesArray.getPointer(), Idx),
AffinitiesArray.getAlignment()),
KmpTaskAffinityInfoTy);
// affs[i].base_addr = &<Affinities[i].second>;
@@ -4598,7 +4644,7 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getPointer(),
+ Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
@@ -4638,7 +4684,8 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
- Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Idx),
+ Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
+ DependenciesArray.getPointer(), Idx),
DependenciesArray.getAlignment()),
KmpDependInfoTy);
}
@@ -4698,7 +4745,7 @@ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getPointer(),
+ Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
@@ -4759,7 +4806,7 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
// Get number of elements in a single depobj.
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getPointer(),
+ Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
Address(DepObjAddr, Addr.getAlignment()), KmpDependInfoTy,
@@ -4776,7 +4823,8 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Address DepAddr =
- Address(CGF.Builder.CreateGEP(DependenciesArray.getPointer(), Pos),
+ Address(CGF.Builder.CreateGEP(DependenciesArray.getElementType(),
+ DependenciesArray.getPointer(), Pos),
DependenciesArray.getAlignment());
CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
@@ -4852,7 +4900,7 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
}
OpaqueValueExpr OVE(Loc,
C.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/0),
- VK_RValue);
+ VK_PRValue);
CodeGenFunction::OpaqueValueMapping OpaqueMap(CGF, &OVE,
RValue::get(NumOfElements));
KmpDependInfoArrayTy =
@@ -5004,7 +5052,7 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
- Addr.getPointer(),
+ Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
DepObjAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(DepObjAddr,
CGF.VoidPtrTy);
@@ -5034,7 +5082,8 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
Address Begin = Base.getAddress(CGF);
// Cast from pointer to array type to pointer to single element.
- llvm::Value *End = CGF.Builder.CreateGEP(Begin.getPointer(), NumDeps);
+ llvm::Value *End = CGF.Builder.CreateGEP(
+ Begin.getElementType(), Begin.getPointer(), NumDeps);
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.done");
@@ -5297,7 +5346,8 @@ static void EmitOMPAggregateReduction(
llvm::Value *RHSBegin = RHSAddr.getPointer();
llvm::Value *LHSBegin = LHSAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- llvm::Value *LHSEnd = CGF.Builder.CreateGEP(LHSBegin, NumElements);
+ llvm::Value *LHSEnd =
+ CGF.Builder.CreateGEP(LHSAddr.getElementType(), LHSBegin, NumElements);
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = CGF.createBasicBlock("omp.arraycpy.body");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("omp.arraycpy.done");
@@ -5335,9 +5385,11 @@ static void EmitOMPAggregateReduction(
// Shift the address forward by one element.
llvm::Value *LHSElementNext = CGF.Builder.CreateConstGEP1_32(
- LHSElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ LHSAddr.getElementType(), LHSElementPHI, /*Idx0=*/1,
+ "omp.arraycpy.dest.element");
llvm::Value *RHSElementNext = CGF.Builder.CreateConstGEP1_32(
- RHSElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
+ RHSAddr.getElementType(), RHSElementPHI, /*Idx0=*/1,
+ "omp.arraycpy.src.element");
// Check whether we've reached the end.
llvm::Value *Done =
CGF.Builder.CreateICmpEQ(LHSElementNext, LHSEnd, "omp.arraycpy.done");
@@ -6214,7 +6266,10 @@ void CGOpenMPRuntime::emitInlinedDirective(CodeGenFunction &CGF,
bool HasCancel) {
if (!CGF.HaveInsertPoint())
return;
- InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel);
+ InlinedOpenMPRegionRAII Region(CGF, CodeGen, InnerKind, HasCancel,
+ InnerKind != OMPD_critical &&
+ InnerKind != OMPD_master &&
+ InnerKind != OMPD_masked);
CGF.CapturedStmtInfo->EmitBody(CGF, /*S=*/nullptr);
}
@@ -6264,6 +6319,7 @@ void CGOpenMPRuntime::emitCancellationPointCall(
CGM.getModule(), OMPRTL___kmpc_cancellationpoint),
Args);
// if (__kmpc_cancellationpoint()) {
+ // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
// exit from construct;
// }
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
@@ -6271,6 +6327,8 @@ void CGOpenMPRuntime::emitCancellationPointCall(
llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
+ if (CancelRegion == OMPD_parallel)
+ emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
// exit from construct;
CodeGenFunction::JumpDest CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
@@ -6300,6 +6358,7 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *Result = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(M, OMPRTL___kmpc_cancel), Args);
// if (__kmpc_cancel()) {
+ // call i32 @__kmpc_cancel_barrier( // for parallel cancellation only
// exit from construct;
// }
llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".cancel.exit");
@@ -6307,6 +6366,8 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *Cmp = CGF.Builder.CreateIsNotNull(Result);
CGF.Builder.CreateCondBr(Cmp, ExitBB, ContBB);
CGF.EmitBlock(ExitBB);
+ if (CancelRegion == OMPD_parallel)
+ RT.emitBarrierCall(CGF, Loc, OMPD_unknown, /*EmitChecks=*/false);
// exit from construct;
CodeGenFunction::JumpDest CancelDest =
CGF.getOMPCancelDestination(OMPRegionInfo->getDirectiveKind());
@@ -6490,6 +6551,20 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
+
+ // Add NumTeams and ThreadLimit attributes to the outlined GPU function
+ int32_t DefaultValTeams = -1;
+ getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
+ if (DefaultValTeams > 0) {
+ OutlinedFn->addFnAttr("omp_target_num_teams",
+ std::to_string(DefaultValTeams));
+ }
+ int32_t DefaultValThreads = -1;
+ getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
+ if (DefaultValThreads > 0) {
+ OutlinedFn->addFnAttr("omp_target_thread_limit",
+ std::to_string(DefaultValThreads));
+ }
}
/// Checks if the expression is constant or does not have non-trivial function
@@ -6518,7 +6593,7 @@ const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
continue;
// Analyze declarations.
if (const auto *DS = dyn_cast<DeclStmt>(S)) {
- if (llvm::all_of(DS->decls(), [&Ctx](const Decl *D) {
+ if (llvm::all_of(DS->decls(), [](const Decl *D) {
if (isa<EmptyDecl>(D) || isa<DeclContext>(D) ||
isa<TypeDecl>(D) || isa<PragmaCommentDecl>(D) ||
isa<PragmaDetectMismatchDecl>(D) || isa<UsingDecl>(D) ||
@@ -6529,10 +6604,7 @@ const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
const auto *VD = dyn_cast<VarDecl>(D);
if (!VD)
return false;
- return VD->isConstexpr() ||
- ((VD->getType().isTrivialType(Ctx) ||
- VD->getType()->isReferenceType()) &&
- (!VD->hasInit() || isTrivial(Ctx, VD->getInit())));
+ return VD->hasGlobalStorage() || !VD->isUsed();
}))
continue;
}
@@ -6547,24 +6619,13 @@ const Stmt *CGOpenMPRuntime::getSingleCompoundChild(ASTContext &Ctx,
return Child;
}
-/// Emit the number of teams for a target directive. Inspect the num_teams
-/// clause associated with a teams construct combined or closely nested
-/// with the target directive.
-///
-/// Emit a team of size one for directives such as 'target parallel' that
-/// have no associated teams construct.
-///
-/// Otherwise, return nullptr.
-static llvm::Value *
-emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D) {
- assert(!CGF.getLangOpts().OpenMPIsDevice &&
- "Clauses associated with the teams directive expected to be emitted "
- "only for the host!");
+const Expr *CGOpenMPRuntime::getNumTeamsExprForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ int32_t &DefaultVal) {
+
OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
"Expected target-based executable directive.");
- CGBuilderTy &Bld = CGF.Builder;
switch (DirectiveKind) {
case OMPD_target: {
const auto *CS = D.getInnermostCapturedStmt();
@@ -6576,23 +6637,27 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
dyn_cast_or_null<OMPExecutableDirective>(ChildStmt)) {
if (isOpenMPTeamsDirective(NestedDir->getDirectiveKind())) {
if (NestedDir->hasClausesOfKind<OMPNumTeamsClause>()) {
- CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
const Expr *NumTeams =
NestedDir->getSingleClause<OMPNumTeamsClause>()->getNumTeams();
- llvm::Value *NumTeamsVal =
- CGF.EmitScalarExpr(NumTeams,
- /*IgnoreResultAssign*/ true);
- return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
- /*isSigned=*/true);
+ if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
+ if (auto Constant =
+ NumTeams->getIntegerConstantExpr(CGF.getContext()))
+ DefaultVal = Constant->getExtValue();
+ return NumTeams;
}
- return Bld.getInt32(0);
+ DefaultVal = 0;
+ return nullptr;
}
if (isOpenMPParallelDirective(NestedDir->getDirectiveKind()) ||
- isOpenMPSimdDirective(NestedDir->getDirectiveKind()))
- return Bld.getInt32(1);
- return Bld.getInt32(0);
+ isOpenMPSimdDirective(NestedDir->getDirectiveKind())) {
+ DefaultVal = 1;
+ return nullptr;
+ }
+ DefaultVal = 1;
+ return nullptr;
}
+ // A value of -1 is used to check if we need to emit no teams region
+ DefaultVal = -1;
return nullptr;
}
case OMPD_target_teams:
@@ -6601,22 +6666,22 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd: {
if (D.hasClausesOfKind<OMPNumTeamsClause>()) {
- CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
const Expr *NumTeams =
D.getSingleClause<OMPNumTeamsClause>()->getNumTeams();
- llvm::Value *NumTeamsVal =
- CGF.EmitScalarExpr(NumTeams,
- /*IgnoreResultAssign*/ true);
- return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
- /*isSigned=*/true);
+ if (NumTeams->isIntegerConstantExpr(CGF.getContext()))
+ if (auto Constant = NumTeams->getIntegerConstantExpr(CGF.getContext()))
+ DefaultVal = Constant->getExtValue();
+ return NumTeams;
}
- return Bld.getInt32(0);
+ DefaultVal = 0;
+ return nullptr;
}
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
case OMPD_target_simd:
- return Bld.getInt32(1);
+ DefaultVal = 1;
+ return nullptr;
case OMPD_parallel:
case OMPD_for:
case OMPD_parallel_for:
@@ -6631,6 +6696,8 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
@@ -6680,6 +6747,48 @@ emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
llvm_unreachable("Unexpected directive kind.");
}
+llvm::Value *CGOpenMPRuntime::emitNumTeamsForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) {
+ assert(!CGF.getLangOpts().OpenMPIsDevice &&
+ "Clauses associated with the teams directive expected to be emitted "
+ "only for the host!");
+ CGBuilderTy &Bld = CGF.Builder;
+ int32_t DefaultNT = -1;
+ const Expr *NumTeams = getNumTeamsExprForTargetDirective(CGF, D, DefaultNT);
+ if (NumTeams != nullptr) {
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+
+ switch (DirectiveKind) {
+ case OMPD_target: {
+ const auto *CS = D.getInnermostCapturedStmt();
+ CGOpenMPInnerExprInfo CGInfo(CGF, *CS);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
+ llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
+ /*IgnoreResultAssign*/ true);
+ return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
+ /*isSigned=*/true);
+ }
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd: {
+ CodeGenFunction::RunCleanupsScope NumTeamsScope(CGF);
+ llvm::Value *NumTeamsVal = CGF.EmitScalarExpr(NumTeams,
+ /*IgnoreResultAssign*/ true);
+ return Bld.CreateIntCast(NumTeamsVal, CGF.Int32Ty,
+ /*isSigned=*/true);
+ }
+ default:
+ break;
+ }
+ } else if (DefaultNT == -1) {
+ return nullptr;
+ }
+
+ return Bld.getInt32(DefaultNT);
+}
+
static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
llvm::Value *DefaultThreadLimitVal) {
const Stmt *Child = CGOpenMPRuntime::getSingleCompoundChild(
@@ -6772,17 +6881,130 @@ static llvm::Value *getNumThreads(CodeGenFunction &CGF, const CapturedStmt *CS,
: CGF.Builder.getInt32(0);
}
-/// Emit the number of threads for a target directive. Inspect the
-/// thread_limit clause associated with a teams construct combined or closely
-/// nested with the target directive.
-///
-/// Emit the num_threads clause for directives such as 'target parallel' that
-/// have no associated teams construct.
-///
-/// Otherwise, return nullptr.
-static llvm::Value *
-emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
- const OMPExecutableDirective &D) {
+const Expr *CGOpenMPRuntime::getNumThreadsExprForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D,
+ int32_t &DefaultVal) {
+ OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
+ assert(isOpenMPTargetExecutionDirective(DirectiveKind) &&
+ "Expected target-based executable directive.");
+
+ switch (DirectiveKind) {
+ case OMPD_target:
+ // Teams have no clause thread_limit
+ return nullptr;
+ case OMPD_target_teams:
+ case OMPD_target_teams_distribute:
+ if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
+ const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
+ const Expr *ThreadLimit = ThreadLimitClause->getThreadLimit();
+ if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
+ if (auto Constant =
+ ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
+ DefaultVal = Constant->getExtValue();
+ return ThreadLimit;
+ }
+ return nullptr;
+ case OMPD_target_parallel:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd: {
+ Expr *ThreadLimit = nullptr;
+ Expr *NumThreads = nullptr;
+ if (D.hasClausesOfKind<OMPThreadLimitClause>()) {
+ const auto *ThreadLimitClause = D.getSingleClause<OMPThreadLimitClause>();
+ ThreadLimit = ThreadLimitClause->getThreadLimit();
+ if (ThreadLimit->isIntegerConstantExpr(CGF.getContext()))
+ if (auto Constant =
+ ThreadLimit->getIntegerConstantExpr(CGF.getContext()))
+ DefaultVal = Constant->getExtValue();
+ }
+ if (D.hasClausesOfKind<OMPNumThreadsClause>()) {
+ const auto *NumThreadsClause = D.getSingleClause<OMPNumThreadsClause>();
+ NumThreads = NumThreadsClause->getNumThreads();
+ if (NumThreads->isIntegerConstantExpr(CGF.getContext())) {
+ if (auto Constant =
+ NumThreads->getIntegerConstantExpr(CGF.getContext())) {
+ if (Constant->getExtValue() < DefaultVal) {
+ DefaultVal = Constant->getExtValue();
+ ThreadLimit = NumThreads;
+ }
+ }
+ }
+ }
+ return ThreadLimit;
+ }
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_target_simd:
+ DefaultVal = 1;
+ return nullptr;
+ case OMPD_parallel:
+ case OMPD_for:
+ case OMPD_parallel_for:
+ case OMPD_parallel_master:
+ case OMPD_parallel_sections:
+ case OMPD_for_simd:
+ case OMPD_parallel_for_simd:
+ case OMPD_cancel:
+ case OMPD_cancellation_point:
+ case OMPD_ordered:
+ case OMPD_threadprivate:
+ case OMPD_allocate:
+ case OMPD_task:
+ case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_taskgroup:
+ case OMPD_atomic:
+ case OMPD_flush:
+ case OMPD_depobj:
+ case OMPD_scan:
+ case OMPD_teams:
+ case OMPD_target_data:
+ case OMPD_target_exit_data:
+ case OMPD_target_enter_data:
+ case OMPD_distribute:
+ case OMPD_distribute_simd:
+ case OMPD_distribute_parallel_for:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_target_update:
+ case OMPD_declare_simd:
+ case OMPD_declare_variant:
+ case OMPD_begin_declare_variant:
+ case OMPD_end_declare_variant:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_declare_reduction:
+ case OMPD_declare_mapper:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_master_taskloop:
+ case OMPD_master_taskloop_simd:
+ case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_master_taskloop_simd:
+ case OMPD_requires:
+ case OMPD_unknown:
+ break;
+ default:
+ break;
+ }
+ llvm_unreachable("Unsupported directive kind.");
+}
+
+llvm::Value *CGOpenMPRuntime::emitNumThreadsForTargetDirective(
+ CodeGenFunction &CGF, const OMPExecutableDirective &D) {
assert(!CGF.getLangOpts().OpenMPIsDevice &&
"Clauses associated with the teams directive expected to be emitted "
"only for the host!");
@@ -6948,6 +7170,8 @@ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
@@ -7149,12 +7373,15 @@ public:
/// [ValueDecl *] --> {LE(FieldIndex, Pointer),
/// HE(FieldIndex, Pointer)}
struct StructRangeInfoTy {
+ MapCombinedInfoTy PreliminaryMapData;
std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> LowestElem = {
0, Address::invalid()};
std::pair<unsigned /*FieldIndex*/, Address /*Pointer*/> HighestElem = {
0, Address::invalid()};
Address Base = Address::invalid();
+ Address LB = Address::invalid();
bool IsArraySection = false;
+ bool HasCompleteRecord = false;
};
private:
@@ -7337,11 +7564,10 @@ private:
if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_close)
!= MapModifiers.end())
Bits |= OMP_MAP_CLOSE;
- if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present)
- != MapModifiers.end())
- Bits |= OMP_MAP_PRESENT;
- if (llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present)
- != MotionModifiers.end())
+ if (llvm::find(MapModifiers, OMPC_MAP_MODIFIER_present) !=
+ MapModifiers.end() ||
+ llvm::find(MotionModifiers, OMPC_MOTION_MODIFIER_present) !=
+ MotionModifiers.end())
Bits |= OMP_MAP_PRESENT;
if (IsNonContiguous)
Bits |= OMP_MAP_NON_CONTIG;
@@ -7421,6 +7647,7 @@ private:
// S1 s;
// double *p;
// struct S2 *ps;
+ // int &ref;
// }
// S2 s;
// S2 *ps;
@@ -7464,6 +7691,14 @@ private:
// optimizes this entry out, same in the examples below)
// (***) map the pointee (map: to)
//
+ // map(to: s.ref)
+ // &s, &(s.ref), sizeof(int*), TARGET_PARAM (*)
+ // &s, &(s.ref), sizeof(int), MEMBER_OF(1) | PTR_AND_OBJ | TO (***)
+ // (*) alloc space for struct members, only this is a target parameter
+ // (**) map the pointer (nothing to be mapped in this example) (the compiler
+ // optimizes this entry out, same in the examples below)
+ // (***) map the pointee (map: to)
+ //
// map(s.ps)
// &s, &(s.ps), sizeof(S2*), TARGET_PARAM | TO | FROM
//
@@ -7661,6 +7896,7 @@ private:
uint64_t DimSize = 1;
bool IsNonContiguous = CombinedInfo.NonContigInfo.IsNonContiguous;
+ bool IsPrevMemberReference = false;
for (; I != CE; ++I) {
// If the current component is member of a struct (parent struct) mark it.
@@ -7702,6 +7938,8 @@ private:
const ValueDecl *MapDecl = (I->getAssociatedDeclaration())
? I->getAssociatedDeclaration()
: BaseDecl;
+ MapExpr = (I->getAssociatedExpression()) ? I->getAssociatedExpression()
+ : MapExpr;
// Get information on whether the element is a pointer. Have to do a
// special treatment for array sections given that they are built-in
@@ -7718,12 +7956,16 @@ private:
.getCanonicalType()
->isAnyPointerType()) ||
I->getAssociatedExpression()->getType()->isAnyPointerType();
+ bool IsMemberReference = isa<MemberExpr>(I->getAssociatedExpression()) &&
+ MapDecl &&
+ MapDecl->getType()->isLValueReferenceType();
bool IsNonDerefPointer = IsPointer && !UO && !BO && !IsNonContiguous;
if (OASE)
++DimSize;
- if (Next == CE || IsNonDerefPointer || IsFinalArraySection) {
+ if (Next == CE || IsMemberReference || IsNonDerefPointer ||
+ IsFinalArraySection) {
// If this is not the last component, we expect the pointer to be
// associated with an array expression or member expression.
assert((Next == CE ||
@@ -7736,36 +7978,65 @@ private:
"Unexpected expression");
Address LB = Address::invalid();
+ Address LowestElem = Address::invalid();
+ auto &&EmitMemberExprBase = [](CodeGenFunction &CGF,
+ const MemberExpr *E) {
+ const Expr *BaseExpr = E->getBase();
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a
+ // scalar.
+ LValue BaseLV;
+ if (E->isArrow()) {
+ LValueBaseInfo BaseInfo;
+ TBAAAccessInfo TBAAInfo;
+ Address Addr =
+ CGF.EmitPointerWithAlignment(BaseExpr, &BaseInfo, &TBAAInfo);
+ QualType PtrTy = BaseExpr->getType()->getPointeeType();
+ BaseLV = CGF.MakeAddrLValue(Addr, PtrTy, BaseInfo, TBAAInfo);
+ } else {
+ BaseLV = CGF.EmitOMPSharedLValue(BaseExpr);
+ }
+ return BaseLV;
+ };
if (OAShE) {
- LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
- CGF.getContext().getTypeAlignInChars(
- OAShE->getBase()->getType()));
- } else {
- LB = CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
+ LowestElem = LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.getContext().getTypeAlignInChars(
+ OAShE->getBase()->getType()));
+ } else if (IsMemberReference) {
+ const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
+ LValue BaseLVal = EmitMemberExprBase(CGF, ME);
+ LowestElem = CGF.EmitLValueForFieldInitialization(
+ BaseLVal, cast<FieldDecl>(MapDecl))
+ .getAddress(CGF);
+ LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
.getAddress(CGF);
+ } else {
+ LowestElem = LB =
+ CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
+ .getAddress(CGF);
}
// If this component is a pointer inside the base struct then we don't
// need to create any entry for it - it will be combined with the object
// it is pointing to into a single PTR_AND_OBJ entry.
bool IsMemberPointerOrAddr =
- (IsPointer || ForDeviceAddr) && EncounteredME &&
- (dyn_cast<MemberExpr>(I->getAssociatedExpression()) ==
- EncounteredME);
- if (!OverlappedElements.empty()) {
+ EncounteredME &&
+ (((IsPointer || ForDeviceAddr) &&
+ I->getAssociatedExpression() == EncounteredME) ||
+ (IsPrevMemberReference && !IsPointer) ||
+ (IsMemberReference && Next != CE &&
+ !Next->getAssociatedExpression()->getType()->isPointerType()));
+ if (!OverlappedElements.empty() && Next == CE) {
// Handle base element with the info for overlapped elements.
assert(!PartialStruct.Base.isValid() && "The base element is set.");
- assert(Next == CE &&
- "Expected last element for the overlapped elements.");
assert(!IsPointer &&
"Unexpected base element with the pointer type.");
// Mark the whole struct as the struct that requires allocation on the
// device.
- PartialStruct.LowestElem = {0, LB};
+ PartialStruct.LowestElem = {0, LowestElem};
CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
I->getAssociatedExpression()->getType());
Address HB = CGF.Builder.CreateConstGEP(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LB,
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LowestElem,
CGF.VoidPtrTy),
TypeSize.getQuantity() - 1);
PartialStruct.HighestElem = {
@@ -7773,13 +8044,17 @@ private:
PartialStruct.HighestElem.first)>::max(),
HB};
PartialStruct.Base = BP;
+ PartialStruct.LB = LB;
+ assert(
+ PartialStruct.PreliminaryMapData.BasePointers.empty() &&
+ "Overlapped elements must be used only once for the variable.");
+ std::swap(PartialStruct.PreliminaryMapData, CombinedInfo);
// Emit data for non-overlapped data.
OpenMPOffloadMappingFlags Flags =
OMP_MAP_MEMBER_OF |
getMapTypeBits(MapType, MapModifiers, MotionModifiers, IsImplicit,
/*AddPtrFlag=*/false,
/*AddIsTargetParamFlag=*/false, IsNonContiguous);
- LB = BP;
llvm::Value *Size = nullptr;
// Do bitcopy of all non-overlapped structure elements.
for (OMPClauseMappableExprCommon::MappableExprComponentListRef
@@ -7787,10 +8062,20 @@ private:
Address ComponentLB = Address::invalid();
for (const OMPClauseMappableExprCommon::MappableComponent &MC :
Component) {
- if (MC.getAssociatedDeclaration()) {
- ComponentLB =
- CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress(CGF);
+ if (const ValueDecl *VD = MC.getAssociatedDeclaration()) {
+ const auto *FD = dyn_cast<FieldDecl>(VD);
+ if (FD && FD->getType()->isLValueReferenceType()) {
+ const auto *ME =
+ cast<MemberExpr>(MC.getAssociatedExpression());
+ LValue BaseLVal = EmitMemberExprBase(CGF, ME);
+ ComponentLB =
+ CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
+ .getAddress(CGF);
+ } else {
+ ComponentLB =
+ CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
+ .getAddress(CGF);
+ }
Size = CGF.Builder.CreatePtrDiff(
CGF.EmitCastToVoidPtr(ComponentLB.getPointer()),
CGF.EmitCastToVoidPtr(LB.getPointer()));
@@ -7813,8 +8098,7 @@ private:
CombinedInfo.BasePointers.push_back(BP.getPointer());
CombinedInfo.Pointers.push_back(LB.getPointer());
Size = CGF.Builder.CreatePtrDiff(
- CGF.EmitCastToVoidPtr(
- CGF.Builder.CreateConstGEP(HB, 1).getPointer()),
+ CGF.Builder.CreateConstGEP(HB, 1).getPointer(),
CGF.EmitCastToVoidPtr(LB.getPointer()));
CombinedInfo.Sizes.push_back(
CGF.Builder.CreateIntCast(Size, CGF.Int64Ty, /*isSigned=*/true));
@@ -7846,13 +8130,13 @@ private:
OpenMPOffloadMappingFlags Flags = getMapTypeBits(
MapType, MapModifiers, MotionModifiers, IsImplicit,
!IsExpressionFirstInfo || RequiresReference ||
- FirstPointerInComplexData,
+ FirstPointerInComplexData || IsMemberReference,
IsCaptureFirstInfo && !RequiresReference, IsNonContiguous);
- if (!IsExpressionFirstInfo) {
+ if (!IsExpressionFirstInfo || IsMemberReference) {
// If we have a PTR_AND_OBJ pair where the OBJ is a pointer as well,
// then we reset the TO/FROM/ALWAYS/DELETE/CLOSE flags.
- if (IsPointer)
+ if (IsPointer || (IsMemberReference && Next != CE))
Flags &= ~(OMP_MAP_TO | OMP_MAP_FROM | OMP_MAP_ALWAYS |
OMP_MAP_DELETE | OMP_MAP_CLOSE);
@@ -7878,20 +8162,21 @@ private:
// Update info about the lowest and highest elements for this struct
if (!PartialStruct.Base.isValid()) {
- PartialStruct.LowestElem = {FieldIndex, LB};
+ PartialStruct.LowestElem = {FieldIndex, LowestElem};
if (IsFinalArraySection) {
Address HB =
CGF.EmitOMPArraySectionExpr(OASE, /*IsLowerBound=*/false)
.getAddress(CGF);
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
- PartialStruct.HighestElem = {FieldIndex, LB};
+ PartialStruct.HighestElem = {FieldIndex, LowestElem};
}
PartialStruct.Base = BP;
+ PartialStruct.LB = BP;
} else if (FieldIndex < PartialStruct.LowestElem.first) {
- PartialStruct.LowestElem = {FieldIndex, LB};
+ PartialStruct.LowestElem = {FieldIndex, LowestElem};
} else if (FieldIndex > PartialStruct.HighestElem.first) {
- PartialStruct.HighestElem = {FieldIndex, LB};
+ PartialStruct.HighestElem = {FieldIndex, LowestElem};
}
}
@@ -7905,11 +8190,12 @@ private:
// The pointer becomes the base for the next element.
if (Next != CE)
- BP = LB;
+ BP = IsMemberReference ? LowestElem : LB;
IsExpressionFirstInfo = false;
IsCaptureFirstInfo = false;
FirstPointerInComplexData = false;
+ IsPrevMemberReference = IsMemberReference;
} else if (FirstPointerInComplexData) {
QualType Ty = Components.rbegin()
->getAssociatedDeclaration()
@@ -7919,6 +8205,10 @@ private:
FirstPointerInComplexData = false;
}
}
+ // If ran into the whole component - allocate the space for the whole
+ // record.
+ if (!EncounteredME)
+ PartialStruct.HasCompleteRecord = true;
if (!IsNonContiguous)
return;
@@ -8108,10 +8398,6 @@ private:
// 'private ptr' and 'map to' flag. Return the right flags if the captured
// declaration is known as first-private in this handler.
if (FirstPrivateDecls.count(Cap.getCapturedVar())) {
- if (Cap.getCapturedVar()->getType().isConstant(CGF.getContext()) &&
- Cap.getCaptureKind() == CapturedStmt::VCK_ByRef)
- return MappableExprsHandler::OMP_MAP_ALWAYS |
- MappableExprsHandler::OMP_MAP_TO;
if (Cap.getCapturedVar()->getType()->isAnyPointerType())
return MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_PTR_AND_OBJ;
@@ -8203,151 +8489,100 @@ private:
}
}
-public:
- MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
- : CurDir(&Dir), CGF(CGF) {
- // Extract firstprivate clause information.
- for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
- for (const auto *D : C->varlists())
- FirstPrivateDecls.try_emplace(
- cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
- // Extract implicit firstprivates from uses_allocators clauses.
- for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
- for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
- OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
- if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
- FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
- /*Implicit=*/true);
- else if (const auto *VD = dyn_cast<VarDecl>(
- cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
- ->getDecl()))
- FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
- }
- }
- // Extract device pointer clause information.
- for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
- for (auto L : C->component_lists())
- DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
- }
-
- /// Constructor for the declare mapper directive.
- MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
- : CurDir(&Dir), CGF(CGF) {}
-
- /// Generate code for the combined entry if we have a partially mapped struct
- /// and take care of the mapping flags of the arguments corresponding to
- /// individual struct members.
- void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
- MapFlagsArrayTy &CurTypes,
- const StructRangeInfoTy &PartialStruct,
- const ValueDecl *VD = nullptr,
- bool NotTargetParams = true) const {
- if (CurTypes.size() == 1 &&
- ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
- !PartialStruct.IsArraySection)
- return;
- CombinedInfo.Exprs.push_back(VD);
- // Base is the base of the struct
- CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
- // Pointer is the address of the lowest element
- llvm::Value *LB = PartialStruct.LowestElem.second.getPointer();
- CombinedInfo.Pointers.push_back(LB);
- // There should not be a mapper for a combined entry.
- CombinedInfo.Mappers.push_back(nullptr);
- // Size is (addr of {highest+1} element) - (addr of lowest element)
- llvm::Value *HB = PartialStruct.HighestElem.second.getPointer();
- llvm::Value *HAddr = CGF.Builder.CreateConstGEP1_32(HB, /*Idx0=*/1);
- llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
- llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
- llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
- llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
- /*isSigned=*/false);
- CombinedInfo.Sizes.push_back(Size);
- // Map type is always TARGET_PARAM, if generate info for captures.
- CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
- : OMP_MAP_TARGET_PARAM);
- // If any element has the present modifier, then make sure the runtime
- // doesn't attempt to allocate the struct.
- if (CurTypes.end() !=
- llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
- return Type & OMP_MAP_PRESENT;
- }))
- CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
- // Remove TARGET_PARAM flag from the first element if any.
- if (!CurTypes.empty())
- CurTypes.front() &= ~OMP_MAP_TARGET_PARAM;
-
- // All other current entries will be MEMBER_OF the combined entry
- // (except for PTR_AND_OBJ entries which do not have a placeholder value
- // 0xFFFF in the MEMBER_OF field).
- OpenMPOffloadMappingFlags MemberOfFlag =
- getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
- for (auto &M : CurTypes)
- setCorrectMemberOfFlag(M, MemberOfFlag);
- }
-
/// Generate all the base pointers, section pointers, sizes, map types, and
/// mappers for the extracted mappable expressions (all included in \a
/// CombinedInfo). Also, for each item that relates with a device pointer, a
/// pair of the relevant declaration and index where it occurs is appended to
/// the device pointers info array.
- void generateAllInfo(
- MapCombinedInfoTy &CombinedInfo,
+ void generateAllInfoForClauses(
+ ArrayRef<const OMPClause *> Clauses, MapCombinedInfoTy &CombinedInfo,
const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
// We have to process the component lists that relate with the same
// declaration in a single chunk so that we can generate the map flags
// correctly. Therefore, we organize all lists in a map.
- llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
+ enum MapKind { Present, Allocs, Other, Total };
+ llvm::MapVector<CanonicalDeclPtr<const Decl>,
+ SmallVector<SmallVector<MapInfo, 8>, 4>>
+ Info;
// Helper function to fill the information map for the different supported
// clauses.
auto &&InfoGen =
[&Info, &SkipVarSet](
- const ValueDecl *D,
+ const ValueDecl *D, MapKind Kind,
OMPClauseMappableExprCommon::MappableExprComponentListRef L,
OpenMPMapClauseKind MapType,
ArrayRef<OpenMPMapModifierKind> MapModifiers,
ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
bool ReturnDevicePointer, bool IsImplicit, const ValueDecl *Mapper,
const Expr *VarRef = nullptr, bool ForDeviceAddr = false) {
- const ValueDecl *VD =
- D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- if (SkipVarSet.count(VD))
+ if (SkipVarSet.contains(D))
return;
- Info[VD].emplace_back(L, MapType, MapModifiers, MotionModifiers,
- ReturnDevicePointer, IsImplicit, Mapper, VarRef,
- ForDeviceAddr);
+ auto It = Info.find(D);
+ if (It == Info.end())
+ It = Info
+ .insert(std::make_pair(
+ D, SmallVector<SmallVector<MapInfo, 8>, 4>(Total)))
+ .first;
+ It->second[Kind].emplace_back(
+ L, MapType, MapModifiers, MotionModifiers, ReturnDevicePointer,
+ IsImplicit, Mapper, VarRef, ForDeviceAddr);
};
- assert(CurDir.is<const OMPExecutableDirective *>() &&
- "Expect a executable directive");
- const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
- for (const auto *C : CurExecDir->getClausesOfKind<OMPMapClause>()) {
+ for (const auto *Cl : Clauses) {
+ const auto *C = dyn_cast<OMPMapClause>(Cl);
+ if (!C)
+ continue;
+ MapKind Kind = Other;
+ if (!C->getMapTypeModifiers().empty() &&
+ llvm::any_of(C->getMapTypeModifiers(), [](OpenMPMapModifierKind K) {
+ return K == OMPC_MAP_MODIFIER_present;
+ }))
+ Kind = Present;
+ else if (C->getMapType() == OMPC_MAP_alloc)
+ Kind = Allocs;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- // The Expression is not correct if the mapping is implicit
const Expr *E = (C->getMapLoc().isValid()) ? *EI : nullptr;
- InfoGen(std::get<0>(L), std::get<1>(L), C->getMapType(),
+ InfoGen(std::get<0>(L), Kind, std::get<1>(L), C->getMapType(),
C->getMapTypeModifiers(), llvm::None,
/*ReturnDevicePointer=*/false, C->isImplicit(), std::get<2>(L),
E);
++EI;
}
}
- for (const auto *C : CurExecDir->getClausesOfKind<OMPToClause>()) {
+ for (const auto *Cl : Clauses) {
+ const auto *C = dyn_cast<OMPToClause>(Cl);
+ if (!C)
+ continue;
+ MapKind Kind = Other;
+ if (!C->getMotionModifiers().empty() &&
+ llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) {
+ return K == OMPC_MOTION_MODIFIER_present;
+ }))
+ Kind = Present;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(std::get<0>(L), std::get<1>(L), OMPC_MAP_to, llvm::None,
+ InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_to, llvm::None,
C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
C->isImplicit(), std::get<2>(L), *EI);
++EI;
}
}
- for (const auto *C : CurExecDir->getClausesOfKind<OMPFromClause>()) {
+ for (const auto *Cl : Clauses) {
+ const auto *C = dyn_cast<OMPFromClause>(Cl);
+ if (!C)
+ continue;
+ MapKind Kind = Other;
+ if (!C->getMotionModifiers().empty() &&
+ llvm::any_of(C->getMotionModifiers(), [](OpenMPMotionModifierKind K) {
+ return K == OMPC_MOTION_MODIFIER_present;
+ }))
+ Kind = Present;
const auto *EI = C->getVarRefs().begin();
for (const auto L : C->component_lists()) {
- InfoGen(std::get<0>(L), std::get<1>(L), OMPC_MAP_from, llvm::None,
+ InfoGen(std::get<0>(L), Kind, std::get<1>(L), OMPC_MAP_from, llvm::None,
C->getMotionModifiers(), /*ReturnDevicePointer=*/false,
C->isImplicit(), std::get<2>(L), *EI);
++EI;
@@ -8360,12 +8595,15 @@ public:
// section. It is the user fault if that was not mapped before. If there is
// no map information and the pointer is a struct member, then we defer the
// emission of that entry until the whole struct has been processed.
- llvm::MapVector<const ValueDecl *, SmallVector<DeferredDevicePtrEntryTy, 4>>
+ llvm::MapVector<CanonicalDeclPtr<const Decl>,
+ SmallVector<DeferredDevicePtrEntryTy, 4>>
DeferredInfo;
MapCombinedInfoTy UseDevicePtrCombinedInfo;
- for (const auto *C :
- CurExecDir->getClausesOfKind<OMPUseDevicePtrClause>()) {
+ for (const auto *Cl : Clauses) {
+ const auto *C = dyn_cast<OMPUseDevicePtrClause>(Cl);
+ if (!C)
+ continue;
for (const auto L : C->component_lists()) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components =
std::get<1>(L);
@@ -8382,28 +8620,34 @@ public:
// We potentially have map information for this declaration already.
// Look for the first set of components that refer to it.
if (It != Info.end()) {
- auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
- return MI.Components.back().getAssociatedDeclaration() == VD;
- });
- // If we found a map entry, signal that the pointer has to be returned
- // and move on to the next declaration.
- // Exclude cases where the base pointer is mapped as array subscript,
- // array section or array shaping. The base address is passed as a
- // pointer to base in this case and cannot be used as a base for
- // use_device_ptr list item.
- if (CI != It->second.end()) {
- auto PrevCI = std::next(CI->Components.rbegin());
- const auto *VarD = dyn_cast<VarDecl>(VD);
- if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
- isa<MemberExpr>(IE) ||
- !VD->getType().getNonReferenceType()->isPointerType() ||
- PrevCI == CI->Components.rend() ||
- isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
- VarD->hasLocalStorage()) {
- CI->ReturnDevicePointer = true;
- continue;
+ bool Found = false;
+ for (auto &Data : It->second) {
+ auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be
+ // returned and move on to the next declaration. Exclude cases where
+ // the base pointer is mapped as array subscript, array section or
+ // array shaping. The base address is passed as a pointer to base in
+ // this case and cannot be used as a base for use_device_ptr list
+ // item.
+ if (CI != Data.end()) {
+ auto PrevCI = std::next(CI->Components.rbegin());
+ const auto *VarD = dyn_cast<VarDecl>(VD);
+ if (CGF.CGM.getOpenMPRuntime().hasRequiresUnifiedSharedMemory() ||
+ isa<MemberExpr>(IE) ||
+ !VD->getType().getNonReferenceType()->isPointerType() ||
+ PrevCI == CI->Components.rend() ||
+ isa<MemberExpr>(PrevCI->getAssociatedExpression()) || !VarD ||
+ VarD->hasLocalStorage()) {
+ CI->ReturnDevicePointer = true;
+ Found = true;
+ break;
+ }
}
}
+ if (Found)
+ continue;
}
// We didn't find any match in our map information - generate a zero
@@ -8417,8 +8661,9 @@ public:
// Nonetheless, generateInfoForComponentList must be called to take
// the pointer into account for the calculation of the range of the
// partial struct.
- InfoGen(nullptr, Components, OMPC_MAP_unknown, llvm::None, llvm::None,
- /*ReturnDevicePointer=*/false, C->isImplicit(), nullptr);
+ InfoGen(nullptr, Other, Components, OMPC_MAP_unknown, llvm::None,
+ llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
+ nullptr);
DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/false);
} else {
llvm::Value *Ptr =
@@ -8441,8 +8686,10 @@ public:
// no map information and the pointer is a struct member, then we defer the
// emission of that entry until the whole struct has been processed.
llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
- for (const auto *C :
- CurExecDir->getClausesOfKind<OMPUseDeviceAddrClause>()) {
+ for (const auto *Cl : Clauses) {
+ const auto *C = dyn_cast<OMPUseDeviceAddrClause>(Cl);
+ if (!C)
+ continue;
for (const auto L : C->component_lists()) {
assert(!std::get<1>(L).empty() &&
"Not expecting empty list of components!");
@@ -8459,15 +8706,21 @@ public:
// We potentially have map information for this declaration already.
// Look for the first set of components that refer to it.
if (It != Info.end()) {
- auto *CI = llvm::find_if(It->second, [VD](const MapInfo &MI) {
- return MI.Components.back().getAssociatedDeclaration() == VD;
- });
- // If we found a map entry, signal that the pointer has to be returned
- // and move on to the next declaration.
- if (CI != It->second.end()) {
- CI->ReturnDevicePointer = true;
- continue;
+ bool Found = false;
+ for (auto &Data : It->second) {
+ auto *CI = llvm::find_if(Data, [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be
+ // returned and move on to the next declaration.
+ if (CI != Data.end()) {
+ CI->ReturnDevicePointer = true;
+ Found = true;
+ break;
+ }
}
+ if (Found)
+ continue;
}
// We didn't find any match in our map information - generate a zero
@@ -8481,7 +8734,7 @@ public:
// Nonetheless, generateInfoForComponentList must be called to take
// the pointer into account for the calculation of the range of the
// partial struct.
- InfoGen(nullptr, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
+ InfoGen(nullptr, Other, std::get<1>(L), OMPC_MAP_unknown, llvm::None,
llvm::None, /*ReturnDevicePointer=*/false, C->isImplicit(),
nullptr, nullptr, /*ForDeviceAddr=*/true);
DeferredInfo[nullptr].emplace_back(IE, VD, /*ForDeviceAddr=*/true);
@@ -8502,47 +8755,47 @@ public:
}
}
- for (const auto &M : Info) {
- // Underlying variable declaration used in the map clause.
- const ValueDecl *VD = std::get<0>(M);
-
+ for (const auto &Data : Info) {
+ StructRangeInfoTy PartialStruct;
// Temporary generated information.
MapCombinedInfoTy CurInfo;
- StructRangeInfoTy PartialStruct;
-
- for (const MapInfo &L : M.second) {
- assert(!L.Components.empty() &&
- "Not expecting declaration with no component lists.");
-
- // Remember the current base pointer index.
- unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
- CurInfo.NonContigInfo.IsNonContiguous =
- L.Components.back().isNonContiguous();
- generateInfoForComponentList(
- L.MapType, L.MapModifiers, L.MotionModifiers, L.Components, CurInfo,
- PartialStruct, /*IsFirstComponentList=*/false, L.IsImplicit,
- L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
-
- // If this entry relates with a device pointer, set the relevant
- // declaration and add the 'return pointer' flag.
- if (L.ReturnDevicePointer) {
- assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
- "Unexpected number of mapped base pointers.");
-
- const ValueDecl *RelevantVD =
- L.Components.back().getAssociatedDeclaration();
- assert(RelevantVD &&
- "No relevant declaration related with device pointer??");
-
- CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
- RelevantVD);
- CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
+ const Decl *D = Data.first;
+ const ValueDecl *VD = cast_or_null<ValueDecl>(D);
+ for (const auto &M : Data.second) {
+ for (const MapInfo &L : M) {
+ assert(!L.Components.empty() &&
+ "Not expecting declaration with no component lists.");
+
+ // Remember the current base pointer index.
+ unsigned CurrentBasePointersIdx = CurInfo.BasePointers.size();
+ CurInfo.NonContigInfo.IsNonContiguous =
+ L.Components.back().isNonContiguous();
+ generateInfoForComponentList(
+ L.MapType, L.MapModifiers, L.MotionModifiers, L.Components,
+ CurInfo, PartialStruct, /*IsFirstComponentList=*/false,
+ L.IsImplicit, L.Mapper, L.ForDeviceAddr, VD, L.VarRef);
+
+ // If this entry relates with a device pointer, set the relevant
+ // declaration and add the 'return pointer' flag.
+ if (L.ReturnDevicePointer) {
+ assert(CurInfo.BasePointers.size() > CurrentBasePointersIdx &&
+ "Unexpected number of mapped base pointers.");
+
+ const ValueDecl *RelevantVD =
+ L.Components.back().getAssociatedDeclaration();
+ assert(RelevantVD &&
+ "No relevant declaration related with device pointer??");
+
+ CurInfo.BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(
+ RelevantVD);
+ CurInfo.Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PARAM;
+ }
}
}
// Append any pending zero-length pointers which are struct members and
// used with use_device_ptr or use_device_addr.
- auto CI = DeferredInfo.find(M.first);
+ auto CI = DeferredInfo.find(Data.first);
if (CI != DeferredInfo.end()) {
for (const DeferredDevicePtrEntryTy &L : CI->second) {
llvm::Value *BasePtr;
@@ -8561,9 +8814,9 @@ public:
BasePtr = this->CGF.EmitLValue(L.IE).getPointer(CGF);
Ptr = this->CGF.EmitLoadOfScalar(this->CGF.EmitLValue(L.IE),
L.IE->getExprLoc());
- // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the placeholder
- // value MEMBER_OF=FFFF so that the entry is later updated with the
- // correct value of MEMBER_OF.
+ // Entry is PTR_AND_OBJ and RETURN_PARAM. Also, set the
+ // placeholder value MEMBER_OF=FFFF so that the entry is later
+ // updated with the correct value of MEMBER_OF.
CurInfo.Types.push_back(OMP_MAP_PTR_AND_OBJ | OMP_MAP_RETURN_PARAM |
OMP_MAP_MEMBER_OF);
}
@@ -8575,82 +8828,133 @@ public:
CurInfo.Mappers.push_back(nullptr);
}
}
-
// If there is an entry in PartialStruct it means we have a struct with
// individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid())
+ if (PartialStruct.Base.isValid()) {
+ CurInfo.NonContigInfo.Dims.push_back(0);
emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
+ }
- // We need to append the results of this capture to what we already have.
+ // We need to append the results of this capture to what we already
+ // have.
CombinedInfo.append(CurInfo);
}
// Append data for use_device_ptr clauses.
CombinedInfo.append(UseDevicePtrCombinedInfo);
}
- /// Generate all the base pointers, section pointers, sizes, map types, and
- /// mappers for the extracted map clauses of user-defined mapper (all included
- /// in \a CombinedInfo).
- void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
- assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
- "Expect a declare mapper directive");
- const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
- // We have to process the component lists that relate with the same
- // declaration in a single chunk so that we can generate the map flags
- // correctly. Therefore, we organize all lists in a map.
- llvm::MapVector<const ValueDecl *, SmallVector<MapInfo, 8>> Info;
-
- // Fill the information map for map clauses.
- for (const auto *C : CurMapperDir->clauselists()) {
- const auto *MC = cast<OMPMapClause>(C);
- const auto *EI = MC->getVarRefs().begin();
- for (const auto L : MC->component_lists()) {
- // The Expression is not correct if the mapping is implicit
- const Expr *E = (MC->getMapLoc().isValid()) ? *EI : nullptr;
- const ValueDecl *VD =
- std::get<0>(L) ? cast<ValueDecl>(std::get<0>(L)->getCanonicalDecl())
- : nullptr;
- // Get the corresponding user-defined mapper.
- Info[VD].emplace_back(std::get<1>(L), MC->getMapType(),
- MC->getMapTypeModifiers(), llvm::None,
- /*ReturnDevicePointer=*/false, MC->isImplicit(),
- std::get<2>(L), E);
- ++EI;
+public:
+ MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
+ : CurDir(&Dir), CGF(CGF) {
+ // Extract firstprivate clause information.
+ for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
+ for (const auto *D : C->varlists())
+ FirstPrivateDecls.try_emplace(
+ cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl()), C->isImplicit());
+ // Extract implicit firstprivates from uses_allocators clauses.
+ for (const auto *C : Dir.getClausesOfKind<OMPUsesAllocatorsClause>()) {
+ for (unsigned I = 0, E = C->getNumberOfAllocators(); I < E; ++I) {
+ OMPUsesAllocatorsClause::Data D = C->getAllocatorData(I);
+ if (const auto *DRE = dyn_cast_or_null<DeclRefExpr>(D.AllocatorTraits))
+ FirstPrivateDecls.try_emplace(cast<VarDecl>(DRE->getDecl()),
+ /*Implicit=*/true);
+ else if (const auto *VD = dyn_cast<VarDecl>(
+ cast<DeclRefExpr>(D.Allocator->IgnoreParenImpCasts())
+ ->getDecl()))
+ FirstPrivateDecls.try_emplace(VD, /*Implicit=*/true);
}
}
+ // Extract device pointer clause information.
+ for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
+ for (auto L : C->component_lists())
+ DevPointersMap[std::get<0>(L)].push_back(std::get<1>(L));
+ }
- for (const auto &M : Info) {
- // We need to know when we generate information for the first component
- // associated with a capture, because the mapping flags depend on it.
- bool IsFirstComponentList = true;
-
- // Underlying variable declaration used in the map clause.
- const ValueDecl *VD = std::get<0>(M);
+ /// Constructor for the declare mapper directive.
+ MappableExprsHandler(const OMPDeclareMapperDecl &Dir, CodeGenFunction &CGF)
+ : CurDir(&Dir), CGF(CGF) {}
- // Temporary generated information.
- MapCombinedInfoTy CurInfo;
- StructRangeInfoTy PartialStruct;
+ /// Generate code for the combined entry if we have a partially mapped struct
+ /// and take care of the mapping flags of the arguments corresponding to
+ /// individual struct members.
+ void emitCombinedEntry(MapCombinedInfoTy &CombinedInfo,
+ MapFlagsArrayTy &CurTypes,
+ const StructRangeInfoTy &PartialStruct,
+ const ValueDecl *VD = nullptr,
+ bool NotTargetParams = true) const {
+ if (CurTypes.size() == 1 &&
+ ((CurTypes.back() & OMP_MAP_MEMBER_OF) != OMP_MAP_MEMBER_OF) &&
+ !PartialStruct.IsArraySection)
+ return;
+ Address LBAddr = PartialStruct.LowestElem.second;
+ Address HBAddr = PartialStruct.HighestElem.second;
+ if (PartialStruct.HasCompleteRecord) {
+ LBAddr = PartialStruct.LB;
+ HBAddr = PartialStruct.LB;
+ }
+ CombinedInfo.Exprs.push_back(VD);
+ // Base is the base of the struct
+ CombinedInfo.BasePointers.push_back(PartialStruct.Base.getPointer());
+ // Pointer is the address of the lowest element
+ llvm::Value *LB = LBAddr.getPointer();
+ CombinedInfo.Pointers.push_back(LB);
+ // There should not be a mapper for a combined entry.
+ CombinedInfo.Mappers.push_back(nullptr);
+ // Size is (addr of {highest+1} element) - (addr of lowest element)
+ llvm::Value *HB = HBAddr.getPointer();
+ llvm::Value *HAddr =
+ CGF.Builder.CreateConstGEP1_32(HBAddr.getElementType(), HB, /*Idx0=*/1);
+ llvm::Value *CLAddr = CGF.Builder.CreatePointerCast(LB, CGF.VoidPtrTy);
+ llvm::Value *CHAddr = CGF.Builder.CreatePointerCast(HAddr, CGF.VoidPtrTy);
+ llvm::Value *Diff = CGF.Builder.CreatePtrDiff(CHAddr, CLAddr);
+ llvm::Value *Size = CGF.Builder.CreateIntCast(Diff, CGF.Int64Ty,
+ /*isSigned=*/false);
+ CombinedInfo.Sizes.push_back(Size);
+ // Map type is always TARGET_PARAM, if generate info for captures.
+ CombinedInfo.Types.push_back(NotTargetParams ? OMP_MAP_NONE
+ : OMP_MAP_TARGET_PARAM);
+ // If any element has the present modifier, then make sure the runtime
+ // doesn't attempt to allocate the struct.
+ if (CurTypes.end() !=
+ llvm::find_if(CurTypes, [](OpenMPOffloadMappingFlags Type) {
+ return Type & OMP_MAP_PRESENT;
+ }))
+ CombinedInfo.Types.back() |= OMP_MAP_PRESENT;
+ // Remove TARGET_PARAM flag from the first element
+ (*CurTypes.begin()) &= ~OMP_MAP_TARGET_PARAM;
- for (const MapInfo &L : M.second) {
- assert(!L.Components.empty() &&
- "Not expecting declaration with no component lists.");
- generateInfoForComponentList(
- L.MapType, L.MapModifiers, L.MotionModifiers, L.Components, CurInfo,
- PartialStruct, IsFirstComponentList, L.IsImplicit, L.Mapper,
- L.ForDeviceAddr, VD, L.VarRef);
- IsFirstComponentList = false;
- }
+ // All other current entries will be MEMBER_OF the combined entry
+ // (except for PTR_AND_OBJ entries which do not have a placeholder value
+ // 0xFFFF in the MEMBER_OF field).
+ OpenMPOffloadMappingFlags MemberOfFlag =
+ getMemberOfFlag(CombinedInfo.BasePointers.size() - 1);
+ for (auto &M : CurTypes)
+ setCorrectMemberOfFlag(M, MemberOfFlag);
+ }
- // If there is an entry in PartialStruct it means we have a struct with
- // individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid()) {
- CurInfo.NonContigInfo.Dims.push_back(0);
- emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct, VD);
- }
+ /// Generate all the base pointers, section pointers, sizes, map types, and
+ /// mappers for the extracted mappable expressions (all included in \a
+ /// CombinedInfo). Also, for each item that relates with a device pointer, a
+ /// pair of the relevant declaration and index where it occurs is appended to
+ /// the device pointers info array.
+ void generateAllInfo(
+ MapCombinedInfoTy &CombinedInfo,
+ const llvm::DenseSet<CanonicalDeclPtr<const Decl>> &SkipVarSet =
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>>()) const {
+ assert(CurDir.is<const OMPExecutableDirective *>() &&
+ "Expect a executable directive");
+ const auto *CurExecDir = CurDir.get<const OMPExecutableDirective *>();
+ generateAllInfoForClauses(CurExecDir->clauses(), CombinedInfo, SkipVarSet);
+ }
- // We need to append the results of this capture to what we already have.
- CombinedInfo.append(CurInfo);
- }
+ /// Generate all the base pointers, section pointers, sizes, map types, and
+ /// mappers for the extracted map clauses of user-defined mapper (all included
+ /// in \a CombinedInfo).
+ void generateAllInfoForMapper(MapCombinedInfoTy &CombinedInfo) const {
+ assert(CurDir.is<const OMPDeclareMapperDecl *>() &&
+ "Expect a declare mapper directive");
+ const auto *CurMapperDir = CurDir.get<const OMPDeclareMapperDecl *>();
+ generateAllInfoForClauses(CurMapperDir->clauses(), CombinedInfo);
}
/// Emit capture info for lambdas for variables captured by reference.
@@ -8804,6 +9108,25 @@ public:
++EI;
}
}
+ llvm::stable_sort(DeclComponentLists, [](const MapData &LHS,
+ const MapData &RHS) {
+ ArrayRef<OpenMPMapModifierKind> MapModifiers = std::get<2>(LHS);
+ OpenMPMapClauseKind MapType = std::get<1>(RHS);
+ bool HasPresent = !MapModifiers.empty() &&
+ llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) {
+ return K == clang::OMPC_MAP_MODIFIER_present;
+ });
+ bool HasAllocs = MapType == OMPC_MAP_alloc;
+ MapModifiers = std::get<2>(RHS);
+ MapType = std::get<1>(LHS);
+ bool HasPresentR =
+ !MapModifiers.empty() &&
+ llvm::any_of(MapModifiers, [](OpenMPMapModifierKind K) {
+ return K == clang::OMPC_MAP_MODIFIER_present;
+ });
+ bool HasAllocsR = MapType == OMPC_MAP_alloc;
+ return (HasPresent && !HasPresentR) || (HasAllocs && !HasAllocsR);
+ });
// Find overlapping elements (including the offset from the base element).
llvm::SmallDenseMap<
@@ -8839,11 +9162,28 @@ public:
if (CI->getAssociatedDeclaration() != SI->getAssociatedDeclaration())
break;
}
- // Found overlapping if, at least for one component, reached the head of
- // the components list.
+ // Found overlapping if, at least for one component, reached the head
+ // of the components list.
if (CI == CE || SI == SE) {
- assert((CI != CE || SI != SE) &&
- "Unexpected full match of the mapping components.");
+ // Ignore it if it is the same component.
+ if (CI == CE && SI == SE)
+ continue;
+ const auto It = (SI == SE) ? CI : SI;
+ // If one component is a pointer and another one is a kind of
+ // dereference of this pointer (array subscript, section, dereference,
+ // etc.), it is not an overlapping.
+ // Same, if one component is a base and another component is a
+ // dereferenced pointer memberexpr with the same base.
+ if (!isa<MemberExpr>(It->getAssociatedExpression()) ||
+ (std::prev(It)->getAssociatedDeclaration() &&
+ std::prev(It)
+ ->getAssociatedDeclaration()
+ ->getType()
+ ->isPointerType()) ||
+ (It->getAssociatedDeclaration() &&
+ It->getAssociatedDeclaration()->getType()->isPointerType() &&
+ std::next(It) != CE && std::next(It) != SE))
+ continue;
const MapData &BaseData = CI == CE ? L : L1;
OMPClauseMappableExprCommon::MappableExprComponentListRef SubData =
SI == SE ? Components : Components1;
@@ -8855,16 +9195,22 @@ public:
// Sort the overlapped elements for each item.
llvm::SmallVector<const FieldDecl *, 4> Layout;
if (!OverlappedData.empty()) {
- if (const auto *CRD =
- VD->getType().getCanonicalType()->getAsCXXRecordDecl())
+ const Type *BaseType = VD->getType().getCanonicalType().getTypePtr();
+ const Type *OrigType = BaseType->getPointeeOrArrayElementType();
+ while (BaseType != OrigType) {
+ BaseType = OrigType->getCanonicalTypeInternal().getTypePtr();
+ OrigType = BaseType->getPointeeOrArrayElementType();
+ }
+
+ if (const auto *CRD = BaseType->getAsCXXRecordDecl())
getPlainLayout(CRD, Layout, /*AsBase=*/false);
else {
- const auto *RD = VD->getType().getCanonicalType()->getAsRecordDecl();
+ const auto *RD = BaseType->getAsRecordDecl();
Layout.append(RD->field_begin(), RD->field_end());
}
}
for (auto &Pair : OverlappedData) {
- llvm::sort(
+ llvm::stable_sort(
Pair.getSecond(),
[&Layout](
OMPClauseMappableExprCommon::MappableExprComponentListRef First,
@@ -8896,7 +9242,7 @@ public:
const auto *FD2 = cast<FieldDecl>(SI->getAssociatedDeclaration());
if (FD1->getParent() == FD2->getParent())
return FD1->getFieldIndex() < FD2->getFieldIndex();
- const auto It =
+ const auto *It =
llvm::find_if(Layout, [FD1, FD2](const FieldDecl *FD) {
return FD == FD1 || FD == FD2;
});
@@ -8906,6 +9252,7 @@ public:
// Associated with a capture, because the mapping flags depend on it.
// Go through all of the elements with the overlapped elements.
+ bool IsFirstComponentList = true;
for (const auto &Pair : OverlappedData) {
const MapData &L = *Pair.getFirst();
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
@@ -8918,14 +9265,13 @@ public:
L;
ArrayRef<OMPClauseMappableExprCommon::MappableExprComponentListRef>
OverlappedComponents = Pair.getSecond();
- bool IsFirstComponentList = true;
generateInfoForComponentList(
MapType, MapModifiers, llvm::None, Components, CombinedInfo,
PartialStruct, IsFirstComponentList, IsImplicit, Mapper,
/*ForDeviceAddr=*/false, VD, VarRef, OverlappedComponents);
+ IsFirstComponentList = false;
}
// Go through other elements without overlapped elements.
- bool IsFirstComponentList = OverlappedData.empty();
for (const MapData &L : DeclComponentLists) {
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType;
@@ -8994,30 +9340,15 @@ public:
CombinedInfo.Types.push_back(getMapModifiersForPrivateClauses(CI));
const VarDecl *VD = CI.getCapturedVar();
auto I = FirstPrivateDecls.find(VD);
- if (I != FirstPrivateDecls.end() &&
- VD->getType().isConstant(CGF.getContext())) {
- llvm::Constant *Addr =
- CGF.CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(CGF, VD);
- // Copy the value of the original variable to the new global copy.
- CGF.Builder.CreateMemCpy(
- CGF.MakeNaturalAlignAddrLValue(Addr, ElementType).getAddress(CGF),
- Address(CV, CGF.getContext().getTypeAlignInChars(ElementType)),
- CombinedInfo.Sizes.back(), /*IsVolatile=*/false);
- // Use new global variable as the base pointers.
- CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
- CombinedInfo.BasePointers.push_back(Addr);
- CombinedInfo.Pointers.push_back(Addr);
+ CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
+ CombinedInfo.BasePointers.push_back(CV);
+ if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
+ Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
+ CV, ElementType, CGF.getContext().getDeclAlign(VD),
+ AlignmentSource::Decl));
+ CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
} else {
- CombinedInfo.Exprs.push_back(VD->getCanonicalDecl());
- CombinedInfo.BasePointers.push_back(CV);
- if (I != FirstPrivateDecls.end() && ElementType->isAnyPointerType()) {
- Address PtrAddr = CGF.EmitLoadOfReference(CGF.MakeAddrLValue(
- CV, ElementType, CGF.getContext().getDeclAlign(VD),
- AlignmentSource::Decl));
- CombinedInfo.Pointers.push_back(PtrAddr.getPointer());
- } else {
- CombinedInfo.Pointers.push_back(CV);
- }
+ CombinedInfo.Pointers.push_back(CV);
}
if (I != FirstPrivateDecls.end())
IsImplicit = I->getSecond();
@@ -9130,7 +9461,6 @@ emitMappingInformation(CodeGenFunction &CGF, llvm::OpenMPIRBuilder &OMPBuilder,
SrcLocStr = OMPBuilder.getOrCreateSrcLocStr(FileName, ExprName.c_str(),
Line, Column);
}
-
return SrcLocStr;
}
@@ -9211,15 +9541,10 @@ static void emitOffloadingArrays(
// fill arrays. Instead, we create an array constant.
SmallVector<uint64_t, 4> Mapping(CombinedInfo.Types.size(), 0);
llvm::copy(CombinedInfo.Types, Mapping.begin());
- llvm::Constant *MapTypesArrayInit =
- llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
std::string MaptypesName =
CGM.getOpenMPRuntime().getName({"offload_maptypes"});
- auto *MapTypesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), MapTypesArrayInit->getType(),
- /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- MapTypesArrayInit, MaptypesName);
- MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ auto *MapTypesArrayGbl =
+ OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
Info.MapTypesArray = MapTypesArrayGbl;
// The information types are only built if there is debug information
@@ -9233,17 +9558,10 @@ static void emitOffloadingArrays(
};
SmallVector<llvm::Constant *, 4> InfoMap(CombinedInfo.Exprs.size());
llvm::transform(CombinedInfo.Exprs, InfoMap.begin(), fillInfoMap);
-
- llvm::Constant *MapNamesArrayInit = llvm::ConstantArray::get(
- llvm::ArrayType::get(
- llvm::Type::getInt8Ty(CGF.Builder.getContext())->getPointerTo(),
- CombinedInfo.Exprs.size()),
- InfoMap);
- auto *MapNamesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), MapNamesArrayInit->getType(),
- /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- MapNamesArrayInit,
- CGM.getOpenMPRuntime().getName({"offload_mapnames"}));
+ std::string MapnamesName =
+ CGM.getOpenMPRuntime().getName({"offload_mapnames"});
+ auto *MapNamesArrayGbl =
+ OMPBuilder.createOffloadMapnames(InfoMap, MapnamesName);
Info.MapNamesArray = MapNamesArrayGbl;
}
@@ -9258,15 +9576,8 @@ static void emitOffloadingArrays(
}
}
if (EndMapTypesDiffer) {
- MapTypesArrayInit =
- llvm::ConstantDataArray::get(CGF.Builder.getContext(), Mapping);
- MaptypesName = CGM.getOpenMPRuntime().getName({"offload_maptypes"});
- MapTypesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), MapTypesArrayInit->getType(),
- /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- MapTypesArrayInit, MaptypesName);
- MapTypesArrayGbl->setUnnamedAddr(
- llvm::GlobalValue::UnnamedAddr::Global);
+ MapTypesArrayGbl =
+ OMPBuilder.createOffloadMaptypes(Mapping, MaptypesName);
Info.MapTypesArrayEnd = MapTypesArrayGbl;
}
}
@@ -9455,6 +9766,8 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
@@ -9512,14 +9825,16 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
/// void *base, void *begin,
/// int64_t size, int64_t type,
/// void *name = nullptr) {
-/// // Allocate space for an array section first.
-/// if (size > 1 && !maptype.IsDelete)
+/// // Allocate space for an array section first or add a base/begin for
+/// // pointer dereference.
+/// if ((size > 1 || (base != begin && maptype.IsPtrAndObj)) &&
+/// !maptype.IsDelete)
/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
-/// size*sizeof(Ty), clearToFrom(type));
+/// size*sizeof(Ty), clearToFromMember(type));
/// // Map members.
/// for (unsigned i = 0; i < size; i++) {
/// // For each component specified by this mapper:
-/// for (auto c : all_components) {
+/// for (auto c : begin[i]->all_components) {
/// if (c.hasMapper())
/// (*c.Mapper())(rt_mapper_handle, c.arg_base, c.arg_begin, c.arg_size,
/// c.arg_type, c.arg_name);
@@ -9532,7 +9847,7 @@ getNestedDistributeDirective(ASTContext &Ctx, const OMPExecutableDirective &D) {
/// // Delete the array section.
/// if (size > 1 && maptype.IsDelete)
/// __tgt_push_mapper_component(rt_mapper_handle, base, begin,
-/// size*sizeof(Ty), clearToFrom(type));
+/// size*sizeof(Ty), clearToFromMember(type));
/// }
/// \endcode
void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
@@ -9582,20 +9897,10 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Start the mapper function code generation.
CodeGenFunction MapperCGF(CGM);
MapperCGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
- // Compute the starting and end addreses of array elements.
+ // Compute the starting and end addresses of array elements.
llvm::Value *Size = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&SizeArg), /*Volatile=*/false,
C.getPointerType(Int64Ty), Loc);
- // Convert the size in bytes into the number of array elements.
- Size = MapperCGF.Builder.CreateExactUDiv(
- Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
- llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
- MapperCGF.GetAddrOfLocalVar(&BeginArg).getPointer(),
- CGM.getTypes().ConvertTypeForMem(C.getPointerType(PtrTy)));
- llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(PtrBegin, Size);
- llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
- MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
- C.getPointerType(Int64Ty), Loc);
// Prepare common arguments for array initiation and deletion.
llvm::Value *Handle = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&HandleArg),
@@ -9606,12 +9911,25 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::Value *BeginIn = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&BeginArg),
/*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
+ // Convert the size in bytes into the number of array elements.
+ Size = MapperCGF.Builder.CreateExactUDiv(
+ Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
+ llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
+ BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
+ llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(
+ PtrBegin->getType()->getPointerElementType(), PtrBegin, Size);
+ llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
+ MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
+ C.getPointerType(Int64Ty), Loc);
+ llvm::Value *MapName = MapperCGF.EmitLoadOfScalar(
+ MapperCGF.GetAddrOfLocalVar(&NameArg),
+ /*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
// Emit array initiation if this is an array section and \p MapType indicates
// that memory allocation is required.
llvm::BasicBlock *HeadBB = MapperCGF.createBasicBlock("omp.arraymap.head");
emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
- ElementSize, HeadBB, /*IsInit=*/true);
+ MapName, ElementSize, HeadBB, /*IsInit=*/true);
// Emit a for loop to iterate through SizeArg of elements and map all of them.
@@ -9637,11 +9955,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
.alignmentOfArrayElement(ElementSize));
// Privatize the declared variable of mapper to be the current array element.
CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
- Scope.addPrivate(MapperVarDecl, [&MapperCGF, PtrCurrent, PtrTy]() {
- return MapperCGF
- .EmitLoadOfPointerLValue(PtrCurrent, PtrTy->castAs<PointerType>())
- .getAddress(MapperCGF);
- });
+ Scope.addPrivate(MapperVarDecl, [PtrCurrent]() { return PtrCurrent; });
(void)Scope.Privatize();
// Get map clause information. Fill up the arrays with all mapped variables.
@@ -9673,28 +9987,9 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
: emitMappingInformation(MapperCGF, OMPBuilder, Info.Exprs[I]);
// Extract the MEMBER_OF field from the map type.
- llvm::BasicBlock *MemberBB = MapperCGF.createBasicBlock("omp.member");
- MapperCGF.EmitBlock(MemberBB);
llvm::Value *OriMapType = MapperCGF.Builder.getInt64(Info.Types[I]);
- llvm::Value *Member = MapperCGF.Builder.CreateAnd(
- OriMapType,
- MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_MEMBER_OF));
- llvm::BasicBlock *MemberCombineBB =
- MapperCGF.createBasicBlock("omp.member.combine");
- llvm::BasicBlock *TypeBB = MapperCGF.createBasicBlock("omp.type");
- llvm::Value *IsMember = MapperCGF.Builder.CreateIsNull(Member);
- MapperCGF.Builder.CreateCondBr(IsMember, TypeBB, MemberCombineBB);
- // Add the number of pre-existing components to the MEMBER_OF field if it
- // is valid.
- MapperCGF.EmitBlock(MemberCombineBB);
- llvm::Value *CombinedMember =
+ llvm::Value *MemberMapType =
MapperCGF.Builder.CreateNUWAdd(OriMapType, ShiftedPreviousSize);
- // Do nothing if it is not a member of previous components.
- MapperCGF.EmitBlock(TypeBB);
- llvm::PHINode *MemberMapType =
- MapperCGF.Builder.CreatePHI(CGM.Int64Ty, 4, "omp.membermaptype");
- MemberMapType->addIncoming(OriMapType, MemberBB);
- MemberMapType->addIncoming(CombinedMember, MemberCombineBB);
// Combine the map type inherited from user-defined mapper with that
// specified in the program. According to the OMP_MAP_TO and OMP_MAP_FROM
@@ -9779,8 +10074,9 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Update the pointer to point to the next element that needs to be mapped,
// and check whether we have mapped all elements.
+ llvm::Type *ElemTy = PtrPHI->getType()->getPointerElementType();
llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
- PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
+ ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
PtrPHI->addIncoming(PtrNext, LastBB);
llvm::Value *IsDone =
MapperCGF.Builder.CreateICmpEQ(PtrNext, PtrEnd, "omp.arraymap.isdone");
@@ -9791,7 +10087,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Emit array deletion if this is an array section and \p MapType indicates
// that deletion is required.
emitUDMapperArrayInitOrDel(MapperCGF, Handle, BaseIn, BeginIn, Size, MapType,
- ElementSize, DoneBB, /*IsInit=*/false);
+ MapName, ElementSize, DoneBB, /*IsInit=*/false);
// Emit the function exit block.
MapperCGF.EmitBlock(DoneBB, /*IsFinished=*/true);
@@ -9812,32 +10108,40 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
CodeGenFunction &MapperCGF, llvm::Value *Handle, llvm::Value *Base,
llvm::Value *Begin, llvm::Value *Size, llvm::Value *MapType,
- CharUnits ElementSize, llvm::BasicBlock *ExitBB, bool IsInit) {
+ llvm::Value *MapName, CharUnits ElementSize, llvm::BasicBlock *ExitBB,
+ bool IsInit) {
StringRef Prefix = IsInit ? ".init" : ".del";
// Evaluate if this is an array section.
- llvm::BasicBlock *IsDeleteBB =
- MapperCGF.createBasicBlock(getName({"omp.array", Prefix, ".evaldelete"}));
llvm::BasicBlock *BodyBB =
MapperCGF.createBasicBlock(getName({"omp.array", Prefix}));
- llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGE(
+ llvm::Value *IsArray = MapperCGF.Builder.CreateICmpSGT(
Size, MapperCGF.Builder.getInt64(1), "omp.arrayinit.isarray");
- MapperCGF.Builder.CreateCondBr(IsArray, IsDeleteBB, ExitBB);
-
- // Evaluate if we are going to delete this section.
- MapperCGF.EmitBlock(IsDeleteBB);
llvm::Value *DeleteBit = MapperCGF.Builder.CreateAnd(
MapType,
MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_DELETE));
llvm::Value *DeleteCond;
+ llvm::Value *Cond;
if (IsInit) {
+ // base != begin?
+ llvm::Value *BaseIsBegin = MapperCGF.Builder.CreateIsNotNull(
+ MapperCGF.Builder.CreatePtrDiff(Base, Begin));
+ // IsPtrAndObj?
+ llvm::Value *PtrAndObjBit = MapperCGF.Builder.CreateAnd(
+ MapType,
+ MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_PTR_AND_OBJ));
+ PtrAndObjBit = MapperCGF.Builder.CreateIsNotNull(PtrAndObjBit);
+ BaseIsBegin = MapperCGF.Builder.CreateAnd(BaseIsBegin, PtrAndObjBit);
+ Cond = MapperCGF.Builder.CreateOr(IsArray, BaseIsBegin);
DeleteCond = MapperCGF.Builder.CreateIsNull(
DeleteBit, getName({"omp.array", Prefix, ".delete"}));
} else {
+ Cond = IsArray;
DeleteCond = MapperCGF.Builder.CreateIsNotNull(
DeleteBit, getName({"omp.array", Prefix, ".delete"}));
}
- MapperCGF.Builder.CreateCondBr(DeleteCond, BodyBB, ExitBB);
+ Cond = MapperCGF.Builder.CreateAnd(Cond, DeleteCond);
+ MapperCGF.Builder.CreateCondBr(Cond, BodyBB, ExitBB);
MapperCGF.EmitBlock(BodyBB);
// Get the array size by multiplying element size and element number (i.e., \p
@@ -9850,12 +10154,14 @@ void CGOpenMPRuntime::emitUDMapperArrayInitOrDel(
MapType,
MapperCGF.Builder.getInt64(~(MappableExprsHandler::OMP_MAP_TO |
MappableExprsHandler::OMP_MAP_FROM)));
- llvm::Value *MapNameArg = llvm::ConstantPointerNull::get(CGM.VoidPtrTy);
+ MapTypeArg = MapperCGF.Builder.CreateOr(
+ MapTypeArg,
+ MapperCGF.Builder.getInt64(MappableExprsHandler::OMP_MAP_IMPLICIT));
// Call the runtime API __tgt_push_mapper_component to fill up the runtime
// data structure.
llvm::Value *OffloadingArgs[] = {Handle, Base, Begin,
- ArraySize, MapTypeArg, MapNameArg};
+ ArraySize, MapTypeArg, MapName};
MapperCGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
OMPRTL___tgt_push_mapper_component),
@@ -9892,7 +10198,7 @@ void CGOpenMPRuntime::emitTargetNumIterationsCall(
llvm::Value *Args[] = {RTLoc, DeviceID, NumIterations};
CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_push_target_tripcount),
+ CGM.getModule(), OMPRTL___kmpc_push_target_tripcount_mapper),
Args);
}
};
@@ -10016,18 +10322,27 @@ void CGOpenMPRuntime::emitTargetCall(
// passed to the runtime library - a 32-bit integer with the value zero.
assert(NumThreads && "Thread limit expression should be available along "
"with number of teams.");
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- OutlinedFnID,
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer(),
- NumTeams,
- NumThreads};
+ SmallVector<llvm::Value *> OffloadingArgs = {
+ RTLoc,
+ DeviceID,
+ OutlinedFnID,
+ PointerNum,
+ InputInfo.BasePointersArray.getPointer(),
+ InputInfo.PointersArray.getPointer(),
+ InputInfo.SizesArray.getPointer(),
+ MapTypesArray,
+ MapNamesArray,
+ InputInfo.MappersArray.getPointer(),
+ NumTeams,
+ NumThreads};
+ if (HasNowait) {
+ // Add int32_t depNum = 0, void *depList = nullptr, int32_t
+ // noAliasDepNum = 0, void *noAliasDepList = nullptr.
+ OffloadingArgs.push_back(CGF.Builder.getInt32(0));
+ OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
+ OffloadingArgs.push_back(CGF.Builder.getInt32(0));
+ OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
+ }
Return = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), HasNowait
@@ -10035,16 +10350,25 @@ void CGOpenMPRuntime::emitTargetCall(
: OMPRTL___tgt_target_teams_mapper),
OffloadingArgs);
} else {
- llvm::Value *OffloadingArgs[] = {RTLoc,
- DeviceID,
- OutlinedFnID,
- PointerNum,
- InputInfo.BasePointersArray.getPointer(),
- InputInfo.PointersArray.getPointer(),
- InputInfo.SizesArray.getPointer(),
- MapTypesArray,
- MapNamesArray,
- InputInfo.MappersArray.getPointer()};
+ SmallVector<llvm::Value *> OffloadingArgs = {
+ RTLoc,
+ DeviceID,
+ OutlinedFnID,
+ PointerNum,
+ InputInfo.BasePointersArray.getPointer(),
+ InputInfo.PointersArray.getPointer(),
+ InputInfo.SizesArray.getPointer(),
+ MapTypesArray,
+ MapNamesArray,
+ InputInfo.MappersArray.getPointer()};
+ if (HasNowait) {
+ // Add int32_t depNum = 0, void *depList = nullptr, int32_t
+ // noAliasDepNum = 0, void *noAliasDepList = nullptr.
+ OffloadingArgs.push_back(CGF.Builder.getInt32(0));
+ OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
+ OffloadingArgs.push_back(CGF.Builder.getInt32(0));
+ OffloadingArgs.push_back(llvm::ConstantPointerNull::get(CGM.VoidPtrTy));
+ }
Return = CGF.EmitRuntimeCall(
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), HasNowait ? OMPRTL___tgt_target_nowait_mapper
@@ -10094,7 +10418,7 @@ void CGOpenMPRuntime::emitTargetCall(
llvm::DenseSet<CanonicalDeclPtr<const Decl>> MappedVarSet;
auto RI = CS.getCapturedRecordDecl()->field_begin();
- auto CV = CapturedVars.begin();
+ auto *CV = CapturedVars.begin();
for (CapturedStmt::const_capture_iterator CI = CS.capture_begin(),
CE = CS.capture_end();
CI != CE; ++CI, ++RI, ++CV) {
@@ -10141,9 +10465,12 @@ void CGOpenMPRuntime::emitTargetCall(
// If there is an entry in PartialStruct it means we have a struct with
// individual members mapped. Emit an extra combined entry.
- if (PartialStruct.Base.isValid())
- MEHandler.emitCombinedEntry(CombinedInfo, CurInfo.Types, PartialStruct,
- nullptr, /*NoTargetParam=*/false);
+ if (PartialStruct.Base.isValid()) {
+ CombinedInfo.append(PartialStruct.PreliminaryMapData);
+ MEHandler.emitCombinedEntry(
+ CombinedInfo, CurInfo.Types, PartialStruct, nullptr,
+ !PartialStruct.PreliminaryMapData.BasePointers.empty());
+ }
// We need to append the results of this capture to what we already have.
CombinedInfo.append(CurInfo);
@@ -10289,6 +10616,8 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
@@ -10354,17 +10683,28 @@ void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
scanForTargetRegionsFunctions(II, ParentName);
}
+static bool isAssumedToBeNotEmitted(const ValueDecl *VD, bool IsDevice) {
+ Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ if (!DevTy)
+ return false;
+ // Do not emit device_type(nohost) functions for the host.
+ if (!IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
+ return true;
+ // Do not emit device_type(host) functions for the device.
+ if (IsDevice && DevTy == OMPDeclareTargetDeclAttr::DT_Host)
+ return true;
+ return false;
+}
+
bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
// If emitting code for the host, we do not process FD here. Instead we do
// the normal code generation.
if (!CGM.getLangOpts().OpenMPIsDevice) {
- if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl())) {
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD);
- // Do not emit device_type(nohost) functions for the host.
- if (DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
+ if (const auto *FD = dyn_cast<FunctionDecl>(GD.getDecl()))
+ if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
+ CGM.getLangOpts().OpenMPIsDevice))
return true;
- }
return false;
}
@@ -10373,10 +10713,8 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
StringRef Name = CGM.getMangledName(GD);
scanForTargetRegionsFunctions(FD->getBody(), Name);
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD);
- // Do not emit device_type(nohost) functions for the host.
- if (DevTy && *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
+ if (isAssumedToBeNotEmitted(cast<ValueDecl>(FD),
+ CGM.getLangOpts().OpenMPIsDevice))
return true;
}
@@ -10386,6 +10724,10 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
}
bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
+ if (isAssumedToBeNotEmitted(cast<ValueDecl>(GD.getDecl()),
+ CGM.getLangOpts().OpenMPIsDevice))
+ return true;
+
if (!CGM.getLangOpts().OpenMPIsDevice)
return false;
@@ -10419,45 +10761,18 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
return false;
}
-llvm::Constant *
-CGOpenMPRuntime::registerTargetFirstprivateCopy(CodeGenFunction &CGF,
- const VarDecl *VD) {
- assert(VD->getType().isConstant(CGM.getContext()) &&
- "Expected constant variable.");
- StringRef VarName;
- llvm::Constant *Addr;
- llvm::GlobalValue::LinkageTypes Linkage;
- QualType Ty = VD->getType();
- SmallString<128> Buffer;
- {
- unsigned DeviceID;
- unsigned FileID;
- unsigned Line;
- getTargetEntryUniqueInfo(CGM.getContext(), VD->getLocation(), DeviceID,
- FileID, Line);
- llvm::raw_svector_ostream OS(Buffer);
- OS << "__omp_offloading_firstprivate_" << llvm::format("_%x", DeviceID)
- << llvm::format("_%x_", FileID) << VD->getName() << "_l" << Line;
- VarName = OS.str();
- }
- Linkage = llvm::GlobalValue::InternalLinkage;
- Addr =
- getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(Ty), VarName,
- getDefaultFirstprivateAddressSpace());
- cast<llvm::GlobalValue>(Addr)->setLinkage(Linkage);
- CharUnits VarSize = CGM.getContext().getTypeSizeInChars(Ty);
- CGM.addCompilerUsedGlobal(cast<llvm::GlobalValue>(Addr));
- OffloadEntriesInfoManager.registerDeviceGlobalVarEntryInfo(
- VarName, Addr, VarSize,
- OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryTo, Linkage);
- return Addr;
-}
-
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
if (CGM.getLangOpts().OMPTargetTriples.empty() &&
!CGM.getLangOpts().OpenMPIsDevice)
return;
+
+ // If we have host/nohost variables, they do not need to be registered.
+ Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(VD);
+ if (DevTy && DevTy.getValue() != OMPDeclareTargetDeclAttr::DT_Any)
+ return;
+
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res) {
@@ -10488,6 +10803,10 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
Linkage = CGM.getLLVMLinkageVarDefinition(VD, /*IsConstant=*/false);
// Temp solution to prevent optimizations of the internal variables.
if (CGM.getLangOpts().OpenMPIsDevice && !VD->isExternallyVisible()) {
+ // Do not create a "ref-variable" if the original is not also available
+ // on the host.
+ if (!OffloadEntriesInfoManager.hasDeviceGlobalVarEntryInfo(VarName))
+ return;
std::string RefName = getName({VarName, "ref"});
if (!CGM.GetGlobalValue(RefName)) {
llvm::Constant *AddrRef =
@@ -10971,6 +11290,8 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
case OMPD_allocate:
case OMPD_task:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
@@ -11859,13 +12180,14 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
// Cleanup action for allocate support.
class OMPAllocateCleanupTy final : public EHScopeStack::Cleanup {
llvm::FunctionCallee RTLFn;
- unsigned LocEncoding;
+ SourceLocation::UIntTy LocEncoding;
Address Addr;
const Expr *Allocator;
public:
- OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn, unsigned LocEncoding,
- Address Addr, const Expr *Allocator)
+ OMPAllocateCleanupTy(llvm::FunctionCallee RTLFn,
+ SourceLocation::UIntTy LocEncoding, Address Addr,
+ const Expr *Allocator)
: RTLFn(RTLFn), LocEncoding(LocEncoding), Addr(Addr),
Allocator(Allocator) {}
void Emit(CodeGenFunction &CGF, Flags /*flags*/) override {
@@ -11943,8 +12265,8 @@ CGOpenMPRuntime::NontemporalDeclsRAII::~NontemporalDeclsRAII() {
CGOpenMPRuntime::UntiedTaskLocalDeclsRAII::UntiedTaskLocalDeclsRAII(
CodeGenFunction &CGF,
- const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
- std::pair<Address, Address>> &LocalVars)
+ const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>> &LocalVars)
: CGM(CGF.CGM), NeedToPush(!LocalVars.empty()) {
if (!NeedToPush)
return;
@@ -12465,6 +12787,13 @@ void CGOpenMPSIMDRuntime::emitMasterRegion(CodeGenFunction &CGF,
llvm_unreachable("Not supported in SIMD-only mode");
}
+void CGOpenMPSIMDRuntime::emitMaskedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MasterOpGen,
+ SourceLocation Loc,
+ const Expr *Filter) {
+ llvm_unreachable("Not supported in SIMD-only mode");
+}
+
void CGOpenMPSIMDRuntime::emitTaskyieldCall(CodeGenFunction &CGF,
SourceLocation Loc) {
llvm_unreachable("Not supported in SIMD-only mode");
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.h b/clang/lib/CodeGen/CGOpenMPRuntime.h
index b8bb6d85f005..c24648aae7e1 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -73,7 +73,6 @@ class RegionCodeGenTy final {
CodeGenTy Callback;
mutable PrePostActionTy *PrePostAction;
RegionCodeGenTy() = delete;
- RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
template <typename Callable>
static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
PrePostActionTy &Action) {
@@ -254,8 +253,8 @@ public:
public:
UntiedTaskLocalDeclsRAII(
CodeGenFunction &CGF,
- const llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
- std::pair<Address, Address>> &LocalVars);
+ const llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>> &LocalVars);
~UntiedTaskLocalDeclsRAII();
};
@@ -341,6 +340,35 @@ protected:
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
+ /// Emit the number of teams for a target directive. Inspect the num_teams
+ /// clause associated with a teams construct combined or closely nested
+ /// with the target directive.
+ ///
+ /// Emit a team of size one for directives such as 'target parallel' that
+ /// have no associated teams construct.
+ ///
+ /// Otherwise, return nullptr.
+ const Expr *getNumTeamsExprForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ int32_t &DefaultVal);
+ llvm::Value *emitNumTeamsForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D);
+ /// Emit the number of threads for a target directive. Inspect the
+ /// thread_limit clause associated with a teams construct combined or closely
+ /// nested with the target directive.
+ ///
+ /// Emit the num_threads clause for directives such as 'target parallel' that
+ /// have no associated teams construct.
+ ///
+ /// Otherwise, return nullptr.
+ const Expr *
+ getNumThreadsExprForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D,
+ int32_t &DefaultVal);
+ llvm::Value *
+ emitNumThreadsForTargetDirective(CodeGenFunction &CGF,
+ const OMPExecutableDirective &D);
+
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
@@ -724,8 +752,8 @@ private:
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
using UntiedLocalVarsAddressesMap =
- llvm::DenseMap<CanonicalDeclPtr<const VarDecl>,
- std::pair<Address, Address>>;
+ llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>>;
llvm::SmallVector<UntiedLocalVarsAddressesMap, 4> UntiedLocalVarsStack;
/// Stack for list of addresses of declarations in current context marked as
@@ -824,7 +852,8 @@ private:
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
- llvm::Value *MapType, CharUnits ElementSize,
+ llvm::Value *MapType, llvm::Value *MapName,
+ CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
@@ -862,10 +891,6 @@ private:
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
- /// Returns default address space for the constant firstprivates, 0 by
- /// default.
- virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
-
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
@@ -1011,6 +1036,14 @@ public:
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
+ /// Emits a masked region.
+ /// \param MaskedOpGen Generator for the statement associated with the given
+ /// masked region.
+ virtual void emitMaskedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MaskedOpGen,
+ SourceLocation Loc,
+ const Expr *Filter = nullptr);
+
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
@@ -1583,11 +1616,6 @@ public:
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
- /// Registers provided target firstprivate variable as global on the
- /// target.
- llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
- const VarDecl *VD);
-
/// Emit the global \a GD if it is meaningful for the target. Returns
/// if it was emitted successfully.
/// \param GD Global to scan.
@@ -1983,6 +2011,17 @@ public:
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
+ /// Emits a masked region.
+ /// \param MaskedOpGen Generator for the statement associated with the given
+ /// masked region.
+ void emitMaskedRegion(CodeGenFunction &CGF,
+ const RegionCodeGenTy &MaskedOpGen, SourceLocation Loc,
+ const Expr *Filter = nullptr) override;
+
+ /// Emits a masked region.
+ /// \param MaskedOpGen Generator for the statement associated with the given
+ /// masked region.
+
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 9c8c7b83d1d2..63fecedc6fb7 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -553,57 +553,6 @@ static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
"nvptx_lane_id");
}
-/// Get the value of the thread_limit clause in the teams directive.
-/// For the 'generic' execution mode, the runtime encodes thread_limit in
-/// the launch parameters, always starting thread_limit+warpSize threads per
-/// CTA. The threads in the last warp are reserved for master execution.
-/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
-static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
- bool IsInSPMDExecutionMode = false) {
- CGBuilderTy &Bld = CGF.Builder;
- auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- return IsInSPMDExecutionMode
- ? RT.getGPUNumThreads(CGF)
- : Bld.CreateNUWSub(RT.getGPUNumThreads(CGF),
- RT.getGPUWarpSize(CGF), "thread_limit");
-}
-
-/// Get the thread id of the OMP master thread.
-/// The master thread id is the first thread (lane) of the last warp in the
-/// GPU block. Warp size is assumed to be some power of 2.
-/// Thread id is 0 indexed.
-/// E.g: If NumThreads is 33, master id is 32.
-/// If NumThreads is 64, master id is 32.
-/// If NumThreads is 1024, master id is 992.
-static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
- CGBuilderTy &Bld = CGF.Builder;
- auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- llvm::Value *NumThreads = RT.getGPUNumThreads(CGF);
- // We assume that the warp size is a power of 2.
- llvm::Value *Mask = Bld.CreateNUWSub(RT.getGPUWarpSize(CGF), Bld.getInt32(1));
-
- return Bld.CreateAnd(Bld.CreateNUWSub(NumThreads, Bld.getInt32(1)),
- Bld.CreateNot(Mask), "master_tid");
-}
-
-CGOpenMPRuntimeGPU::WorkerFunctionState::WorkerFunctionState(
- CodeGenModule &CGM, SourceLocation Loc)
- : WorkerFn(nullptr), CGFI(CGM.getTypes().arrangeNullaryFunction()),
- Loc(Loc) {
- createWorkerFunction(CGM);
-}
-
-void CGOpenMPRuntimeGPU::WorkerFunctionState::createWorkerFunction(
- CodeGenModule &CGM) {
- // Create an worker function with no arguments.
-
- WorkerFn = llvm::Function::Create(
- CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
- /*placeholder=*/"_worker", &CGM.getModule());
- CGM.SetInternalFunctionAttributes(GlobalDecl(), WorkerFn, CGFI);
- WorkerFn->setDoesNotRecurse();
-}
-
CGOpenMPRuntimeGPU::ExecutionMode
CGOpenMPRuntimeGPU::getExecutionMode() const {
return CurrentExecutionMode;
@@ -1067,23 +1016,19 @@ void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
const RegionCodeGenTy &CodeGen) {
ExecutionRuntimeModesRAII ModeRAII(CurrentExecutionMode);
EntryFunctionState EST;
- WorkerFunctionState WST(CGM, D.getBeginLoc());
- Work.clear();
WrapperFunctionsMap.clear();
// Emit target region as a standalone region.
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeGPU::EntryFunctionState &EST;
- CGOpenMPRuntimeGPU::WorkerFunctionState &WST;
public:
- NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST,
- CGOpenMPRuntimeGPU::WorkerFunctionState &WST)
- : EST(EST), WST(WST) {}
+ NVPTXPrePostActionTy(CGOpenMPRuntimeGPU::EntryFunctionState &EST)
+ : EST(EST) {}
void Enter(CodeGenFunction &CGF) override {
auto &RT =
static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- RT.emitNonSPMDEntryHeader(CGF, EST, WST);
+ RT.emitKernelInit(CGF, EST, /* IsSPMD */ false);
// Skip target region initialization.
RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
@@ -1091,106 +1036,33 @@ void CGOpenMPRuntimeGPU::emitNonSPMDKernel(const OMPExecutableDirective &D,
auto &RT =
static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
RT.clearLocThreadIdInsertPt(CGF);
- RT.emitNonSPMDEntryFooter(CGF, EST);
+ RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ false);
}
- } Action(EST, WST);
+ } Action(EST);
CodeGen.setAction(Action);
IsInTTDRegion = true;
- // Reserve place for the globalized memory.
- GlobalizedRecords.emplace_back();
- if (!KernelStaticGlobalized) {
- KernelStaticGlobalized = new llvm::GlobalVariable(
- CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::UndefValue::get(CGM.VoidPtrTy),
- "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
- }
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
IsInTTDRegion = false;
-
- // Now change the name of the worker function to correspond to this target
- // region's entry function.
- WST.WorkerFn->setName(Twine(OutlinedFn->getName(), "_worker"));
-
- // Create the worker function
- emitWorkerFunction(WST);
}
-// Setup NVPTX threads for master-worker OpenMP scheme.
-void CGOpenMPRuntimeGPU::emitNonSPMDEntryHeader(CodeGenFunction &CGF,
- EntryFunctionState &EST,
- WorkerFunctionState &WST) {
+void CGOpenMPRuntimeGPU::emitKernelInit(CodeGenFunction &CGF,
+ EntryFunctionState &EST, bool IsSPMD) {
CGBuilderTy &Bld = CGF.Builder;
-
- llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
- llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
- llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- auto &RT = static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
- llvm::Value *IsWorker =
- Bld.CreateICmpULT(RT.getGPUThreadID(CGF), getThreadLimit(CGF));
- Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);
-
- CGF.EmitBlock(WorkerBB);
- emitCall(CGF, WST.Loc, WST.WorkerFn);
- CGF.EmitBranch(EST.ExitBB);
-
- CGF.EmitBlock(MasterCheckBB);
- llvm::Value *IsMaster =
- Bld.CreateICmpEQ(RT.getGPUThreadID(CGF), getMasterThreadID(CGF));
- Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);
-
- CGF.EmitBlock(MasterBB);
- IsInTargetMasterThreadRegion = true;
- // SEQUENTIAL (MASTER) REGION START
- // First action in sequential region:
- // Initialize the state of the OpenMP runtime library on the GPU.
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {getThreadLimit(CGF),
- Bld.getInt16(/*RequiresOMPRuntime=*/1)};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_kernel_init),
- Args);
-
- // For data sharing, we need to initialize the stack.
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack));
-
- emitGenericVarsProlog(CGF, WST.Loc);
+ Bld.restoreIP(OMPBuilder.createTargetInit(Bld, IsSPMD, requiresFullRuntime()));
+ IsInTargetMasterThreadRegion = IsSPMD;
+ if (!IsSPMD)
+ emitGenericVarsProlog(CGF, EST.Loc);
}
-void CGOpenMPRuntimeGPU::emitNonSPMDEntryFooter(CodeGenFunction &CGF,
- EntryFunctionState &EST) {
- IsInTargetMasterThreadRegion = false;
- if (!CGF.HaveInsertPoint())
- return;
-
- emitGenericVarsEpilog(CGF);
-
- if (!EST.ExitBB)
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
- CGF.EmitBranch(TerminateBB);
-
- CGF.EmitBlock(TerminateBB);
- // Signal termination condition.
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_kernel_deinit),
- Args);
- // Barrier to terminate worker threads.
- syncCTAThreads(CGF);
- // Master thread jumps to exit point.
- CGF.EmitBranch(EST.ExitBB);
+void CGOpenMPRuntimeGPU::emitKernelDeinit(CodeGenFunction &CGF,
+ EntryFunctionState &EST,
+ bool IsSPMD) {
+ if (!IsSPMD)
+ emitGenericVarsEpilog(CGF);
- CGF.EmitBlock(EST.ExitBB);
- EST.ExitBB = nullptr;
+ CGBuilderTy &Bld = CGF.Builder;
+ OMPBuilder.createTargetDeinit(Bld, IsSPMD, requiresFullRuntime());
}
void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
@@ -1209,95 +1081,28 @@ void CGOpenMPRuntimeGPU::emitSPMDKernel(const OMPExecutableDirective &D,
class NVPTXPrePostActionTy : public PrePostActionTy {
CGOpenMPRuntimeGPU &RT;
CGOpenMPRuntimeGPU::EntryFunctionState &EST;
- const OMPExecutableDirective &D;
public:
NVPTXPrePostActionTy(CGOpenMPRuntimeGPU &RT,
- CGOpenMPRuntimeGPU::EntryFunctionState &EST,
- const OMPExecutableDirective &D)
- : RT(RT), EST(EST), D(D) {}
+ CGOpenMPRuntimeGPU::EntryFunctionState &EST)
+ : RT(RT), EST(EST) {}
void Enter(CodeGenFunction &CGF) override {
- RT.emitSPMDEntryHeader(CGF, EST, D);
+ RT.emitKernelInit(CGF, EST, /* IsSPMD */ true);
// Skip target region initialization.
RT.setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
}
void Exit(CodeGenFunction &CGF) override {
RT.clearLocThreadIdInsertPt(CGF);
- RT.emitSPMDEntryFooter(CGF, EST);
+ RT.emitKernelDeinit(CGF, EST, /* IsSPMD */ true);
}
- } Action(*this, EST, D);
+ } Action(*this, EST);
CodeGen.setAction(Action);
IsInTTDRegion = true;
- // Reserve place for the globalized memory.
- GlobalizedRecords.emplace_back();
- if (!KernelStaticGlobalized) {
- KernelStaticGlobalized = new llvm::GlobalVariable(
- CGM.getModule(), CGM.VoidPtrTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::UndefValue::get(CGM.VoidPtrTy),
- "_openmp_kernel_static_glob_rd$ptr", /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal,
- CGM.getContext().getTargetAddressSpace(LangAS::cuda_shared));
- }
emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
IsOffloadEntry, CodeGen);
IsInTTDRegion = false;
}
-void CGOpenMPRuntimeGPU::emitSPMDEntryHeader(
- CodeGenFunction &CGF, EntryFunctionState &EST,
- const OMPExecutableDirective &D) {
- CGBuilderTy &Bld = CGF.Builder;
-
- // Setup BBs in entry function.
- llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSPMDExecutionMode=*/true),
- /*RequiresOMPRuntime=*/
- Bld.getInt16(RequiresFullRuntime ? 1 : 0)};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_spmd_kernel_init),
- Args);
-
- if (RequiresFullRuntime) {
- // For data sharing, we need to initialize the stack.
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_init_stack_spmd));
- }
-
- CGF.EmitBranch(ExecuteBB);
-
- CGF.EmitBlock(ExecuteBB);
-
- IsInTargetMasterThreadRegion = true;
-}
-
-void CGOpenMPRuntimeGPU::emitSPMDEntryFooter(CodeGenFunction &CGF,
- EntryFunctionState &EST) {
- IsInTargetMasterThreadRegion = false;
- if (!CGF.HaveInsertPoint())
- return;
-
- if (!EST.ExitBB)
- EST.ExitBB = CGF.createBasicBlock(".exit");
-
- llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
- CGF.EmitBranch(OMPDeInitBB);
-
- CGF.EmitBlock(OMPDeInitBB);
- // DeInitialize the OMP state in the runtime; called by all active threads.
- llvm::Value *Args[] = {/*RequiresOMPRuntime=*/
- CGF.Builder.getInt16(RequiresFullRuntime ? 1 : 0)};
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_spmd_kernel_deinit_v2),
- Args);
- CGF.EmitBranch(EST.ExitBB);
-
- CGF.EmitBlock(EST.ExitBB);
- EST.ExitBB = nullptr;
-}
-
// Create a unique global variable to indicate the execution mode of this target
// region. The execution mode is either 'generic', or 'spmd' depending on the
// target directive. This variable is picked up by the offload library to setup
@@ -1314,137 +1119,6 @@ static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
CGM.addCompilerUsedGlobal(GVMode);
}
-void CGOpenMPRuntimeGPU::emitWorkerFunction(WorkerFunctionState &WST) {
- ASTContext &Ctx = CGM.getContext();
-
- CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
- CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, WST.CGFI, {},
- WST.Loc, WST.Loc);
- emitWorkerLoop(CGF, WST);
- CGF.FinishFunction();
-}
-
-void CGOpenMPRuntimeGPU::emitWorkerLoop(CodeGenFunction &CGF,
- WorkerFunctionState &WST) {
- //
- // The workers enter this loop and wait for parallel work from the master.
- // When the master encounters a parallel region it sets up the work + variable
- // arguments, and wakes up the workers. The workers first check to see if
- // they are required for the parallel region, i.e., within the # of requested
- // parallel threads. The activated workers load the variable arguments and
- // execute the parallel work.
- //
-
- CGBuilderTy &Bld = CGF.Builder;
-
- llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
- llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
- llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
- llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
- llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
-
- CGF.EmitBranch(AwaitBB);
-
- // Workers wait for work from master.
- CGF.EmitBlock(AwaitBB);
- // Wait for parallel work
- syncCTAThreads(CGF);
-
- Address WorkFn =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
- Address ExecStatus =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
- CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
- CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));
-
- // TODO: Optimize runtime initialization and pass in correct value.
- llvm::Value *Args[] = {WorkFn.getPointer()};
- llvm::Value *Ret =
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_kernel_parallel),
- Args);
- Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);
-
- // On termination condition (workid == 0), exit loop.
- llvm::Value *WorkID = Bld.CreateLoad(WorkFn);
- llvm::Value *ShouldTerminate = Bld.CreateIsNull(WorkID, "should_terminate");
- Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);
-
- // Activate requested workers.
- CGF.EmitBlock(SelectWorkersBB);
- llvm::Value *IsActive =
- Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
- Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);
-
- // Signal start of parallel region.
- CGF.EmitBlock(ExecuteBB);
- // Skip initialization.
- setLocThreadIdInsertPt(CGF, /*AtCurrentPoint=*/true);
-
- // Process work items: outlined parallel functions.
- for (llvm::Function *W : Work) {
- // Try to match this outlined function.
- llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);
-
- llvm::Value *WorkFnMatch =
- Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");
-
- llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
- llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
- Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);
-
- // Execute this outlined function.
- CGF.EmitBlock(ExecuteFNBB);
-
- // Insert call to work function via shared wrapper. The shared
- // wrapper takes two arguments:
- // - the parallelism level;
- // - the thread ID;
- emitCall(CGF, WST.Loc, W,
- {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
-
- // Go to end of parallel region.
- CGF.EmitBranch(TerminateBB);
-
- CGF.EmitBlock(CheckNextBB);
- }
- // Default case: call to outlined function through pointer if the target
- // region makes a declare target call that may contain an orphaned parallel
- // directive.
- auto *ParallelFnTy =
- llvm::FunctionType::get(CGM.VoidTy, {CGM.Int16Ty, CGM.Int32Ty},
- /*isVarArg=*/false);
- llvm::Value *WorkFnCast =
- Bld.CreateBitCast(WorkID, ParallelFnTy->getPointerTo());
- // Insert call to work function via shared wrapper. The shared
- // wrapper takes two arguments:
- // - the parallelism level;
- // - the thread ID;
- emitCall(CGF, WST.Loc, {ParallelFnTy, WorkFnCast},
- {Bld.getInt16(/*ParallelLevel=*/0), getThreadID(CGF, WST.Loc)});
- // Go to end of parallel region.
- CGF.EmitBranch(TerminateBB);
-
- // Signal end of parallel region.
- CGF.EmitBlock(TerminateBB);
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_kernel_end_parallel),
- llvm::None);
- CGF.EmitBranch(BarrierBB);
-
- // All active and inactive workers wait at a barrier after parallel region.
- CGF.EmitBlock(BarrierBB);
- // Barrier after parallel region.
- syncCTAThreads(CGF);
- CGF.EmitBranch(AwaitBB);
-
- // Exit target region.
- CGF.EmitBlock(ExitBB);
- // Skip initialization.
- clearLocThreadIdInsertPt(CGF);
-}
-
void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
llvm::Constant *Addr,
uint64_t Size, int32_t,
@@ -1671,16 +1345,13 @@ llvm::Function *CGOpenMPRuntimeGPU::emitTeamsOutlinedFunction(
static_cast<CGOpenMPRuntimeGPU &>(CGF.CGM.getOpenMPRuntime());
if (GlobalizedRD) {
auto I = Rt.FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
- I->getSecond().GlobalRecord = GlobalizedRD;
I->getSecond().MappedParams =
std::make_unique<CodeGenFunction::OMPMapVars>();
DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
for (const auto &Pair : MappedDeclsFields) {
assert(Pair.getFirst()->isCanonicalDecl() &&
"Expected canonical declaration");
- Data.insert(std::make_pair(Pair.getFirst(),
- MappedVarData(Pair.getSecond(),
- /*IsOnePerTeam=*/true)));
+ Data.insert(std::make_pair(Pair.getFirst(), MappedVarData()));
}
}
Rt.emitGenericVarsProlog(CGF, Loc);
@@ -1709,281 +1380,71 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
if (I == FunctionGlobalizedDecls.end())
return;
- if (const RecordDecl *GlobalizedVarsRecord = I->getSecond().GlobalRecord) {
- QualType GlobalRecTy = CGM.getContext().getRecordType(GlobalizedVarsRecord);
- QualType SecGlobalRecTy;
- // Recover pointer to this function's global record. The runtime will
- // handle the specifics of the allocation of the memory.
- // Use actual memory size of the record including the padding
+ for (auto &Rec : I->getSecond().LocalVarData) {
+ const auto *VD = cast<VarDecl>(Rec.first);
+ bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
+ QualType VarTy = VD->getType();
+
+ // Get the local allocation of a firstprivate variable before sharing
+ llvm::Value *ParValue;
+ if (EscapedParam) {
+ LValue ParLVal =
+ CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
+ ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
+ }
+
+ // Allocate space for the variable to be globalized
+ llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
+ llvm::Instruction *VoidPtr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc_shared),
+ AllocArgs, VD->getName());
+
+ // Cast the void pointer and get the address of the globalized variable.
+ llvm::PointerType *VarPtrTy = CGF.ConvertTypeForMem(VarTy)->getPointerTo();
+ llvm::Value *CastedVoidPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
+ LValue VarAddr = CGF.MakeNaturalAlignAddrLValue(CastedVoidPtr, VarTy);
+ Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
+ Rec.second.GlobalizedVal = VoidPtr;
+
+ // Assign the local allocation to the newly globalized location.
+ if (EscapedParam) {
+ CGF.EmitStoreOfScalar(ParValue, VarAddr);
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
+ }
+ if (auto *DI = CGF.getDebugInfo())
+ VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
+ }
+ for (const auto *VD : I->getSecond().EscapedVariableLengthDecls) {
+ // Use actual memory size of the VLA object including the padding
// for alignment purposes.
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
- unsigned GlobalRecordSize =
- CGM.getContext().getTypeSizeInChars(GlobalRecTy).getQuantity();
- GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
-
- llvm::PointerType *GlobalRecPtrTy =
- CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo();
- llvm::Value *GlobalRecCastAddr;
- llvm::Value *IsTTD = nullptr;
- if (!IsInTTDRegion &&
- (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
- llvm::BasicBlock *SPMDBB = CGF.createBasicBlock(".spmd");
- llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
- if (I->getSecond().SecondaryGlobalRecord.hasValue()) {
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *PL = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
- OMPRTL___kmpc_parallel_level),
- {RTLoc, ThreadID});
- IsTTD = Bld.CreateIsNull(PL);
- }
- llvm::Value *IsSPMD = Bld.CreateIsNotNull(
- CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
- Bld.CreateCondBr(IsSPMD, SPMDBB, NonSPMDBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(SPMDBB);
- Address RecPtr = Address(llvm::ConstantPointerNull::get(GlobalRecPtrTy),
- CharUnits::fromQuantity(Alignment));
- CGF.EmitBranch(ExitBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(NonSPMDBB);
- llvm::Value *Size = llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize);
- if (const RecordDecl *SecGlobalizedVarsRecord =
- I->getSecond().SecondaryGlobalRecord.getValueOr(nullptr)) {
- SecGlobalRecTy =
- CGM.getContext().getRecordType(SecGlobalizedVarsRecord);
-
- // Recover pointer to this function's global record. The runtime will
- // handle the specifics of the allocation of the memory.
- // Use actual memory size of the record including the padding
- // for alignment purposes.
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(SecGlobalRecTy).getQuantity();
- unsigned GlobalRecordSize =
- CGM.getContext().getTypeSizeInChars(SecGlobalRecTy).getQuantity();
- GlobalRecordSize = llvm::alignTo(GlobalRecordSize, Alignment);
- Size = Bld.CreateSelect(
- IsTTD, llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize), Size);
- }
- // TODO: allow the usage of shared memory to be controlled by
- // the user, for now, default to global.
- llvm::Value *GlobalRecordSizeArg[] = {
- Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
- GlobalRecordSizeArg);
- GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, GlobalRecPtrTy);
- CGF.EmitBlock(ExitBB);
- auto *Phi = Bld.CreatePHI(GlobalRecPtrTy,
- /*NumReservedValues=*/2, "_select_stack");
- Phi->addIncoming(RecPtr.getPointer(), SPMDBB);
- Phi->addIncoming(GlobalRecCastAddr, NonSPMDBB);
- GlobalRecCastAddr = Phi;
- I->getSecond().GlobalRecordAddr = Phi;
- I->getSecond().IsInSPMDModeFlag = IsSPMD;
- } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
- assert(GlobalizedRecords.back().Records.size() < 2 &&
- "Expected less than 2 globalized records: one for target and one "
- "for teams.");
- unsigned Offset = 0;
- for (const RecordDecl *RD : GlobalizedRecords.back().Records) {
- QualType RDTy = CGM.getContext().getRecordType(RD);
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(RDTy).getQuantity();
- unsigned Size = CGM.getContext().getTypeSizeInChars(RDTy).getQuantity();
- Offset =
- llvm::alignTo(llvm::alignTo(Offset, Alignment) + Size, Alignment);
- }
- unsigned Alignment =
- CGM.getContext().getTypeAlignInChars(GlobalRecTy).getQuantity();
- Offset = llvm::alignTo(Offset, Alignment);
- GlobalizedRecords.back().Records.push_back(GlobalizedVarsRecord);
- ++GlobalizedRecords.back().RegionCounter;
- if (GlobalizedRecords.back().Records.size() == 1) {
- assert(KernelStaticGlobalized &&
- "Kernel static pointer must be initialized already.");
- auto *UseSharedMemory = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int16Ty, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, nullptr,
- "_openmp_static_kernel$is_shared");
- UseSharedMemory->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
- /*DestWidth=*/16, /*Signed=*/0);
- llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
- Address(UseSharedMemory,
- CGM.getContext().getTypeAlignInChars(Int16Ty)),
- /*Volatile=*/false, Int16Ty, Loc);
- auto *StaticGlobalized = new llvm::GlobalVariable(
- CGM.getModule(), CGM.Int8Ty, /*isConstant=*/false,
- llvm::GlobalValue::CommonLinkage, nullptr);
- auto *RecSize = new llvm::GlobalVariable(
- CGM.getModule(), CGM.SizeTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, nullptr,
- "_openmp_static_kernel$size");
- RecSize->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- llvm::Value *Ld = CGF.EmitLoadOfScalar(
- Address(RecSize, CGM.getSizeAlign()), /*Volatile=*/false,
- CGM.getContext().getSizeType(), Loc);
- llvm::Value *ResAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- KernelStaticGlobalized, CGM.VoidPtrPtrTy);
- llvm::Value *GlobalRecordSizeArg[] = {
- llvm::ConstantInt::get(
- CGM.Int16Ty,
- getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
- StaticGlobalized, Ld, IsInSharedMemory, ResAddr};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_get_team_static_memory),
- GlobalRecordSizeArg);
- GlobalizedRecords.back().Buffer = StaticGlobalized;
- GlobalizedRecords.back().RecSize = RecSize;
- GlobalizedRecords.back().UseSharedMemory = UseSharedMemory;
- GlobalizedRecords.back().Loc = Loc;
- }
- assert(KernelStaticGlobalized && "Global address must be set already.");
- Address FrameAddr = CGF.EmitLoadOfPointer(
- Address(KernelStaticGlobalized, CGM.getPointerAlign()),
- CGM.getContext()
- .getPointerType(CGM.getContext().VoidPtrTy)
- .castAs<PointerType>());
- llvm::Value *GlobalRecValue =
- Bld.CreateConstInBoundsGEP(FrameAddr, Offset).getPointer();
- I->getSecond().GlobalRecordAddr = GlobalRecValue;
- I->getSecond().IsInSPMDModeFlag = nullptr;
- GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(GlobalRecTy)->getPointerTo());
- } else {
- // TODO: allow the usage of shared memory to be controlled by
- // the user, for now, default to global.
- bool UseSharedMemory =
- IsInTTDRegion && GlobalRecordSize <= SharedMemorySize;
- llvm::Value *GlobalRecordSizeArg[] = {
- llvm::ConstantInt::get(CGM.SizeTy, GlobalRecordSize),
- CGF.Builder.getInt16(UseSharedMemory ? 1 : 0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(),
- IsInTTDRegion ? OMPRTL___kmpc_data_sharing_push_stack
- : OMPRTL___kmpc_data_sharing_coalesced_push_stack),
- GlobalRecordSizeArg);
- GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, GlobalRecPtrTy);
- I->getSecond().GlobalRecordAddr = GlobalRecValue;
- I->getSecond().IsInSPMDModeFlag = nullptr;
- }
- LValue Base =
- CGF.MakeNaturalAlignPointeeAddrLValue(GlobalRecCastAddr, GlobalRecTy);
-
- // Emit the "global alloca" which is a GEP from the global declaration
- // record using the pointer returned by the runtime.
- LValue SecBase;
- decltype(I->getSecond().LocalVarData)::const_iterator SecIt;
- if (IsTTD) {
- SecIt = I->getSecond().SecondaryLocalVarData->begin();
- llvm::PointerType *SecGlobalRecPtrTy =
- CGF.ConvertTypeForMem(SecGlobalRecTy)->getPointerTo();
- SecBase = CGF.MakeNaturalAlignPointeeAddrLValue(
- Bld.CreatePointerBitCastOrAddrSpaceCast(
- I->getSecond().GlobalRecordAddr, SecGlobalRecPtrTy),
- SecGlobalRecTy);
- }
- for (auto &Rec : I->getSecond().LocalVarData) {
- bool EscapedParam = I->getSecond().EscapedParameters.count(Rec.first);
- llvm::Value *ParValue;
- if (EscapedParam) {
- const auto *VD = cast<VarDecl>(Rec.first);
- LValue ParLVal =
- CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
- ParValue = CGF.EmitLoadOfScalar(ParLVal, Loc);
- }
- LValue VarAddr = CGF.EmitLValueForField(Base, Rec.second.FD);
- // Emit VarAddr basing on lane-id if required.
- QualType VarTy;
- if (Rec.second.IsOnePerTeam) {
- VarTy = Rec.second.FD->getType();
- } else {
- llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
- VarAddr.getAddress(CGF).getPointer(),
- {Bld.getInt32(0), getNVPTXLaneID(CGF)});
- VarTy =
- Rec.second.FD->getType()->castAsArrayTypeUnsafe()->getElementType();
- VarAddr = CGF.MakeAddrLValue(
- Address(Ptr, CGM.getContext().getDeclAlign(Rec.first)), VarTy,
- AlignmentSource::Decl);
- }
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
- if (!IsInTTDRegion &&
- (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
- assert(I->getSecond().IsInSPMDModeFlag &&
- "Expected unknown execution mode or required SPMD check.");
- if (IsTTD) {
- assert(SecIt->second.IsOnePerTeam &&
- "Secondary glob data must be one per team.");
- LValue SecVarAddr = CGF.EmitLValueForField(SecBase, SecIt->second.FD);
- VarAddr.setAddress(
- Address(Bld.CreateSelect(IsTTD, SecVarAddr.getPointer(CGF),
- VarAddr.getPointer(CGF)),
- VarAddr.getAlignment()));
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
- }
- Address GlobalPtr = Rec.second.PrivateAddr;
- Address LocalAddr = CGF.CreateMemTemp(VarTy, Rec.second.FD->getName());
- Rec.second.PrivateAddr = Address(
- Bld.CreateSelect(I->getSecond().IsInSPMDModeFlag,
- LocalAddr.getPointer(), GlobalPtr.getPointer()),
- LocalAddr.getAlignment());
- }
- if (EscapedParam) {
- const auto *VD = cast<VarDecl>(Rec.first);
- CGF.EmitStoreOfScalar(ParValue, VarAddr);
- I->getSecond().MappedParams->setVarAddr(CGF, VD,
- VarAddr.getAddress(CGF));
- }
- if (IsTTD)
- ++SecIt;
- }
- }
- for (const ValueDecl *VD : I->getSecond().EscapedVariableLengthDecls) {
- // Recover pointer to this function's global record. The runtime will
- // handle the specifics of the allocation of the memory.
- // Use actual memory size of the record including the padding
- // for alignment purposes.
- CGBuilderTy &Bld = CGF.Builder;
llvm::Value *Size = CGF.getTypeSize(VD->getType());
CharUnits Align = CGM.getContext().getDeclAlign(VD);
Size = Bld.CreateNUWAdd(
Size, llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity() - 1));
llvm::Value *AlignVal =
llvm::ConstantInt::get(CGF.SizeTy, Align.getQuantity());
+
Size = Bld.CreateUDiv(Size, AlignVal);
Size = Bld.CreateNUWMul(Size, AlignVal);
- // TODO: allow the usage of shared memory to be controlled by
- // the user, for now, default to global.
- llvm::Value *GlobalRecordSizeArg[] = {
- Size, CGF.Builder.getInt16(/*UseSharedMemory=*/0)};
- llvm::Value *GlobalRecValue = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_coalesced_push_stack),
- GlobalRecordSizeArg);
- llvm::Value *GlobalRecCastAddr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- GlobalRecValue, CGF.ConvertTypeForMem(VD->getType())->getPointerTo());
- LValue Base = CGF.MakeAddrLValue(GlobalRecCastAddr, VD->getType(),
+
+ // Allocate space for this VLA object to be globalized.
+ llvm::Value *AllocArgs[] = {CGF.getTypeSize(VD->getType())};
+ llvm::Instruction *VoidPtr =
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_alloc_shared),
+ AllocArgs, VD->getName());
+
+ I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(
+ std::pair<llvm::Value *, llvm::Value *>(
+ {VoidPtr, CGF.getTypeSize(VD->getType())}));
+ LValue Base = CGF.MakeAddrLValue(VoidPtr, VD->getType(),
CGM.getContext().getDeclAlign(VD),
AlignmentSource::Decl);
I->getSecond().MappedParams->setVarAddr(CGF, cast<VarDecl>(VD),
Base.getAddress(CGF));
- I->getSecond().EscapedVariableLengthDeclsAddrs.emplace_back(GlobalRecValue);
}
I->getSecond().MappedParams->apply(CGF);
}
@@ -1996,60 +1457,23 @@ void CGOpenMPRuntimeGPU::emitGenericVarsEpilog(CodeGenFunction &CGF,
const auto I = FunctionGlobalizedDecls.find(CGF.CurFn);
if (I != FunctionGlobalizedDecls.end()) {
- I->getSecond().MappedParams->restore(CGF);
- if (!CGF.HaveInsertPoint())
- return;
- for (llvm::Value *Addr :
+ // Deallocate the memory for each globalized VLA object
+ for (auto AddrSizePair :
llvm::reverse(I->getSecond().EscapedVariableLengthDeclsAddrs)) {
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
- Addr);
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free_shared),
+ {AddrSizePair.first, AddrSizePair.second});
}
- if (I->getSecond().GlobalRecordAddr) {
- if (!IsInTTDRegion &&
- (WithSPMDCheck ||
- getExecutionMode() == CGOpenMPRuntimeGPU::EM_Unknown)) {
- CGBuilderTy &Bld = CGF.Builder;
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
- llvm::BasicBlock *NonSPMDBB = CGF.createBasicBlock(".non-spmd");
- Bld.CreateCondBr(I->getSecond().IsInSPMDModeFlag, ExitBB, NonSPMDBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(NonSPMDBB);
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
- CGF.EmitCastToVoidPtr(I->getSecond().GlobalRecordAddr));
- CGF.EmitBlock(ExitBB);
- } else if (!CGM.getLangOpts().OpenMPCUDATargetParallel && IsInTTDRegion) {
- assert(GlobalizedRecords.back().RegionCounter > 0 &&
- "region counter must be > 0.");
- --GlobalizedRecords.back().RegionCounter;
- // Emit the restore function only in the target region.
- if (GlobalizedRecords.back().RegionCounter == 0) {
- QualType Int16Ty = CGM.getContext().getIntTypeForBitwidth(
- /*DestWidth=*/16, /*Signed=*/0);
- llvm::Value *IsInSharedMemory = CGF.EmitLoadOfScalar(
- Address(GlobalizedRecords.back().UseSharedMemory,
- CGM.getContext().getTypeAlignInChars(Int16Ty)),
- /*Volatile=*/false, Int16Ty, GlobalizedRecords.back().Loc);
- llvm::Value *Args[] = {
- llvm::ConstantInt::get(
- CGM.Int16Ty,
- getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD ? 1 : 0),
- IsInSharedMemory};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_restore_team_static_memory),
- Args);
- }
- } else {
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_data_sharing_pop_stack),
- I->getSecond().GlobalRecordAddr);
- }
+ // Deallocate the memory for each globalized value
+ for (auto &Rec : llvm::reverse(I->getSecond().LocalVarData)) {
+ const auto *VD = cast<VarDecl>(Rec.first);
+ I->getSecond().MappedParams->restore(CGF);
+
+ llvm::Value *FreeArgs[] = {Rec.second.GlobalizedVal,
+ CGF.getTypeSize(VD->getType())};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_free_shared),
+ FreeArgs);
}
}
}
@@ -2072,113 +1496,38 @@ void CGOpenMPRuntimeGPU::emitTeamsCall(CodeGenFunction &CGF,
emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}
-void CGOpenMPRuntimeGPU::emitParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
+void CGOpenMPRuntimeGPU::emitParallelCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Function *OutlinedFn,
+ ArrayRef<llvm::Value *> CapturedVars,
+ const Expr *IfCond) {
if (!CGF.HaveInsertPoint())
return;
- if (getExecutionMode() == CGOpenMPRuntimeGPU::EM_SPMD)
- emitSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
- else
- emitNonSPMDParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
-}
-
-void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
- llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
-
- // Force inline this outlined function at its call site.
- Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
-
- // Ensure we do not inline the function. This is trivially true for the ones
- // passed to __kmpc_fork_call but the ones calles in serialized regions
- // could be inlined. This is not a perfect but it is closer to the invariant
- // we want, namely, every data environment starts with a new function.
- // TODO: We should pass the if condition to the runtime function and do the
- // handling there. Much cleaner code.
- cast<llvm::Function>(OutlinedFn)->addFnAttr(llvm::Attribute::NoInline);
-
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- // ThreadId for serialized parallels is 0.
- Address ThreadIDAddr = ZeroAddr;
- auto &&CodeGen = [this, Fn, CapturedVars, Loc, &ThreadIDAddr](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- Action.Enter(CGF);
-
- Address ZeroAddr =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".bound.zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
- OutlinedFnArgs.push_back(ZeroAddr.getPointer());
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
- };
- auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
-
- RegionCodeGenTy RCG(CodeGen);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *Args[] = {RTLoc, ThreadID};
-
- NVPTXActionTy Action(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
- Args,
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
- Args);
- RCG.setAction(Action);
- RCG(CGF);
- };
-
- auto &&L0ParallelGen = [this, CapturedVars, Fn](CodeGenFunction &CGF,
- PrePostActionTy &Action) {
+ auto &&ParallelGen = [this, Loc, OutlinedFn, CapturedVars,
+ IfCond](CodeGenFunction &CGF, PrePostActionTy &Action) {
CGBuilderTy &Bld = CGF.Builder;
- llvm::Function *WFn = WrapperFunctionsMap[Fn];
- assert(WFn && "Wrapper function does not exist!");
- llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
-
- // Prepare for parallel region. Indicate the outlined function.
- llvm::Value *Args[] = {ID};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_kernel_prepare_parallel),
- Args);
+ llvm::Function *WFn = WrapperFunctionsMap[OutlinedFn];
+ llvm::Value *ID = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ if (WFn)
+ ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);
+ llvm::Value *FnPtr = Bld.CreateBitOrPointerCast(OutlinedFn, CGM.Int8PtrTy);
// Create a private scope that will globalize the arguments
// passed from the outside of the target region.
+ // TODO: Is that needed?
CodeGenFunction::OMPPrivateScope PrivateArgScope(CGF);
+ Address CapturedVarsAddrs = CGF.CreateDefaultAlignTempAlloca(
+ llvm::ArrayType::get(CGM.VoidPtrTy, CapturedVars.size()),
+ "captured_vars_addrs");
// There's something to share.
if (!CapturedVars.empty()) {
// Prepare for parallel region. Indicate the outlined function.
- Address SharedArgs =
- CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy, "shared_arg_refs");
- llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
-
- llvm::Value *DataSharingArgs[] = {
- SharedArgsPtr,
- llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
- CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_begin_sharing_variables),
- DataSharingArgs);
-
- // Store variable address in a list of references to pass to workers.
- unsigned Idx = 0;
ASTContext &Ctx = CGF.getContext();
- Address SharedArgListAddress = CGF.EmitLoadOfPointer(
- SharedArgs, Ctx.getPointerType(Ctx.getPointerType(Ctx.VoidPtrTy))
- .castAs<PointerType>());
+ unsigned Idx = 0;
for (llvm::Value *V : CapturedVars) {
- Address Dst = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
+ Address Dst = Bld.CreateConstArrayGEP(CapturedVarsAddrs, Idx);
llvm::Value *PtrV;
if (V->getType()->isIntegerTy())
PtrV = Bld.CreateIntToPtr(V, CGF.VoidPtrTy);
@@ -2190,141 +1539,33 @@ void CGOpenMPRuntimeGPU::emitNonSPMDParallelCall(
}
}
- // Activate workers. This barrier is used by the master to signal
- // work for the workers.
- syncCTAThreads(CGF);
-
- // OpenMP [2.5, Parallel Construct, p.49]
- // There is an implied barrier at the end of a parallel region. After the
- // end of a parallel region, only the master thread of the team resumes
- // execution of the enclosing task region.
- //
- // The master waits at this barrier until all workers are done.
- syncCTAThreads(CGF);
-
- if (!CapturedVars.empty())
- CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_end_sharing_variables));
-
- // Remember for post-processing in worker loop.
- Work.emplace_back(WFn);
- };
-
- auto &&LNParallelGen = [this, Loc, &SeqGen, &L0ParallelGen](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- if (IsInParallelRegion) {
- SeqGen(CGF, Action);
- } else if (IsInTargetMasterThreadRegion) {
- L0ParallelGen(CGF, Action);
- } else {
- // Check for master and then parallelism:
- // if (__kmpc_is_spmd_exec_mode() || __kmpc_parallel_level(loc, gtid)) {
- // Serialized execution.
- // } else {
- // Worker call.
- // }
- CGBuilderTy &Bld = CGF.Builder;
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");
- llvm::BasicBlock *SeqBB = CGF.createBasicBlock(".sequential");
- llvm::BasicBlock *ParallelCheckBB = CGF.createBasicBlock(".parcheck");
- llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
- llvm::Value *IsSPMD = Bld.CreateIsNotNull(
- CGF.EmitNounwindRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_is_spmd_exec_mode)));
- Bld.CreateCondBr(IsSPMD, SeqBB, ParallelCheckBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(ParallelCheckBB);
- llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *PL = CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(CGM.getModule(),
- OMPRTL___kmpc_parallel_level),
- {RTLoc, ThreadID});
- llvm::Value *Res = Bld.CreateIsNotNull(PL);
- Bld.CreateCondBr(Res, SeqBB, MasterBB);
- CGF.EmitBlock(SeqBB);
- SeqGen(CGF, Action);
- CGF.EmitBranch(ExitBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- CGF.EmitBlock(MasterBB);
- L0ParallelGen(CGF, Action);
- CGF.EmitBranch(ExitBB);
- // There is no need to emit line number for unconditional branch.
- (void)ApplyDebugLocation::CreateEmpty(CGF);
- // Emit the continuation block for code after the if.
- CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
- }
- };
-
- if (IfCond) {
- emitIfClause(CGF, IfCond, LNParallelGen, SeqGen);
- } else {
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- RegionCodeGenTy ThenRCG(LNParallelGen);
- ThenRCG(CGF);
- }
-}
-
-void CGOpenMPRuntimeGPU::emitSPMDParallelCall(
- CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn,
- ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
- // Just call the outlined function to execute the parallel region.
- // OutlinedFn(&GTid, &zero, CapturedStruct);
- //
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
-
- Address ZeroAddr = CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- // ThreadId for serialized parallels is 0.
- Address ThreadIDAddr = ZeroAddr;
- auto &&CodeGen = [this, OutlinedFn, CapturedVars, Loc, &ThreadIDAddr](
- CodeGenFunction &CGF, PrePostActionTy &Action) {
- Action.Enter(CGF);
-
- Address ZeroAddr =
- CGF.CreateDefaultAlignTempAlloca(CGF.Int32Ty,
- /*Name=*/".bound.zero.addr");
- CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
- llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
- OutlinedFnArgs.push_back(ThreadIDAddr.getPointer());
- OutlinedFnArgs.push_back(ZeroAddr.getPointer());
- OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
- emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
- };
- auto &&SeqGen = [this, &CodeGen, Loc](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ llvm::Value *IfCondVal = nullptr;
+ if (IfCond)
+ IfCondVal = Bld.CreateIntCast(CGF.EvaluateExprAsBool(IfCond), CGF.Int32Ty,
+ /* isSigned */ false);
+ else
+ IfCondVal = llvm::ConstantInt::get(CGF.Int32Ty, 1);
- RegionCodeGenTy RCG(CodeGen);
+ assert(IfCondVal && "Expected a value");
llvm::Value *RTLoc = emitUpdateLocation(CGF, Loc);
- llvm::Value *ThreadID = getThreadID(CGF, Loc);
- llvm::Value *Args[] = {RTLoc, ThreadID};
-
- NVPTXActionTy Action(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_serialized_parallel),
- Args,
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_end_serialized_parallel),
- Args);
- RCG.setAction(Action);
- RCG(CGF);
+ llvm::Value *Args[] = {
+ RTLoc,
+ getThreadID(CGF, Loc),
+ IfCondVal,
+ llvm::ConstantInt::get(CGF.Int32Ty, -1),
+ llvm::ConstantInt::get(CGF.Int32Ty, -1),
+ FnPtr,
+ ID,
+ Bld.CreateBitOrPointerCast(CapturedVarsAddrs.getPointer(),
+ CGF.VoidPtrPtrTy),
+ llvm::ConstantInt::get(CGM.SizeTy, CapturedVars.size())};
+ CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_parallel_51),
+ Args);
};
- if (IsInTargetMasterThreadRegion) {
- // In the worker need to use the real thread id.
- ThreadIDAddr = emitThreadIDAddress(CGF, Loc);
- RegionCodeGenTy RCG(CodeGen);
- RCG(CGF);
- } else {
- // If we are not in the target region, it is definitely L2 parallelism or
- // more, because for SPMD mode we always has L1 parallel level, sowe don't
- // need to check for orphaned directives.
- RegionCodeGenTy RCG(SeqGen);
- RCG(CGF);
- }
+ RegionCodeGenTy RCG(ParallelGen);
+ RCG(CGF);
}
void CGOpenMPRuntimeGPU::syncCTAThreads(CodeGenFunction &CGF) {
@@ -2936,14 +2177,16 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
Address ElemPtr = Address(ElemPtrPtr, Align);
ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
if (NumIters > 1) {
- ElemPtr = Address(Bld.CreateGEP(ElemPtr.getPointer(), Cnt),
+ ElemPtr = Address(Bld.CreateGEP(ElemPtr.getElementType(),
+ ElemPtr.getPointer(), Cnt),
ElemPtr.getAlignment());
}
// Get pointer to location in transfer medium.
// MediumPtr = &medium[warp_id]
llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
+ TransferMedium->getValueType(), TransferMedium,
+ {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
Address MediumPtr(MediumPtrVal, Align);
// Casting to actual data type.
// MediumPtr = (CopyType*)MediumPtrAddr;
@@ -2991,7 +2234,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
// SrcMediumPtr = &medium[tid]
llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
- TransferMedium,
+ TransferMedium->getValueType(), TransferMedium,
{llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
Address SrcMediumPtr(SrcMediumPtrVal, Align);
// SrcMediumVal = *SrcMediumPtr;
@@ -3004,7 +2247,8 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
Address TargetElemPtr = Address(TargetElemPtrVal, Align);
TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
if (NumIters > 1) {
- TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getPointer(), Cnt),
+ TargetElemPtr = Address(Bld.CreateGEP(TargetElemPtr.getElementType(),
+ TargetElemPtr.getPointer(), Cnt),
TargetElemPtr.getAlignment());
}
@@ -3327,9 +2571,10 @@ static llvm::Value *emitListToGlobalCopyFunction(
const FieldDecl *FD = VarFieldMap.lookup(VD);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
+ Address GlobAddr = GlobLVal.getAddress(CGF);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
+ GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
case TEK_Scalar: {
llvm::Value *V = CGF.EmitLoadOfScalar(
@@ -3426,8 +2671,9 @@ static llvm::Value *emitListToGlobalReduceFunction(
const FieldDecl *FD = VarFieldMap.lookup(VD);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
+ Address GlobAddr = GlobLVal.getAddress(CGF);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
+ GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -3531,9 +2777,10 @@ static llvm::Value *emitGlobalToListCopyFunction(
const FieldDecl *FD = VarFieldMap.lookup(VD);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobLVal.getAlignment()));
+ Address GlobAddr = GlobLVal.getAddress(CGF);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
+ GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
case TEK_Scalar: {
llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
@@ -3630,8 +2877,9 @@ static llvm::Value *emitGlobalToListReduceFunction(
const FieldDecl *FD = VarFieldMap.lookup(VD);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
- llvm::Value *BufferPtr =
- Bld.CreateInBoundsGEP(GlobLVal.getPointer(CGF), Idxs);
+ Address GlobAddr = GlobLVal.getAddress(CGF);
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
+ GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
llvm::Value *Ptr = CGF.EmitCastToVoidPtr(BufferPtr);
CGF.EmitStoreOfScalar(Ptr, Elem, /*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -4100,10 +3348,6 @@ CGOpenMPRuntimeGPU::translateParameter(const FieldDecl *FD,
if (Attr->getCaptureKind() == OMPC_map) {
PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
LangAS::opencl_global);
- } else if (Attr->getCaptureKind() == OMPC_firstprivate &&
- PointeeTy.isConstant(CGM.getContext())) {
- PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
- LangAS::opencl_generic);
}
}
ArgType = CGM.getContext().getPointerType(PointeeTy);
@@ -4210,6 +3454,15 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
auto *Fn = llvm::Function::Create(
CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
Twine(OutlinedParallelFn->getName(), "_wrapper"), &CGM.getModule());
+
+ // Ensure we do not inline the function. This is trivially true for the ones
+ // passed to __kmpc_fork_call but the ones called in serialized regions
+ // could be inlined. This is not perfect, but it is closer to the invariant
+ // we want, namely, every data environment starts with a new function.
+ // TODO: We should pass the if condition to the runtime function and do the
+ // handling there. Much cleaner code.
+ Fn->addFnAttr(llvm::Attribute::NoInline);
+
CGM.SetInternalFunctionAttributes(GlobalDecl(), Fn, CGFI);
Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
Fn->setDoesNotRecurse();
@@ -4340,7 +3593,6 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
auto I = FunctionGlobalizedDecls.try_emplace(CGF.CurFn).first;
I->getSecond().MappedParams =
std::make_unique<CodeGenFunction::OMPMapVars>();
- I->getSecond().GlobalRecord = GlobalizedVarsRecord;
I->getSecond().EscapedParameters.insert(
VarChecker.getEscapedParameters().begin(),
VarChecker.getEscapedParameters().end());
@@ -4349,21 +3601,16 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
DeclToAddrMapTy &Data = I->getSecond().LocalVarData;
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
- const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
- Data.insert(std::make_pair(VD, MappedVarData(FD, IsInTTDRegion)));
+ Data.insert(std::make_pair(VD, MappedVarData()));
}
if (!IsInTTDRegion && !NeedToDelayGlobalization && !IsInParallelRegion) {
CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
VarChecker.Visit(Body);
- I->getSecond().SecondaryGlobalRecord =
- VarChecker.getGlobalizedRecord(/*IsInTTDRegion=*/true);
I->getSecond().SecondaryLocalVarData.emplace();
DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
- const FieldDecl *FD = VarChecker.getFieldForGlobalizedVar(VD);
- Data.insert(
- std::make_pair(VD, MappedVarData(FD, /*IsInTTDRegion=*/true)));
+ Data.insert(std::make_pair(VD, MappedVarData()));
}
}
if (!NeedToDelayGlobalization) {
@@ -4535,10 +3782,6 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
}
}
-unsigned CGOpenMPRuntimeGPU::getDefaultFirstprivateAddressSpace() const {
- return CGM.getContext().getTargetAddressSpace(LangAS::cuda_constant);
-}
-
bool CGOpenMPRuntimeGPU::hasAllocateAttributeForGlobalVar(const VarDecl *VD,
LangAS &AS) {
if (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())
@@ -4598,10 +3841,7 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::SM_37:
case CudaArch::SM_50:
case CudaArch::SM_52:
- case CudaArch::SM_53:
- case CudaArch::SM_60:
- case CudaArch::SM_61:
- case CudaArch::SM_62: {
+ case CudaArch::SM_53: {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << "Target architecture " << CudaArchToString(Arch)
@@ -4609,10 +3849,14 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
CGM.Error(Clause->getBeginLoc(), Out.str());
return;
}
+ case CudaArch::SM_60:
+ case CudaArch::SM_61:
+ case CudaArch::SM_62:
case CudaArch::SM_70:
case CudaArch::SM_72:
case CudaArch::SM_75:
case CudaArch::SM_80:
+ case CudaArch::SM_86:
case CudaArch::GFX600:
case CudaArch::GFX601:
case CudaArch::GFX602:
@@ -4633,14 +3877,18 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX906:
case CudaArch::GFX908:
case CudaArch::GFX909:
+ case CudaArch::GFX90a:
case CudaArch::GFX90c:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
+ case CudaArch::GFX1013:
case CudaArch::GFX1030:
case CudaArch::GFX1031:
case CudaArch::GFX1032:
case CudaArch::GFX1033:
+ case CudaArch::GFX1034:
+ case CudaArch::GFX1035:
case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
break;
@@ -4652,183 +3900,8 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
CGOpenMPRuntime::processRequiresDirective(D);
}
-/// Get number of SMs and number of blocks per SM.
-static std::pair<unsigned, unsigned> getSMsBlocksPerSM(CodeGenModule &CGM) {
- std::pair<unsigned, unsigned> Data;
- if (CGM.getLangOpts().OpenMPCUDANumSMs)
- Data.first = CGM.getLangOpts().OpenMPCUDANumSMs;
- if (CGM.getLangOpts().OpenMPCUDABlocksPerSM)
- Data.second = CGM.getLangOpts().OpenMPCUDABlocksPerSM;
- if (Data.first && Data.second)
- return Data;
- switch (getCudaArch(CGM)) {
- case CudaArch::SM_20:
- case CudaArch::SM_21:
- case CudaArch::SM_30:
- case CudaArch::SM_32:
- case CudaArch::SM_35:
- case CudaArch::SM_37:
- case CudaArch::SM_50:
- case CudaArch::SM_52:
- case CudaArch::SM_53:
- return {16, 16};
- case CudaArch::SM_60:
- case CudaArch::SM_61:
- case CudaArch::SM_62:
- return {56, 32};
- case CudaArch::SM_70:
- case CudaArch::SM_72:
- case CudaArch::SM_75:
- case CudaArch::SM_80:
- return {84, 32};
- case CudaArch::GFX600:
- case CudaArch::GFX601:
- case CudaArch::GFX602:
- case CudaArch::GFX700:
- case CudaArch::GFX701:
- case CudaArch::GFX702:
- case CudaArch::GFX703:
- case CudaArch::GFX704:
- case CudaArch::GFX705:
- case CudaArch::GFX801:
- case CudaArch::GFX802:
- case CudaArch::GFX803:
- case CudaArch::GFX805:
- case CudaArch::GFX810:
- case CudaArch::GFX900:
- case CudaArch::GFX902:
- case CudaArch::GFX904:
- case CudaArch::GFX906:
- case CudaArch::GFX908:
- case CudaArch::GFX909:
- case CudaArch::GFX90c:
- case CudaArch::GFX1010:
- case CudaArch::GFX1011:
- case CudaArch::GFX1012:
- case CudaArch::GFX1030:
- case CudaArch::GFX1031:
- case CudaArch::GFX1032:
- case CudaArch::GFX1033:
- case CudaArch::UNUSED:
- case CudaArch::UNKNOWN:
- break;
- case CudaArch::LAST:
- llvm_unreachable("Unexpected Cuda arch.");
- }
- llvm_unreachable("Unexpected NVPTX target without ptx feature.");
-}
-
void CGOpenMPRuntimeGPU::clear() {
- if (!GlobalizedRecords.empty() &&
- !CGM.getLangOpts().OpenMPCUDATargetParallel) {
- ASTContext &C = CGM.getContext();
- llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> GlobalRecs;
- llvm::SmallVector<const GlobalPtrSizeRecsTy *, 4> SharedRecs;
- RecordDecl *StaticRD = C.buildImplicitRecord(
- "_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
- StaticRD->startDefinition();
- RecordDecl *SharedStaticRD = C.buildImplicitRecord(
- "_shared_openmp_static_memory_type_$_", RecordDecl::TagKind::TTK_Union);
- SharedStaticRD->startDefinition();
- for (const GlobalPtrSizeRecsTy &Records : GlobalizedRecords) {
- if (Records.Records.empty())
- continue;
- unsigned Size = 0;
- unsigned RecAlignment = 0;
- for (const RecordDecl *RD : Records.Records) {
- QualType RDTy = C.getRecordType(RD);
- unsigned Alignment = C.getTypeAlignInChars(RDTy).getQuantity();
- RecAlignment = std::max(RecAlignment, Alignment);
- unsigned RecSize = C.getTypeSizeInChars(RDTy).getQuantity();
- Size =
- llvm::alignTo(llvm::alignTo(Size, Alignment) + RecSize, Alignment);
- }
- Size = llvm::alignTo(Size, RecAlignment);
- llvm::APInt ArySize(/*numBits=*/64, Size);
- QualType SubTy = C.getConstantArrayType(
- C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
- const bool UseSharedMemory = Size <= SharedMemorySize;
- auto *Field =
- FieldDecl::Create(C, UseSharedMemory ? SharedStaticRD : StaticRD,
- SourceLocation(), SourceLocation(), nullptr, SubTy,
- C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- if (UseSharedMemory) {
- SharedStaticRD->addDecl(Field);
- SharedRecs.push_back(&Records);
- } else {
- StaticRD->addDecl(Field);
- GlobalRecs.push_back(&Records);
- }
- Records.RecSize->setInitializer(llvm::ConstantInt::get(CGM.SizeTy, Size));
- Records.UseSharedMemory->setInitializer(
- llvm::ConstantInt::get(CGM.Int16Ty, UseSharedMemory ? 1 : 0));
- }
- // Allocate SharedMemorySize buffer for the shared memory.
- // FIXME: nvlink does not handle weak linkage correctly (object with the
- // different size are reported as erroneous).
- // Restore this code as sson as nvlink is fixed.
- if (!SharedStaticRD->field_empty()) {
- llvm::APInt ArySize(/*numBits=*/64, SharedMemorySize);
- QualType SubTy = C.getConstantArrayType(
- C.CharTy, ArySize, nullptr, ArrayType::Normal, /*IndexTypeQuals=*/0);
- auto *Field = FieldDecl::Create(
- C, SharedStaticRD, SourceLocation(), SourceLocation(), nullptr, SubTy,
- C.getTrivialTypeSourceInfo(SubTy, SourceLocation()),
- /*BW=*/nullptr, /*Mutable=*/false,
- /*InitStyle=*/ICIS_NoInit);
- Field->setAccess(AS_public);
- SharedStaticRD->addDecl(Field);
- }
- SharedStaticRD->completeDefinition();
- if (!SharedStaticRD->field_empty()) {
- QualType StaticTy = C.getRecordType(SharedStaticRD);
- llvm::Type *LLVMStaticTy = CGM.getTypes().ConvertTypeForMem(StaticTy);
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), LLVMStaticTy,
- /*isConstant=*/false, llvm::GlobalValue::WeakAnyLinkage,
- llvm::UndefValue::get(LLVMStaticTy),
- "_openmp_shared_static_glob_rd_$_", /*InsertBefore=*/nullptr,
- llvm::GlobalValue::NotThreadLocal,
- C.getTargetAddressSpace(LangAS::cuda_shared));
- auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
- GV, CGM.VoidPtrTy);
- for (const GlobalPtrSizeRecsTy *Rec : SharedRecs) {
- Rec->Buffer->replaceAllUsesWith(Replacement);
- Rec->Buffer->eraseFromParent();
- }
- }
- StaticRD->completeDefinition();
- if (!StaticRD->field_empty()) {
- QualType StaticTy = C.getRecordType(StaticRD);
- std::pair<unsigned, unsigned> SMsBlockPerSM = getSMsBlocksPerSM(CGM);
- llvm::APInt Size1(32, SMsBlockPerSM.second);
- QualType Arr1Ty =
- C.getConstantArrayType(StaticTy, Size1, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- llvm::APInt Size2(32, SMsBlockPerSM.first);
- QualType Arr2Ty =
- C.getConstantArrayType(Arr1Ty, Size2, nullptr, ArrayType::Normal,
- /*IndexTypeQuals=*/0);
- llvm::Type *LLVMArr2Ty = CGM.getTypes().ConvertTypeForMem(Arr2Ty);
- // FIXME: nvlink does not handle weak linkage correctly (object with the
- // different size are reported as erroneous).
- // Restore CommonLinkage as soon as nvlink is fixed.
- auto *GV = new llvm::GlobalVariable(
- CGM.getModule(), LLVMArr2Ty,
- /*isConstant=*/false, llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(LLVMArr2Ty),
- "_openmp_static_glob_rd_$_");
- auto *Replacement = llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
- GV, CGM.VoidPtrTy);
- for (const GlobalPtrSizeRecsTy *Rec : GlobalRecs) {
- Rec->Buffer->replaceAllUsesWith(Replacement);
- Rec->Buffer->eraseFromParent();
- }
- }
- }
+
if (!TeamsReductions.empty()) {
ASTContext &C = CGM.getContext();
RecordDecl *StaticRD = C.buildImplicitRecord(
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
index 7267511ca672..b5f1b843c46b 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.h
@@ -38,19 +38,7 @@ private:
llvm::SmallVector<llvm::Function *, 16> Work;
struct EntryFunctionState {
- llvm::BasicBlock *ExitBB = nullptr;
- };
-
- class WorkerFunctionState {
- public:
- llvm::Function *WorkerFn;
- const CGFunctionInfo &CGFI;
SourceLocation Loc;
-
- WorkerFunctionState(CodeGenModule &CGM, SourceLocation Loc);
-
- private:
- void createWorkerFunction(CodeGenModule &CGM);
};
ExecutionMode getExecutionMode() const;
@@ -60,20 +48,13 @@ private:
/// Get barrier to synchronize all threads in a block.
void syncCTAThreads(CodeGenFunction &CGF);
- /// Emit the worker function for the current target region.
- void emitWorkerFunction(WorkerFunctionState &WST);
-
- /// Helper for worker function. Emit body of worker loop.
- void emitWorkerLoop(CodeGenFunction &CGF, WorkerFunctionState &WST);
-
- /// Helper for non-SPMD target entry function. Guide the master and
- /// worker threads to their respective locations.
- void emitNonSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
- WorkerFunctionState &WST);
+ /// Helper for target directive initialization.
+ void emitKernelInit(CodeGenFunction &CGF, EntryFunctionState &EST,
+ bool IsSPMD);
- /// Signal termination of OMP execution for non-SPMD target entry
- /// function.
- void emitNonSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
+ /// Helper for target directive finalization.
+ void emitKernelDeinit(CodeGenFunction &CGF, EntryFunctionState &EST,
+ bool IsSPMD);
/// Helper for generic variables globalization prolog.
void emitGenericVarsProlog(CodeGenFunction &CGF, SourceLocation Loc,
@@ -82,13 +63,6 @@ private:
/// Helper for generic variables globalization epilog.
void emitGenericVarsEpilog(CodeGenFunction &CGF, bool WithSPMDCheck = false);
- /// Helper for SPMD mode target directive's entry function.
- void emitSPMDEntryHeader(CodeGenFunction &CGF, EntryFunctionState &EST,
- const OMPExecutableDirective &D);
-
- /// Signal termination of SPMD mode execution.
- void emitSPMDEntryFooter(CodeGenFunction &CGF, EntryFunctionState &EST);
-
//
// Base class overrides.
//
@@ -399,10 +373,6 @@ public:
/// supports unified addressing
void processRequiresDirective(const OMPRequiresDecl *D) override;
- /// Returns default address space for the constant firstprivates, __constant__
- /// address space by default.
- unsigned getDefaultFirstprivateAddressSpace() const override;
-
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
@@ -440,15 +410,9 @@ private:
/// The data for the single globalized variable.
struct MappedVarData {
/// Corresponding field in the global record.
- const FieldDecl *FD = nullptr;
+ llvm::Value *GlobalizedVal = nullptr;
/// Corresponding address.
Address PrivateAddr = Address::invalid();
- /// true, if only one element is required (for latprivates in SPMD mode),
- /// false, if need to create based on the warp-size.
- bool IsOnePerTeam = false;
- MappedVarData() = delete;
- MappedVarData(const FieldDecl *FD, bool IsOnePerTeam = false)
- : FD(FD), IsOnePerTeam(IsOnePerTeam) {}
};
/// The map of local variables to their addresses in the global memory.
using DeclToAddrMapTy = llvm::MapVector<const Decl *, MappedVarData>;
@@ -459,30 +423,14 @@ private:
llvm::Optional<DeclToAddrMapTy> SecondaryLocalVarData = llvm::None;
EscapedParamsTy EscapedParameters;
llvm::SmallVector<const ValueDecl*, 4> EscapedVariableLengthDecls;
- llvm::SmallVector<llvm::Value *, 4> EscapedVariableLengthDeclsAddrs;
- const RecordDecl *GlobalRecord = nullptr;
- llvm::Optional<const RecordDecl *> SecondaryGlobalRecord = llvm::None;
- llvm::Value *GlobalRecordAddr = nullptr;
+ llvm::SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4>
+ EscapedVariableLengthDeclsAddrs;
llvm::Value *IsInSPMDModeFlag = nullptr;
std::unique_ptr<CodeGenFunction::OMPMapVars> MappedParams;
};
/// Maps the function to the list of the globalized variables with their
/// addresses.
llvm::SmallDenseMap<llvm::Function *, FunctionData> FunctionGlobalizedDecls;
- /// List of records for the globalized variables in target/teams/distribute
- /// contexts. Inner records are going to be joined into the single record,
- /// while those resulting records are going to be joined into the single
- /// union. This resulting union (one per CU) is the entry point for the static
- /// memory management runtime functions.
- struct GlobalPtrSizeRecsTy {
- llvm::GlobalVariable *UseSharedMemory = nullptr;
- llvm::GlobalVariable *RecSize = nullptr;
- llvm::GlobalVariable *Buffer = nullptr;
- SourceLocation Loc;
- llvm::SmallVector<const RecordDecl *, 2> Records;
- unsigned RegionCounter = 0;
- };
- llvm::SmallVector<GlobalPtrSizeRecsTy, 8> GlobalizedRecords;
llvm::GlobalVariable *KernelTeamsReductionPtr = nullptr;
/// List of the records with the list of fields for the reductions across the
/// teams. Used to build the intermediate buffer for the fast teams
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index a1a72a9f668d..aeb319ca1581 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -16,6 +16,8 @@
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
@@ -194,12 +196,21 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::SEHTryStmtClass:
EmitSEHTryStmt(cast<SEHTryStmt>(*S));
break;
+ case Stmt::OMPCanonicalLoopClass:
+ EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
+ break;
case Stmt::OMPParallelDirectiveClass:
EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
break;
case Stmt::OMPSimdDirectiveClass:
EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
break;
+ case Stmt::OMPTileDirectiveClass:
+ EmitOMPTileDirective(cast<OMPTileDirective>(*S));
+ break;
+ case Stmt::OMPUnrollDirectiveClass:
+ EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
+ break;
case Stmt::OMPForDirectiveClass:
EmitOMPForDirective(cast<OMPForDirective>(*S));
break;
@@ -369,6 +380,15 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
EmitOMPTargetTeamsDistributeSimdDirective(
cast<OMPTargetTeamsDistributeSimdDirective>(*S));
break;
+ case Stmt::OMPInteropDirectiveClass:
+ llvm_unreachable("Interop directive not supported yet.");
+ break;
+ case Stmt::OMPDispatchDirectiveClass:
+ llvm_unreachable("Dispatch directive not supported yet.");
+ break;
+ case Stmt::OMPMaskedDirectiveClass:
+ EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
+ break;
}
}
@@ -629,17 +649,30 @@ void CodeGenFunction::LexicalScope::rescopeLabels() {
void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
EmitLabel(S.getDecl());
+
+ // Under async exception handling (IsEHa), emit eha.scope.begin when this label is a side entry into a scope.
+ if (getLangOpts().EHAsynch && S.isSideEntry())
+ EmitSehCppScopeBegin();
+
EmitStmt(S.getSubStmt());
}
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
bool nomerge = false;
- for (const auto *A : S.getAttrs())
+ const CallExpr *musttail = nullptr;
+
+ for (const auto *A : S.getAttrs()) {
if (A->getKind() == attr::NoMerge) {
nomerge = true;
- break;
}
+ if (A->getKind() == attr::MustTail) {
+ const Stmt *Sub = S.getSubStmt();
+ const ReturnStmt *R = cast<ReturnStmt>(Sub);
+ musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
+ }
+ }
SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
+ SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
EmitStmt(S.getSubStmt(), S.getAttrs());
}
@@ -791,20 +824,14 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
// while(1) is common, avoid extra exit blocks. Be sure
// to correctly handle break/continue though.
- bool EmitBoolCondBranch = true;
- bool LoopMustProgress = false;
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) {
- if (C->isOne()) {
- EmitBoolCondBranch = false;
- FnIsMustProgress = false;
- }
- } else if (LanguageRequiresProgress())
- LoopMustProgress = true;
-
+ llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
+ bool CondIsConstInt = C != nullptr;
+ bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
const SourceRange &R = S.getSourceRange();
LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);
+ SourceLocToDebugLoc(R.getEnd()),
+ checkIfLoopMustProgress(CondIsConstInt));
// As long as the condition is true, go to the loop body.
llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
@@ -812,8 +839,11 @@ void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (ConditionScope.requiresCleanups())
ExitBlock = createBasicBlock("while.exit");
- llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
- S.getCond(), getProfileCount(S.getBody()), S.getBody());
+ llvm::MDNode *Weights =
+ createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
+ if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
+ BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
+ BoolCondVal, Stmt::getLikelihood(S.getBody()));
Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
if (ExitBlock != LoopExit.getBlock()) {
@@ -892,20 +922,15 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S,
// "do {} while (0)" is common in macros, avoid extra blocks. Be sure
// to correctly handle break/continue though.
- bool EmitBoolCondBranch = true;
- bool LoopMustProgress = false;
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) {
- if (C->isZero())
- EmitBoolCondBranch = false;
- else if (C->isOne())
- FnIsMustProgress = false;
- } else if (LanguageRequiresProgress())
- LoopMustProgress = true;
+ llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
+ bool CondIsConstInt = C;
+ bool EmitBoolCondBranch = !C || !C->isZero();
const SourceRange &R = S.getSourceRange();
LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);
+ SourceLocToDebugLoc(R.getEnd()),
+ checkIfLoopMustProgress(CondIsConstInt));
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch) {
@@ -939,43 +964,47 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// Start the loop with a block that tests the condition.
// If there's an increment, the continue scope will be overwritten
// later.
- JumpDest Continue = getJumpDestInCurrentScope("for.cond");
- llvm::BasicBlock *CondBlock = Continue.getBlock();
+ JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = CondDest.getBlock();
EmitBlock(CondBlock);
- bool LoopMustProgress = false;
Expr::EvalResult Result;
- if (LanguageRequiresProgress()) {
- if (!S.getCond()) {
- FnIsMustProgress = false;
- } else if (!S.getCond()->EvaluateAsInt(Result, getContext())) {
- LoopMustProgress = true;
- }
- }
+ bool CondIsConstInt =
+ !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());
const SourceRange &R = S.getSourceRange();
LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
SourceLocToDebugLoc(R.getBegin()),
- SourceLocToDebugLoc(R.getEnd()), LoopMustProgress);
-
- // If the for loop doesn't have an increment we can just use the
- // condition as the continue block. Otherwise we'll need to create
- // a block for it (in the current scope, i.e. in the scope of the
- // condition), and that we will become our continue block.
- if (S.getInc())
- Continue = getJumpDestInCurrentScope("for.inc");
-
- // Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+ SourceLocToDebugLoc(R.getEnd()),
+ checkIfLoopMustProgress(CondIsConstInt));
// Create a cleanup scope for the condition variable cleanups.
LexicalScope ConditionScope(*this, S.getSourceRange());
+ // If the for loop doesn't have an increment we can just use the condition as
+ // the continue block. Otherwise, if there is no condition variable, we can
+ // form the continue block now. If there is a condition variable, we can't
+ // form the continue block until after we've emitted the condition, because
+ // the condition is in scope in the increment, but Sema's jump diagnostics
+ // ensure that there are no continues from the condition variable that jump
+ // to the loop increment.
+ JumpDest Continue;
+ if (!S.getInc())
+ Continue = CondDest;
+ else if (!S.getConditionVariable())
+ Continue = getJumpDestInCurrentScope("for.inc");
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
// declaration.
if (S.getConditionVariable()) {
EmitDecl(*S.getConditionVariable());
+
+ // We have entered the condition variable's scope, so we're now able to
+ // jump to the continue block.
+ Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
+ BreakContinueStack.back().ContinueBlock = Continue;
}
llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
@@ -990,12 +1019,11 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S,
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
- S.getCond(), getProfileCount(S.getBody()), S.getBody());
-
- if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
- if (C->isOne())
- FnIsMustProgress = false;
+ llvm::MDNode *Weights =
+ createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
+ if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
+ BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
+ BoolCondVal, Stmt::getLikelihood(S.getBody()));
Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
@@ -1076,8 +1104,11 @@ CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
// The body is executed if the expression, contextually converted
// to bool, is true.
llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- llvm::MDNode *Weights = createProfileOrBranchWeightsForLoop(
- S.getCond(), getProfileCount(S.getBody()), S.getBody());
+ llvm::MDNode *Weights =
+ createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
+ if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
+ BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
+ BoolCondVal, Stmt::getLikelihood(S.getBody()));
Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
if (ExitBlock != LoopExit.getBlock()) {
@@ -1145,6 +1176,38 @@ struct SaveRetExprRAII {
};
} // namespace
+/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
+/// codegen it as 'tail call ...; ret void;'.
+static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
+ const CGFunctionInfo *CurFnInfo) {
+ auto calleeQualType = CE->getCallee()->getType();
+ const FunctionType *calleeType = nullptr;
+ if (calleeQualType->isFunctionPointerType() ||
+ calleeQualType->isFunctionReferenceType() ||
+ calleeQualType->isBlockPointerType() ||
+ calleeQualType->isMemberFunctionPointerType()) {
+ calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
+ } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
+ calleeType = ty;
+ } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
+ if (auto methodDecl = CMCE->getMethodDecl()) {
+ // getMethodDecl() doesn't handle member pointers at the moment.
+ calleeType = methodDecl->getType()->castAs<FunctionType>();
+ } else {
+ return;
+ }
+ } else {
+ return;
+ }
+ if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
+ (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
+ auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
+ CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
+ Builder.CreateRetVoid();
+ Builder.ClearInsertionPoint();
+ }
+}
+
/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
@@ -1203,8 +1266,11 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
- if (RV)
+ if (RV) {
EmitAnyExpr(RV);
+ if (auto *CE = dyn_cast<CallExpr>(RV))
+ makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
+ }
} else if (!RV) {
// Do nothing (return value is left uninitialized)
} else if (FnRetTy->isReferenceType()) {
@@ -1351,7 +1417,7 @@ void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
// this case.
(*SwitchWeights)[0] += ThisCount;
} else if (SwitchLikelihood)
- Weights = createBranchWeights(LH);
+ Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
@@ -2092,7 +2158,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
SmallVector<llvm::Metadata *, 8> Locs;
// Add the location of the first line to the MDNode.
Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
+ CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
StringRef StrVal = Str->getString();
if (!StrVal.empty()) {
const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
@@ -2107,7 +2173,7 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
SourceLocation LineLoc = Str->getLocationOfByte(
i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
Locs.push_back(llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
+ llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
}
}
@@ -2115,13 +2181,15 @@ static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
}
static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
- bool ReadOnly, bool ReadNone, bool NoMerge,
- const AsmStmt &S,
+ bool HasUnwindClobber, bool ReadOnly,
+ bool ReadNone, bool NoMerge, const AsmStmt &S,
const std::vector<llvm::Type *> &ResultRegTypes,
CodeGenFunction &CGF,
std::vector<llvm::Value *> &RegResults) {
- Result.addAttribute(llvm::AttributeList::FunctionIndex,
- llvm::Attribute::NoUnwind);
+ if (!HasUnwindClobber)
+ Result.addAttribute(llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoUnwind);
+
if (NoMerge)
Result.addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoMerge);
@@ -2142,8 +2210,8 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
else {
// At least put the line number on MS inline asm blobs.
- llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
- S.getAsmLoc().getRawEncoding());
+ llvm::Constant *Loc =
+ llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
Result.setMetadata("srcloc",
llvm::MDNode::get(CGF.getLLVMContext(),
llvm::ConstantAsMetadata::get(Loc)));
@@ -2468,13 +2536,18 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
Constraints += InOutConstraints;
+ bool HasUnwindClobber = false;
+
// Clobbers
for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
StringRef Clobber = S.getClobber(i);
if (Clobber == "memory")
ReadOnly = ReadNone = false;
- else if (Clobber != "cc") {
+ else if (Clobber == "unwind") {
+ HasUnwindClobber = true;
+ continue;
+ } else if (Clobber != "cc") {
Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
if (CGM.getCodeGenOpts().StackClashProtector &&
getTarget().isSPRegName(Clobber)) {
@@ -2483,6 +2556,23 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
+ if (isa<MSAsmStmt>(&S)) {
+ if (Clobber == "eax" || Clobber == "edx") {
+ if (Constraints.find("=&A") != std::string::npos)
+ continue;
+ std::string::size_type position1 =
+ Constraints.find("={" + Clobber.str() + "}");
+ if (position1 != std::string::npos) {
+ Constraints.insert(position1 + 1, "&");
+ continue;
+ }
+ std::string::size_type position2 = Constraints.find("=A");
+ if (position2 != std::string::npos) {
+ Constraints.insert(position2 + 1, "&");
+ continue;
+ }
+ }
+ }
if (!Constraints.empty())
Constraints += ',';
@@ -2491,6 +2581,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Constraints += '}';
}
+ assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
+ "unwind clobber can't be used with asm goto");
+
// Add machine specific clobbers
std::string MachineClobbers = getTarget().getClobbers();
if (!MachineClobbers.empty()) {
@@ -2513,23 +2606,28 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
- llvm::InlineAsm *IA =
- llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
- /* IsAlignStack */ false, AsmDialect);
+ llvm::InlineAsm *IA = llvm::InlineAsm::get(
+ FTy, AsmString, Constraints, HasSideEffect,
+ /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
std::vector<llvm::Value*> RegResults;
if (IsGCCAsmGoto) {
llvm::CallBrInst *Result =
Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
EmitBlock(Fallthrough);
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
- ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
- *this, RegResults);
+ UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
+ ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
+ ResultRegTypes, *this, RegResults);
+ } else if (HasUnwindClobber) {
+ llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
+ UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
+ InNoMergeAttributedStmt, S, ResultRegTypes, *this,
+ RegResults);
} else {
llvm::CallInst *Result =
Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
- UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
- ReadNone, InNoMergeAttributedStmt, S, ResultRegTypes,
- *this, RegResults);
+ UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
+ ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
+ ResultRegTypes, *this, RegResults);
}
assert(RegResults.size() == ResultRegTypes.size());
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 5e8d98cfe5ef..f6233b791182 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -133,50 +133,55 @@ public:
/// Private scope for OpenMP loop-based directives, that supports capturing
/// of used expression from loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
- void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopDirective &S) {
+ void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
+ const DeclStmt *PreInits;
CodeGenFunction::OMPMapVars PreCondVars;
- llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
- for (const auto *E : S.counters()) {
- const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
- EmittedAsPrivate.insert(VD->getCanonicalDecl());
- (void)PreCondVars.setVarAddr(
- CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
- }
- // Mark private vars as undefs.
- for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
- for (const Expr *IRef : C->varlists()) {
- const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
- if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
- (void)PreCondVars.setVarAddr(
- CGF, OrigVD,
- Address(llvm::UndefValue::get(
- CGF.ConvertTypeForMem(CGF.getContext().getPointerType(
- OrigVD->getType().getNonReferenceType()))),
- CGF.getContext().getDeclAlign(OrigVD)));
- }
+ if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
+ llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
+ for (const auto *E : LD->counters()) {
+ const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
+ EmittedAsPrivate.insert(VD->getCanonicalDecl());
+ (void)PreCondVars.setVarAddr(
+ CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
}
- }
- (void)PreCondVars.apply(CGF);
- // Emit init, __range and __end variables for C++ range loops.
- const Stmt *Body =
- S.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
- for (unsigned Cnt = 0; Cnt < S.getCollapsedNumber(); ++Cnt) {
- Body = OMPLoopDirective::tryToFindNextInnerLoop(
- Body, /*TryImperfectlyNestedLoops=*/true);
- if (auto *For = dyn_cast<ForStmt>(Body)) {
- Body = For->getBody();
- } else {
- assert(isa<CXXForRangeStmt>(Body) &&
- "Expected canonical for loop or range-based for loop.");
- auto *CXXFor = cast<CXXForRangeStmt>(Body);
- if (const Stmt *Init = CXXFor->getInit())
- CGF.EmitStmt(Init);
- CGF.EmitStmt(CXXFor->getRangeStmt());
- CGF.EmitStmt(CXXFor->getEndStmt());
- Body = CXXFor->getBody();
+ // Mark private vars as undefs.
+ for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
+ for (const Expr *IRef : C->varlists()) {
+ const auto *OrigVD =
+ cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
+ if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
+ (void)PreCondVars.setVarAddr(
+ CGF, OrigVD,
+ Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
+ CGF.getContext().getPointerType(
+ OrigVD->getType().getNonReferenceType()))),
+ CGF.getContext().getDeclAlign(OrigVD)));
+ }
+ }
}
+ (void)PreCondVars.apply(CGF);
+ // Emit init, __range and __end variables for C++ range loops.
+ (void)OMPLoopBasedDirective::doForAllLoops(
+ LD->getInnermostCapturedStmt()->getCapturedStmt(),
+ /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
+ [&CGF](unsigned Cnt, const Stmt *CurStmt) {
+ if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
+ if (const Stmt *Init = CXXFor->getInit())
+ CGF.EmitStmt(Init);
+ CGF.EmitStmt(CXXFor->getRangeStmt());
+ CGF.EmitStmt(CXXFor->getEndStmt());
+ }
+ return false;
+ });
+ PreInits = cast_or_null<DeclStmt>(LD->getPreInits());
+ } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
+ PreInits = cast_or_null<DeclStmt>(Tile->getPreInits());
+ } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
+ PreInits = cast_or_null<DeclStmt>(Unroll->getPreInits());
+ } else {
+ llvm_unreachable("Unknown loop-based directive kind.");
}
- if (const auto *PreInits = cast_or_null<DeclStmt>(S.getPreInits())) {
+ if (PreInits) {
for (const auto *I : PreInits->decls())
CGF.EmitVarDecl(cast<VarDecl>(*I));
}
@@ -184,7 +189,7 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
}
public:
- OMPLoopScope(CodeGenFunction &CGF, const OMPLoopDirective &S)
+ OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
: CodeGenFunction::RunCleanupsScope(CGF) {
emitPreInitStmt(CGF, S);
}
@@ -238,11 +243,22 @@ public:
if (const Expr *E = TG->getReductionRef())
CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
}
+ // Temp copy arrays for inscan reductions should not be emitted as they are
+ // not used in simd only mode.
+ llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ if (C->getModifier() != OMPC_REDUCTION_inscan)
+ continue;
+ for (const Expr *E : C->copy_array_temps())
+ CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
+ }
const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
while (CS) {
for (auto &C : CS->captures()) {
if (C.capturesVariable() || C.capturesVariableByCopy()) {
auto *VD = C.getCapturedVar();
+ if (CopyArrayTemps.contains(VD))
+ continue;
assert(VD == VD->getCanonicalDecl() &&
"Canonical decl must be captured.");
DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
@@ -501,6 +517,10 @@ static llvm::Function *emitOutlinedFunctionPrologue(
F->setDoesNotThrow();
F->setDoesNotRecurse();
+ // Always inline the outlined function if optimizations are enabled.
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0)
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
// Generate the function.
CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
@@ -631,6 +651,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
WrapperCGF.CXXThisValue, WrapperFO);
llvm::SmallVector<llvm::Value *, 4> CallArgs;
+ auto *PI = F->arg_begin();
for (const auto *Arg : Args) {
llvm::Value *CallArg;
auto I = LocalAddrs.find(Arg);
@@ -639,6 +660,11 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
I->second.second,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
+ if (LV.getType()->isAnyComplexType())
+ LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ LV.getAddress(WrapperCGF),
+ PI->getType()->getPointerTo(
+ LV.getAddress(WrapperCGF).getAddressSpace())));
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
@@ -652,6 +678,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
}
}
CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
+ ++PI;
}
CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
WrapperCGF.FinishFunction();
@@ -675,7 +702,8 @@ void CodeGenFunction::EmitOMPAggregateAssign(
llvm::Value *SrcBegin = SrcAddr.getPointer();
llvm::Value *DestBegin = DestAddr.getPointer();
// Cast from pointer to array type to pointer to single element.
- llvm::Value *DestEnd = Builder.CreateGEP(DestBegin, NumElements);
+ llvm::Value *DestEnd =
+ Builder.CreateGEP(DestAddr.getElementType(), DestBegin, NumElements);
// The basic structure here is a while-do loop.
llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
@@ -708,9 +736,11 @@ void CodeGenFunction::EmitOMPAggregateAssign(
// Shift the address forward by one element.
llvm::Value *DestElementNext = Builder.CreateConstGEP1_32(
- DestElementPHI, /*Idx0=*/1, "omp.arraycpy.dest.element");
+ DestAddr.getElementType(), DestElementPHI, /*Idx0=*/1,
+ "omp.arraycpy.dest.element");
llvm::Value *SrcElementNext = Builder.CreateConstGEP1_32(
- SrcElementPHI, /*Idx0=*/1, "omp.arraycpy.src.element");
+ SrcAddr.getElementType(), SrcElementPHI, /*Idx0=*/1,
+ "omp.arraycpy.src.element");
// Check whether we've reached the end.
llvm::Value *Done =
Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
@@ -803,8 +833,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
FD && FD->getType()->isReferenceType() &&
(!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
- (void)CGM.getOpenMPRuntime().registerTargetFirstprivateCopy(*this,
- OrigVD);
+ EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
++IRef;
++InitsRef;
continue;
@@ -985,12 +1014,14 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
// need to copy data.
CopyBegin = createBasicBlock("copyin.not.master");
CopyEnd = createBasicBlock("copyin.not.master.end");
+ // TODO: Avoid ptrtoint conversion.
+ auto *MasterAddrInt =
+ Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy);
+ auto *PrivateAddrInt =
+ Builder.CreatePtrToInt(PrivateAddr.getPointer(), CGM.IntPtrTy);
Builder.CreateCondBr(
- Builder.CreateICmpNE(
- Builder.CreatePtrToInt(MasterAddr.getPointer(), CGM.IntPtrTy),
- Builder.CreatePtrToInt(PrivateAddr.getPointer(),
- CGM.IntPtrTy)),
- CopyBegin, CopyEnd);
+ Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
+ CopyEnd);
EmitBlock(CopyBegin);
}
const auto *SrcVD =
@@ -1755,6 +1786,31 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
checkForLastprivateConditionalUpdate(*this, S);
}
+namespace {
+/// RAII to handle scopes for loop transformation directives.
+class OMPTransformDirectiveScopeRAII {
+ OMPLoopScope *Scope = nullptr;
+ CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
+ CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
+
+public:
+ OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
+ if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
+ Scope = new OMPLoopScope(CGF, *Dir);
+ CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
+ CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
+ }
+ }
+ ~OMPTransformDirectiveScopeRAII() {
+ if (!Scope)
+ return;
+ delete CapInfoRAII;
+ delete CGSI;
+ delete Scope;
+ }
+};
+} // namespace
+
static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
int MaxLevel, int Level = 0) {
assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
@@ -1771,6 +1827,12 @@ static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
return;
}
if (SimplifiedS == NextLoop) {
+ if (auto *Dir = dyn_cast<OMPTileDirective>(SimplifiedS))
+ SimplifiedS = Dir->getTransformedStmt();
+ if (auto *Dir = dyn_cast<OMPUnrollDirective>(SimplifiedS))
+ SimplifiedS = Dir->getTransformedStmt();
+ if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
+ SimplifiedS = CanonLoop->getLoopStmt();
if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
S = For->getBody();
} else {
@@ -1845,9 +1907,9 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
// Emit loop body.
emitBody(*this, Body,
- OMPLoopDirective::tryToFindNextInnerLoop(
+ OMPLoopBasedDirective::tryToFindNextInnerLoop(
Body, /*TryImperfectlyNestedLoops=*/true),
- D.getCollapsedNumber());
+ D.getLoopsNumber());
// Jump to the dispatcher at the end of the loop body.
if (IsInscanRegion)
@@ -1858,6 +1920,121 @@ void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
BreakContinueStack.pop_back();
}
+using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;
+
+/// Emit a captured statement and return the function as well as its captured
+/// closure context.
+static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
+ const CapturedStmt *S) {
+ LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
+ CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
+ std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
+ std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
+ llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);
+
+ return {F, CapStruct.getPointer(ParentCGF)};
+}
+
+/// Emit a call to a previously captured closure.
+static llvm::CallInst *
+emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
+ llvm::ArrayRef<llvm::Value *> Args) {
+ // Append the closure context to the argument.
+ SmallVector<llvm::Value *> EffectiveArgs;
+ EffectiveArgs.reserve(Args.size() + 1);
+ llvm::append_range(EffectiveArgs, Args);
+ EffectiveArgs.push_back(Cap.second);
+
+ return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
+}
+
+llvm::CanonicalLoopInfo *
+CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
+ assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
+
+ EmitStmt(S);
+ assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");
+
+ // The last added loop is the outermost one.
+ return OMPLoopNestStack.back();
+}
+
+void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
+ const Stmt *SyntacticalLoop = S->getLoopStmt();
+ if (!getLangOpts().OpenMPIRBuilder) {
+ // Ignore if OpenMPIRBuilder is not enabled.
+ EmitStmt(SyntacticalLoop);
+ return;
+ }
+
+ LexicalScope ForScope(*this, S->getSourceRange());
+
+ // Emit the init statements first: the Distance/LoopVar helper functions may
+ // reference variable declarations introduced by those init statements.
+ const Stmt *BodyStmt;
+ if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
+ if (const Stmt *InitStmt = For->getInit())
+ EmitStmt(InitStmt);
+ BodyStmt = For->getBody();
+ } else if (const auto *RangeFor =
+ dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
+ if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
+ EmitStmt(RangeStmt);
+ if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
+ EmitStmt(BeginStmt);
+ if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
+ EmitStmt(EndStmt);
+ if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
+ EmitStmt(LoopVarStmt);
+ BodyStmt = RangeFor->getBody();
+ } else
+ llvm_unreachable("Expected for-stmt or range-based for-stmt");
+
+ // Emit closure for later use. By-value captures will be captured here.
+ const CapturedStmt *DistanceFunc = S->getDistanceFunc();
+ EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
+ const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
+ EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);
+
+ // Call the distance function to get the number of iterations of the loop to
+ // come.
+ QualType LogicalTy = DistanceFunc->getCapturedDecl()
+ ->getParam(0)
+ ->getType()
+ .getNonReferenceType();
+ Address CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
+ emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
+ llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");
+
+ // Emit the loop structure.
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
+ llvm::Value *IndVar) {
+ Builder.restoreIP(CodeGenIP);
+
+ // Emit the loop body: Convert the logical iteration number to the loop
+ // variable and emit the body.
+ const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
+ LValue LCVal = EmitLValue(LoopVarRef);
+ Address LoopVarAddress = LCVal.getAddress(*this);
+ emitCapturedStmtCall(*this, LoopVarClosure,
+ {LoopVarAddress.getPointer(), IndVar});
+
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(BodyStmt);
+ };
+ llvm::CanonicalLoopInfo *CL =
+ OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);
+
+ // Finish up the loop.
+ Builder.restoreIP(CL->getAfterIP());
+ ForScope.ForceCleanup();
+
+ // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
+ OMPLoopNestStack.push_back(CL);
+}
+
void CodeGenFunction::EmitOMPInnerLoop(
const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
const Expr *IncExpr,
@@ -1875,6 +2052,7 @@ void CodeGenFunction::EmitOMPInnerLoop(
const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
const Stmt *SS = ICS->getCapturedStmt();
const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
+ OMPLoopNestStack.clear();
if (AS)
LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
@@ -2062,8 +2240,7 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
if (!C->getNumForLoops())
continue;
- for (unsigned I = S.getCollapsedNumber(),
- E = C->getLoopNumIterations().size();
+ for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
I < E; ++I) {
const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
const auto *VD = cast<VarDecl>(DRE->getDecl());
@@ -2152,8 +2329,7 @@ void CodeGenFunction::EmitOMPLinearClause(
}
static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
- const OMPExecutableDirective &D,
- bool IsMonotonic) {
+ const OMPExecutableDirective &D) {
if (!CGF.HaveInsertPoint())
return;
if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
@@ -2164,8 +2340,7 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
// In presence of finite 'safelen', it may be unsafe to mark all
// the memory instructions parallel, because loop-carried
// dependences of 'safelen' iterations are possible.
- if (!IsMonotonic)
- CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
+ CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
} else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
/*ignoreResult=*/true);
@@ -2178,12 +2353,11 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
}
}
-void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
- bool IsMonotonic) {
+void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
// Walk clauses and process safelen/lastprivate.
- LoopStack.setParallel(!IsMonotonic);
+ LoopStack.setParallel(/*Enable=*/true);
LoopStack.setVectorizeEnable();
- emitSimdlenSafelenClause(*this, D, IsMonotonic);
+ emitSimdlenSafelenClause(*this, D);
if (const auto *C = D.getSingleClause<OMPOrderClause>())
if (C->getKind() == OMPC_ORDER_concurrent)
LoopStack.setParallel(/*Enable=*/true);
@@ -2406,6 +2580,34 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
checkForLastprivateConditionalUpdate(*this, S);
}
+/// Lower '#pragma omp tile'. Sema has already materialized a de-sugared
+/// loop nest for the tiling, so codegen only has to emit that transformed
+/// statement inside a transform-directive scope.
+void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
+  // Emit the de-sugared statement.
+  OMPTransformDirectiveScopeRAII TileScope(*this, &S);
+  EmitStmt(S.getTransformedStmt());
+}
+
+/// Lower '#pragma omp unroll' by attaching unroll metadata to the next loop
+/// emitted from the associated statement, rather than transforming the AST.
+void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) {
+  // This function is only called if the unrolled loop is not consumed by any
+  // other loop-associated construct. Such a loop-associated construct will have
+  // used the transformed AST.
+
+  // Set the unroll metadata for the next emitted loop.
+  LoopStack.setUnrollState(LoopAttributes::Enable);
+
+  if (S.hasClausesOfKind<OMPFullClause>()) {
+    // 'full' clause: request complete unrolling of the loop.
+    LoopStack.setUnrollState(LoopAttributes::Full);
+  } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
+    // 'partial(<factor>)': the factor, when present, is a known constant
+    // expression; without an argument the unroll factor is left to the
+    // backend heuristics.
+    if (Expr *FactorExpr = PartialClause->getFactor()) {
+      uint64_t Factor =
+          FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
+      assert(Factor >= 1 && "Only positive factors are valid");
+      LoopStack.setUnrollCount(Factor);
+    }
+  }
+
+  EmitStmt(S.getAssociatedStmt());
+}
+
void CodeGenFunction::EmitOMPOuterLoop(
bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
CodeGenFunction::OMPPrivateScope &LoopScope,
@@ -2424,6 +2626,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
EmitBlock(CondBlock);
const SourceRange R = S.getSourceRange();
+ OMPLoopNestStack.clear();
LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
SourceLocToDebugLoc(R.getEnd()));
@@ -2477,7 +2680,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
if (C->getKind() == OMPC_ORDER_concurrent)
CGF.LoopStack.setParallel(/*Enable=*/true);
} else {
- CGF.EmitOMPSimdInit(S, IsMonotonic);
+ CGF.EmitOMPSimdInit(S);
}
},
[&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
@@ -2507,6 +2710,7 @@ void CodeGenFunction::EmitOMPOuterLoop(
}
EmitBranch(CondBlock);
+ OMPLoopNestStack.clear();
LoopStack.pop();
// Emit the fall-through block.
EmitBlock(LoopExit.getBlock());
@@ -2986,8 +3190,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
bool IsMonotonic =
Ordered ||
- ((ScheduleKind.Schedule == OMPC_SCHEDULE_static ||
- ScheduleKind.Schedule == OMPC_SCHEDULE_unknown) &&
+ (ScheduleKind.Schedule == OMPC_SCHEDULE_static &&
!(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
@@ -3000,9 +3203,9 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
emitCommonSimdLoop(
*this, S,
- [&S, IsMonotonic](CodeGenFunction &CGF, PrePostActionTy &) {
+ [&S](CodeGenFunction &CGF, PrePostActionTy &) {
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
- CGF.EmitOMPSimdInit(S, IsMonotonic);
+ CGF.EmitOMPSimdInit(S);
} else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
if (C->getKind() == OMPC_ORDER_concurrent)
CGF.LoopStack.setParallel(/*Enable=*/true);
@@ -3131,53 +3334,30 @@ emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
return {LBVal, UBVal};
}
-/// Emits the code for the directive with inscan reductions.
+/// Emits internal temp array declarations for the directive with inscan
+/// reductions.
/// The code is the following:
/// \code
/// size num_iters = <num_iters>;
/// <type> buffer[num_iters];
-/// #pragma omp ...
-/// for (i: 0..<num_iters>) {
-/// <input phase>;
-/// buffer[i] = red;
-/// }
-/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
-/// for (size cnt = last_iter; cnt >= pow(2, k); --k)
-/// buffer[i] op= buffer[i-pow(2,k)];
-/// #pragma omp ...
-/// for (0..<num_iters>) {
-/// red = InclusiveScan ? buffer[i] : buffer[i-1];
-/// <scan phase>;
-/// }
/// \endcode
-static void emitScanBasedDirective(
+static void emitScanBasedDirectiveDecls(
CodeGenFunction &CGF, const OMPLoopDirective &S,
- llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
- llvm::function_ref<void(CodeGenFunction &)> FirstGen,
- llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
+ llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
SmallVector<const Expr *, 4> Shareds;
SmallVector<const Expr *, 4> Privates;
SmallVector<const Expr *, 4> ReductionOps;
- SmallVector<const Expr *, 4> LHSs;
- SmallVector<const Expr *, 4> RHSs;
- SmallVector<const Expr *, 4> CopyOps;
SmallVector<const Expr *, 4> CopyArrayTemps;
- SmallVector<const Expr *, 4> CopyArrayElems;
for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
assert(C->getModifier() == OMPC_REDUCTION_inscan &&
"Only inscan reductions are expected.");
Shareds.append(C->varlist_begin(), C->varlist_end());
Privates.append(C->privates().begin(), C->privates().end());
ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
- LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
- RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
- CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
CopyArrayTemps.append(C->copy_array_temps().begin(),
C->copy_array_temps().end());
- CopyArrayElems.append(C->copy_array_elems().begin(),
- C->copy_array_elems().end());
}
{
// Emit buffers for each reduction variables.
@@ -3206,6 +3386,49 @@ static void emitScanBasedDirective(
++Count;
}
}
+}
+
+/// Emits the code for the directive with inscan reductions.
+/// The code is the following:
+/// \code
+/// #pragma omp ...
+/// for (i: 0..<num_iters>) {
+/// <input phase>;
+/// buffer[i] = red;
+/// }
+/// #pragma omp master // in parallel region
+/// for (int k = 0; k != ceil(log2(num_iters)); ++k)
+/// for (size cnt = last_iter; cnt >= pow(2, k); --k)
+/// buffer[i] op= buffer[i-pow(2,k)];
+/// #pragma omp barrier // in parallel region
+/// #pragma omp ...
+/// for (0..<num_iters>) {
+/// red = InclusiveScan ? buffer[i] : buffer[i-1];
+/// <scan phase>;
+/// }
+/// \endcode
+static void emitScanBasedDirective(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
+ llvm::function_ref<void(CodeGenFunction &)> FirstGen,
+ llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
+ llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
+ NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> ReductionOps;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ assert(C->getModifier() == OMPC_REDUCTION_inscan &&
+ "Only inscan reductions are expected.");
+ Privates.append(C->privates().begin(), C->privates().end());
+ ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
{
// Emit loop with input phase:
@@ -3218,90 +3441,108 @@ static void emitScanBasedDirective(
CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
FirstGen(CGF);
}
- // Emit prefix reduction:
- // for (int k = 0; k <= ceil(log2(n)); ++k)
- llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
- llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
- llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
- llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
- llvm::Value *Arg =
- CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
- llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
- F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
- LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
- LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
- llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
- OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
- auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
- CGF.EmitBlock(LoopBB);
- auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
- // size pow2k = 1;
- auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
- Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
- Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
- // for (size i = n - 1; i >= 2 ^ k; --i)
- // tmp[i] op= tmp[i-pow2k];
- llvm::BasicBlock *InnerLoopBB =
- CGF.createBasicBlock("omp.inner.log.scan.body");
- llvm::BasicBlock *InnerExitBB =
- CGF.createBasicBlock("omp.inner.log.scan.exit");
- llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
- CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
- CGF.EmitBlock(InnerLoopBB);
- auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
- IVal->addIncoming(NMin1, LoopBB);
- {
- CodeGenFunction::OMPPrivateScope PrivScope(CGF);
- auto *ILHS = LHSs.begin();
- auto *IRHS = RHSs.begin();
- for (const Expr *CopyArrayElem : CopyArrayElems) {
- const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
- const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
- Address LHSAddr = Address::invalid();
- {
- CodeGenFunction::OpaqueValueMapping IdxMapping(
- CGF,
- cast<OpaqueValueExpr>(
- cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
- RValue::get(IVal));
- LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
- }
- PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
- Address RHSAddr = Address::invalid();
- {
- llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
- CodeGenFunction::OpaqueValueMapping IdxMapping(
- CGF,
- cast<OpaqueValueExpr>(
- cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
- RValue::get(OffsetIVal));
- RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ // #pragma omp barrier // in parallel region
+ auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
+ &ReductionOps,
+ &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) {
+ Action.Enter(CGF);
+ // Emit prefix reduction:
+ // #pragma omp master // in parallel region
+ // for (int k = 0; k <= ceil(log2(n)); ++k)
+ llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
+ llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
+ llvm::Function *F =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
+ llvm::Value *Arg =
+ CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
+ llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
+ F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
+ LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
+ LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
+ llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
+ OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
+ auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
+ CGF.EmitBlock(LoopBB);
+ auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
+ // size pow2k = 1;
+ auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
+ Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
+ Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
+ // for (size i = n - 1; i >= 2 ^ k; --i)
+ // tmp[i] op= tmp[i-pow2k];
+ llvm::BasicBlock *InnerLoopBB =
+ CGF.createBasicBlock("omp.inner.log.scan.body");
+ llvm::BasicBlock *InnerExitBB =
+ CGF.createBasicBlock("omp.inner.log.scan.exit");
+ llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
+ CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ CGF.EmitBlock(InnerLoopBB);
+ auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
+ IVal->addIncoming(NMin1, LoopBB);
+ {
+ CodeGenFunction::OMPPrivateScope PrivScope(CGF);
+ auto *ILHS = LHSs.begin();
+ auto *IRHS = RHSs.begin();
+ for (const Expr *CopyArrayElem : CopyArrayElems) {
+ const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
+ const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
+ Address LHSAddr = Address::invalid();
+ {
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(IVal));
+ LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ }
+ PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
+ Address RHSAddr = Address::invalid();
+ {
+ llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(OffsetIVal));
+ RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ }
+ PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
+ ++ILHS;
+ ++IRHS;
}
- PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
- ++ILHS;
- ++IRHS;
+ PrivScope.Privatize();
+ CGF.CGM.getOpenMPRuntime().emitReduction(
+ CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
+ {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
}
- PrivScope.Privatize();
- CGF.CGM.getOpenMPRuntime().emitReduction(
- CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
- {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
- }
- llvm::Value *NextIVal =
- CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
- IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
- CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
- CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
- CGF.EmitBlock(InnerExitBB);
- llvm::Value *Next =
- CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
- Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
- // pow2k <<= 1;
- llvm::Value *NextPow2K = CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
- Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
- llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
- CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
- auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
- CGF.EmitBlock(ExitBB);
+ llvm::Value *NextIVal =
+ CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
+ IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
+ CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
+ CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
+ CGF.EmitBlock(InnerExitBB);
+ llvm::Value *Next =
+ CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
+ Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
+ // pow2k <<= 1;
+ llvm::Value *NextPow2K =
+ CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
+ Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
+ llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
+ CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
+ auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
+ CGF.EmitBlock(ExitBB);
+ };
+ if (isOpenMPParallelDirective(S.getDirectiveKind())) {
+ CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
+ CGF.CGM.getOpenMPRuntime().emitBarrierCall(
+ CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
+ /*ForceSimpleCall=*/true);
+ } else {
+ RegionCodeGenTy RCG(CodeGen);
+ RCG(CGF);
+ }
CGF.OMPFirstScanLoop = false;
SecondGen(CGF);
@@ -3338,6 +3579,8 @@ static bool emitWorksharingDirective(CodeGenFunction &CGF,
emitForLoopBounds,
emitDispatchForLoopBounds);
};
+ if (!isOpenMPParallelDirective(S.getDirectiveKind()))
+ emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
} else {
CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
@@ -3349,10 +3592,38 @@ static bool emitWorksharingDirective(CodeGenFunction &CGF,
return HasLastprivates;
}
+/// Returns true if the OpenMPIRBuilder can lower this 'for' directive:
+/// no cancellation region, and no clauses other than 'nowait'.
+static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) {
+  if (S.hasCancel())
+    return false;
+  // Any clause besides 'nowait' is not yet handled by the IR builder path.
+  for (OMPClause *C : S.clauses())
+    if (!isa<OMPNowaitClause>(C))
+      return false;
+
+  return true;
+}
+
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
- auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
- PrePostActionTy &) {
+ bool UseOMPIRBuilder =
+ CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S);
+ auto &&CodeGen = [this, &S, &HasLastprivates,
+ UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
+ // Use the OpenMPIRBuilder if enabled.
+ if (UseOMPIRBuilder) {
+ // Emit the associated statement and get its loop representation.
+ const Stmt *Inner = S.getRawStmt();
+ llvm::CanonicalLoopInfo *CLI =
+ EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
+
+ bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
+ llvm::OpenMPIRBuilder &OMPBuilder =
+ CGM.getOpenMPRuntime().getOMPBuilder();
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
+ AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
+ OMPBuilder.createWorkshareLoop(Builder, CLI, AllocaIP, NeedsBarrier);
+ return;
+ }
+
HasLastprivates = emitWorksharingDirective(CGF, S, S.hasCancel());
};
{
@@ -3363,9 +3634,11 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
S.hasCancel());
}
- // Emit an implicit barrier at the end.
- if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
- CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
+ if (!UseOMPIRBuilder) {
+ // Emit an implicit barrier at the end.
+ if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
+ CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
+ }
// Check for outer lastprivate conditional update.
checkForLastprivateConditionalUpdate(*this, S);
}
@@ -3428,11 +3701,11 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
// Generate condition for loop.
BinaryOperator *Cond = BinaryOperator::Create(
- C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_RValue, OK_Ordinary,
+ C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary,
S.getBeginLoc(), FPOptionsOverride());
// Increment for loop counter.
UnaryOperator *Inc = UnaryOperator::Create(
- C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_RValue, OK_Ordinary,
+ C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary,
S.getBeginLoc(), true, FPOptionsOverride());
auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
// Iterate through all sections and emit a switch construct:
@@ -3546,6 +3819,64 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
}
void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+ using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;
+
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ const CapturedStmt *ICS = S.getInnermostCapturedStmt();
+ const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
+ const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
+ llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector;
+ if (CS) {
+ for (const Stmt *SubStmt : CS->children()) {
+ auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP,
+ FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SubStmt, CodeGenIP,
+ FiniBB);
+ };
+ SectionCBVector.push_back(SectionCB);
+ }
+ } else {
+ auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CapturedStmt, CodeGenIP,
+ FiniBB);
+ };
+ SectionCBVector.push_back(SectionCB);
+ }
+
+ // Privatization callback that performs appropriate action for
+ // shared/private/firstprivate/lastprivate/copyin/... variables.
+ //
+ // TODO: This defaults to shared right now.
+ auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
+ llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
+ // The next line is appropriate only for variables (Val) with the
+ // data-sharing attribute "shared".
+ ReplVal = &Val;
+
+ return CodeGenIP;
+ };
+
+ CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
+ llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
+ AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
+ Builder.restoreIP(OMPBuilder.createSections(
+ Builder, AllocaIP, SectionCBVector, PrivCB, FiniCB, S.hasCancel(),
+ S.getSingleClause<OMPNowaitClause>()));
+ return;
+ }
{
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
@@ -3562,6 +3893,29 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
}
void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
+ if (CGM.getLangOpts().OpenMPIRBuilder) {
+ llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+ using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+ const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
+ auto FiniCB = [this](InsertPointTy IP) {
+ OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+ };
+
+ auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ llvm::BasicBlock &FiniBB) {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SectionRegionBodyStmt,
+ CodeGenIP, FiniBB);
+ };
+
+ LexicalScope Scope(*this, S.getSourceRange());
+ EmitStopPoint(&S);
+ Builder.restoreIP(OMPBuilder.createSection(Builder, BodyGenCB, FiniCB));
+
+ return;
+ }
LexicalScope Scope(*this, S.getSourceRange());
EmitStopPoint(&S);
EmitStmt(S.getAssociatedStmt());
@@ -3650,6 +4004,55 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
emitMaster(*this, S);
}
+/// Emit a 'masked' region through the OpenMP runtime: the raw associated
+/// statement is wrapped in a region callback, and the optional 'filter'
+/// clause expression is forwarded (Filter stays null when the clause is
+/// absent, leaving the default to the runtime codegen).
+static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
+  auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
+    Action.Enter(CGF);
+    CGF.EmitStmt(S.getRawStmt());
+  };
+  Expr *Filter = nullptr;
+  if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
+    Filter = FilterClause->getThreadID();
+  CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(),
+                                              Filter);
+}
+
+/// Lower '#pragma omp masked'. With -fopenmp-enable-irbuilder the region is
+/// built via OpenMPIRBuilder::createMasked (filter defaults to thread 0 when
+/// no 'filter' clause is present); otherwise it falls back to the classic
+/// runtime-call path in emitMasked().
+void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
+  if (CGM.getLangOpts().OpenMPIRBuilder) {
+    llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
+    using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
+
+    const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt();
+    const Expr *Filter = nullptr;
+    if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
+      Filter = FilterClause->getThreadID();
+    // Evaluate the filter thread id; absent a 'filter' clause, use 0.
+    llvm::Value *FilterVal = Filter
+                                 ? EmitScalarExpr(Filter, CGM.Int32Ty)
+                                 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
+
+    // Finalization callback run when the region is complete.
+    auto FiniCB = [this](InsertPointTy IP) {
+      OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
+    };
+
+    // Body callback: emit the associated statement as an inlined region.
+    auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
+                                                  InsertPointTy CodeGenIP,
+                                                  llvm::BasicBlock &FiniBB) {
+      OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
+      OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt,
+                                             CodeGenIP, FiniBB);
+    };
+
+    LexicalScope Scope(*this, S.getSourceRange());
+    EmitStopPoint(&S);
+    Builder.restoreIP(
+        OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal));
+
+    return;
+  }
+  LexicalScope Scope(*this, S.getSourceRange());
+  EmitStopPoint(&S);
+  emitMasked(*this, S);
+}
+
void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
if (CGM.getLangOpts().OpenMPIRBuilder) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
@@ -3712,6 +4115,19 @@ void CodeGenFunction::EmitOMPParallelForDirective(
(void)emitWorksharingDirective(CGF, S, S.hasCancel());
};
{
+ if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ [](const OMPReductionClause *C) {
+ return C->getModifier() == OMPC_REDUCTION_inscan;
+ })) {
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ CGCapturedStmtInfo CGSI(CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
+ }
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
@@ -3730,6 +4146,19 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
(void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
{
+ if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ [](const OMPReductionClause *C) {
+ return C->getModifier() == OMPC_REDUCTION_inscan;
+ })) {
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ CGCapturedStmtInfo CGSI(CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
+ }
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
@@ -3892,7 +4321,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
}
}
// Get list of lastprivate variables (for taskloops).
- llvm::DenseMap<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
+ llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
auto IRef = C->varlist_begin();
auto ID = C->destination_exprs().begin();
@@ -3903,8 +4332,8 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Data.LastprivateCopies.push_back(IInit);
}
LastprivateDstsOrigs.insert(
- {cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
- cast<DeclRefExpr>(*IRef)});
+ std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
+ cast<DeclRefExpr>(*IRef)));
++IRef;
++ID;
}
@@ -3938,15 +4367,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
CapturedRegion](CodeGenFunction &CGF,
PrePostActionTy &Action) {
- llvm::DenseMap<CanonicalDeclPtr<const VarDecl>, std::pair<Address, Address>>
+ llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
+ std::pair<Address, Address>>
UntiedLocalVars;
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
!Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
- llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
- CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
enum { PrivatesParam = 2, CopyFnParam = 3 };
llvm::Value *CopyFn = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
@@ -3955,13 +4383,16 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// Map privates.
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
+ llvm::SmallVector<llvm::Type *, 4> ParamTypes;
CallArgs.push_back(PrivatesPtr);
+ ParamTypes.push_back(PrivatesPtr->getType());
for (const Expr *E : Data.PrivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Address PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
+ ParamTypes.push_back(PrivatePtr.getType());
}
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
@@ -3971,6 +4402,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
PrivatePtrs.emplace_back(VD, PrivatePtr);
FirstprivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
+ ParamTypes.push_back(PrivatePtr.getType());
}
for (const Expr *E : Data.LastprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
@@ -3979,6 +4411,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
".lastpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
+ ParamTypes.push_back(PrivatePtr.getType());
}
for (const VarDecl *VD : Data.PrivateLocals) {
QualType Ty = VD->getType().getNonReferenceType();
@@ -3988,9 +4421,19 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Ty = CGF.getContext().getPointerType(Ty);
Address PrivatePtr = CGF.CreateMemTemp(
CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
- UntiedLocalVars.try_emplace(VD, PrivatePtr, Address::invalid());
+ auto Result = UntiedLocalVars.insert(
+ std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
+ // If key exists update in place.
+ if (Result.second == false)
+ *Result.first = std::make_pair(
+ VD, std::make_pair(PrivatePtr, Address::invalid()));
CallArgs.push_back(PrivatePtr.getPointer());
+ ParamTypes.push_back(PrivatePtr.getType());
}
+ auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
+ ParamTypes, /*isVarArg=*/false);
+ CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CopyFn, CopyFnTy->getPointerTo());
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : LastprivateDstsOrigs) {
@@ -4015,14 +4458,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
if (isAllocatableDecl(Pair.first)) {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
Address Replacement(Ptr, CGF.getPointerAlign());
- Pair.getSecond().first = Replacement;
+ Pair.second.first = Replacement;
Ptr = CGF.Builder.CreateLoad(Replacement);
Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
- Pair.getSecond().second = Replacement;
+ Pair.second.second = Replacement;
} else {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
- Pair.getSecond().first = Replacement;
+ Pair.second.first = Replacement;
}
}
}
@@ -4156,7 +4599,7 @@ createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
PrivateVD->setInitStyle(VarDecl::CInit);
PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
InitRef, /*BasePath=*/nullptr,
- VK_RValue, FPOptionsOverride()));
+ VK_PRValue, FPOptionsOverride()));
Data.FirstprivateVars.emplace_back(OrigRef);
Data.FirstprivateCopies.emplace_back(PrivateRef);
Data.FirstprivateInits.emplace_back(InitRef);
@@ -4238,8 +4681,6 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// Set proper addresses for generated private copies.
OMPPrivateScope Scope(CGF);
if (!Data.FirstprivateVars.empty()) {
- llvm::FunctionType *CopyFnTy = llvm::FunctionType::get(
- CGF.Builder.getVoidTy(), {CGF.Builder.getInt8PtrTy()}, true);
enum { PrivatesParam = 2, CopyFnParam = 3 };
llvm::Value *CopyFn = CGF.Builder.CreateLoad(
CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
@@ -4248,7 +4689,9 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
// Map privates.
llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
llvm::SmallVector<llvm::Value *, 16> CallArgs;
+ llvm::SmallVector<llvm::Type *, 4> ParamTypes;
CallArgs.push_back(PrivatesPtr);
+ ParamTypes.push_back(PrivatesPtr->getType());
for (const Expr *E : Data.FirstprivateVars) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
Address PrivatePtr =
@@ -4256,7 +4699,12 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
".firstpriv.ptr.addr");
PrivatePtrs.emplace_back(VD, PrivatePtr);
CallArgs.push_back(PrivatePtr.getPointer());
+ ParamTypes.push_back(PrivatePtr.getType());
}
+ auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
+ ParamTypes, /*isVarArg=*/false);
+ CopyFn = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CopyFn, CopyFnTy->getPointerTo());
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : PrivatePtrs) {
@@ -4779,7 +5227,7 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
*this, S,
[&S](CodeGenFunction &CGF, PrePostActionTy &) {
if (isOpenMPSimdDirective(S.getDirectiveKind()))
- CGF.EmitOMPSimdInit(S, /*IsMonotonic=*/true);
+ CGF.EmitOMPSimdInit(S);
},
[&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
@@ -4859,6 +5307,8 @@ static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
CGF.CapturedStmtInfo = &CapStmtInfo;
llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
Fn->setDoesNotRecurse();
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0)
+ Fn->addFnAttr(llvm::Attribute::AlwaysInline);
return Fn;
}
@@ -5281,32 +5731,35 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
// Emit post-update store to 'v' of old/new 'x' value.
CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
- // OpenMP, 2.17.7, atomic Construct
- // If the write, update, or capture clause is specified and the release,
- // acq_rel, or seq_cst clause is specified then the strong flush on entry to
- // the atomic operation is also a release flush.
- // If the read or capture clause is specified and the acquire, acq_rel, or
- // seq_cst clause is specified then the strong flush on exit from the atomic
- // operation is also an acquire flush.
- switch (AO) {
- case llvm::AtomicOrdering::Release:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
- llvm::AtomicOrdering::Release);
- break;
- case llvm::AtomicOrdering::Acquire:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
- llvm::AtomicOrdering::Acquire);
- break;
- case llvm::AtomicOrdering::AcquireRelease:
- case llvm::AtomicOrdering::SequentiallyConsistent:
- CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
- llvm::AtomicOrdering::AcquireRelease);
- break;
- case llvm::AtomicOrdering::Monotonic:
- break;
- case llvm::AtomicOrdering::NotAtomic:
- case llvm::AtomicOrdering::Unordered:
- llvm_unreachable("Unexpected ordering.");
+ // OpenMP 5.1 removes the required flush for capture clause.
+ if (CGF.CGM.getLangOpts().OpenMP < 51) {
+ // OpenMP, 2.17.7, atomic Construct
+ // If the write, update, or capture clause is specified and the release,
+ // acq_rel, or seq_cst clause is specified then the strong flush on entry to
+ // the atomic operation is also a release flush.
+ // If the read or capture clause is specified and the acquire, acq_rel, or
+ // seq_cst clause is specified then the strong flush on exit from the atomic
+ // operation is also an acquire flush.
+ switch (AO) {
+ case llvm::AtomicOrdering::Release:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Release);
+ break;
+ case llvm::AtomicOrdering::Acquire:
+ CGF.CGM.getOpenMPRuntime().emitFlush(CGF, llvm::None, Loc,
+ llvm::AtomicOrdering::Acquire);
+ break;
+ case llvm::AtomicOrdering::AcquireRelease:
+ case llvm::AtomicOrdering::SequentiallyConsistent:
+ CGF.CGM.getOpenMPRuntime().emitFlush(
+ CGF, llvm::None, Loc, llvm::AtomicOrdering::AcquireRelease);
+ break;
+ case llvm::AtomicOrdering::Monotonic:
+ break;
+ case llvm::AtomicOrdering::NotAtomic:
+ case llvm::AtomicOrdering::Unordered:
+ llvm_unreachable("Unexpected ordering.");
+ }
}
}
@@ -5341,6 +5794,9 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_in_reduction:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
+ case OMPC_full:
+ case OMPC_partial:
case OMPC_allocator:
case OMPC_allocate:
case OMPC_collapse:
@@ -5399,7 +5855,14 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_exclusive:
case OMPC_uses_allocators:
case OMPC_affinity:
- default:
+ case OMPC_init:
+ case OMPC_inbranch:
+ case OMPC_notinbranch:
+ case OMPC_link:
+ case OMPC_use:
+ case OMPC_novariants:
+ case OMPC_nocontext:
+ case OMPC_filter:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
@@ -5431,7 +5894,7 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
C->getClauseKind() != OMPC_acq_rel &&
C->getClauseKind() != OMPC_acquire &&
C->getClauseKind() != OMPC_release &&
- C->getClauseKind() != OMPC_relaxed) {
+ C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) {
Kind = C->getClauseKind();
break;
}
@@ -5554,6 +6017,7 @@ static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
+ CGF.EnsureInsertPoint();
}
void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
@@ -5970,7 +6434,9 @@ void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
// TODO: This check is necessary as we only generate `omp parallel` through
// the OpenMPIRBuilder for now.
- if (S.getCancelRegion() == OMPD_parallel) {
+ if (S.getCancelRegion() == OMPD_parallel ||
+ S.getCancelRegion() == OMPD_sections ||
+ S.getCancelRegion() == OMPD_section) {
llvm::Value *IfCondition = nullptr;
if (IfCond)
IfCondition = EmitScalarExpr(IfCond,
@@ -6695,7 +7161,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
if (!C->getNumForLoops())
continue;
- for (unsigned I = LD->getCollapsedNumber(),
+ for (unsigned I = LD->getLoopsNumber(),
E = C->getLoopNumIterations().size();
I < E; ++I) {
if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
@@ -6714,7 +7180,8 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
if (D.getDirectiveKind() == OMPD_atomic ||
D.getDirectiveKind() == OMPD_critical ||
D.getDirectiveKind() == OMPD_section ||
- D.getDirectiveKind() == OMPD_master) {
+ D.getDirectiveKind() == OMPD_master ||
+ D.getDirectiveKind() == OMPD_masked) {
EmitStmt(D.getAssociatedStmt());
} else {
auto LPCRegion =
diff --git a/clang/lib/CodeGen/CGVTables.cpp b/clang/lib/CodeGen/CGVTables.cpp
index bef9a293b7ed..9eb650814238 100644
--- a/clang/lib/CodeGen/CGVTables.cpp
+++ b/clang/lib/CodeGen/CGVTables.cpp
@@ -427,7 +427,8 @@ void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
unsigned CallingConv;
llvm::AttributeList Attrs;
CGM.ConstructAttributeList(Callee.getCallee()->getName(), *CurFnInfo, GD,
- Attrs, CallingConv, /*AttrOnCallSite=*/true);
+ Attrs, CallingConv, /*AttrOnCallSite=*/true,
+ /*IsThunk=*/false);
Call->setAttributes(Attrs);
Call->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
@@ -531,7 +532,7 @@ llvm::Constant *CodeGenVTables::maybeEmitThunk(GlobalDecl GD,
OldThunkFn->setName(StringRef());
ThunkFn = llvm::Function::Create(ThunkFnTy, llvm::Function::ExternalLinkage,
Name.str(), &CGM.getModule());
- CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
+ CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/false);
// If needed, replace the old thunk with a bitcast.
if (!OldThunkFn->use_empty()) {
@@ -727,22 +728,7 @@ void CodeGenVTables::addVTableComponent(ConstantArrayBuilder &builder,
case VTableComponent::CK_FunctionPointer:
case VTableComponent::CK_CompleteDtorPointer:
case VTableComponent::CK_DeletingDtorPointer: {
- GlobalDecl GD;
-
- // Get the right global decl.
- switch (component.getKind()) {
- default:
- llvm_unreachable("Unexpected vtable component kind");
- case VTableComponent::CK_FunctionPointer:
- GD = component.getFunctionDecl();
- break;
- case VTableComponent::CK_CompleteDtorPointer:
- GD = GlobalDecl(component.getDestructorDecl(), Dtor_Complete);
- break;
- case VTableComponent::CK_DeletingDtorPointer:
- GD = GlobalDecl(component.getDestructorDecl(), Dtor_Deleting);
- break;
- }
+ GlobalDecl GD = component.getGlobalDecl();
if (CGM.getLangOpts().CUDA) {
// Emit NULL for methods we can't codegen on this
diff --git a/clang/lib/CodeGen/CodeGenAction.cpp b/clang/lib/CodeGen/CodeGenAction.cpp
index 778d4df3c2e9..b30bd11edbad 100644
--- a/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/clang/lib/CodeGen/CodeGenAction.cpp
@@ -60,22 +60,19 @@ namespace clang {
bool handleDiagnostics(const DiagnosticInfo &DI) override;
bool isAnalysisRemarkEnabled(StringRef PassName) const override {
- return (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
- CodeGenOpts.OptimizationRemarkAnalysisPattern->match(PassName));
+ return CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(PassName);
}
bool isMissedOptRemarkEnabled(StringRef PassName) const override {
- return (CodeGenOpts.OptimizationRemarkMissedPattern &&
- CodeGenOpts.OptimizationRemarkMissedPattern->match(PassName));
+ return CodeGenOpts.OptimizationRemarkMissed.patternMatches(PassName);
}
bool isPassedOptRemarkEnabled(StringRef PassName) const override {
- return (CodeGenOpts.OptimizationRemarkPattern &&
- CodeGenOpts.OptimizationRemarkPattern->match(PassName));
+ return CodeGenOpts.OptimizationRemark.patternMatches(PassName);
}
bool isAnyRemarkEnabled() const override {
- return (CodeGenOpts.OptimizationRemarkAnalysisPattern ||
- CodeGenOpts.OptimizationRemarkMissedPattern ||
- CodeGenOpts.OptimizationRemarkPattern);
+ return CodeGenOpts.OptimizationRemarkAnalysis.hasValidPattern() ||
+ CodeGenOpts.OptimizationRemarkMissed.hasValidPattern() ||
+ CodeGenOpts.OptimizationRemark.hasValidPattern();
}
private:
@@ -304,14 +301,7 @@ namespace clang {
if (!getModule())
return;
- // Install an inline asm handler so that diagnostics get printed through
- // our diagnostics hooks.
LLVMContext &Ctx = getModule()->getContext();
- LLVMContext::InlineAsmDiagHandlerTy OldHandler =
- Ctx.getInlineAsmDiagnosticHandler();
- void *OldContext = Ctx.getInlineAsmDiagnosticContext();
- Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
-
std::unique_ptr<DiagnosticHandler> OldDiagnosticHandler =
Ctx.getDiagnosticHandler();
Ctx.setDiagnosticHandler(std::make_unique<ClangDiagnosticHandler>(
@@ -342,11 +332,9 @@ namespace clang {
EmbedBitcode(getModule(), CodeGenOpts, llvm::MemoryBufferRef());
EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
- LangOpts, C.getTargetInfo().getDataLayout(),
+ LangOpts, C.getTargetInfo().getDataLayoutString(),
getModule(), Action, std::move(AsmOutStream));
- Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
-
Ctx.setDiagnosticHandler(std::move(OldDiagnosticHandler));
if (OptRecordFile)
@@ -380,12 +368,6 @@ namespace clang {
Gen->HandleVTable(RD);
}
- static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
- unsigned LocCookie) {
- SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
- ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
- }
-
/// Get the best possible source location to represent a diagnostic that
/// may have associated debug info.
const FullSourceLoc
@@ -393,14 +375,13 @@ namespace clang {
bool &BadDebugInfo, StringRef &Filename,
unsigned &Line, unsigned &Column) const;
- void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
- SourceLocation LocCookie);
-
void DiagnosticHandlerImpl(const llvm::DiagnosticInfo &DI);
/// Specialized handler for InlineAsm diagnostic.
/// \return True if the diagnostic has been successfully reported, false
/// otherwise.
bool InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D);
+ /// Specialized handler for diagnostics reported using SMDiagnostic.
+ void SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &D);
/// Specialized handler for StackSize diagnostic.
/// \return True if the diagnostic has been successfully reported, false
/// otherwise.
@@ -459,64 +440,6 @@ static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
return FullSourceLoc(NewLoc, CSM);
}
-
-/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
-/// error parsing inline asm. The SMDiagnostic indicates the error relative to
-/// the temporary memory buffer that the inline asm parser has set up.
-void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
- SourceLocation LocCookie) {
- // There are a couple of different kinds of errors we could get here. First,
- // we re-format the SMDiagnostic in terms of a clang diagnostic.
-
- // Strip "error: " off the start of the message string.
- StringRef Message = D.getMessage();
- if (Message.startswith("error: "))
- Message = Message.substr(7);
-
- // If the SMDiagnostic has an inline asm source location, translate it.
- FullSourceLoc Loc;
- if (D.getLoc() != SMLoc())
- Loc = ConvertBackendLocation(D, Context->getSourceManager());
-
- unsigned DiagID;
- switch (D.getKind()) {
- case llvm::SourceMgr::DK_Error:
- DiagID = diag::err_fe_inline_asm;
- break;
- case llvm::SourceMgr::DK_Warning:
- DiagID = diag::warn_fe_inline_asm;
- break;
- case llvm::SourceMgr::DK_Note:
- DiagID = diag::note_fe_inline_asm;
- break;
- case llvm::SourceMgr::DK_Remark:
- llvm_unreachable("remarks unexpected");
- }
- // If this problem has clang-level source location information, report the
- // issue in the source with a note showing the instantiated
- // code.
- if (LocCookie.isValid()) {
- Diags.Report(LocCookie, DiagID).AddString(Message);
-
- if (D.getLoc().isValid()) {
- DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
- // Convert the SMDiagnostic ranges into SourceRange and attach them
- // to the diagnostic.
- for (const std::pair<unsigned, unsigned> &Range : D.getRanges()) {
- unsigned Column = D.getColumnNo();
- B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
- Loc.getLocWithOffset(Range.second - Column));
- }
- }
- return;
- }
-
- // Otherwise, report the backend issue as occurring in the generated .s file.
- // If Loc is invalid, we still need to report the issue, it just gets no
- // location info.
- Diags.Report(Loc, DiagID).AddString(Message);
-}
-
#define ComputeDiagID(Severity, GroupName, DiagID) \
do { \
switch (Severity) { \
@@ -553,6 +476,65 @@ void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
} \
} while (false)
+void BackendConsumer::SrcMgrDiagHandler(const llvm::DiagnosticInfoSrcMgr &DI) {
+ const llvm::SMDiagnostic &D = DI.getSMDiag();
+
+ unsigned DiagID;
+ if (DI.isInlineAsmDiag())
+ ComputeDiagID(DI.getSeverity(), inline_asm, DiagID);
+ else
+ ComputeDiagID(DI.getSeverity(), source_mgr, DiagID);
+
+ // This is for the empty BackendConsumer that uses the clang diagnostic
+ // handler for IR input files.
+ if (!Context) {
+ D.print(nullptr, llvm::errs());
+ Diags.Report(DiagID).AddString("cannot compile inline asm");
+ return;
+ }
+
+ // There are a couple of different kinds of errors we could get here.
+ // First, we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ StringRef Message = D.getMessage();
+ (void)Message.consume_front("error: ");
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+ // If this problem has clang-level source location information, report the
+ // issue in the source with a note showing the instantiated
+ // code.
+ if (DI.isInlineAsmDiag()) {
+ SourceLocation LocCookie =
+ SourceLocation::getFromRawEncoding(DI.getLocCookie());
+ if (LocCookie.isValid()) {
+ Diags.Report(LocCookie, DiagID).AddString(Message);
+
+ if (D.getLoc().isValid()) {
+ DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ // Convert the SMDiagnostic ranges into SourceRange and attach them
+ // to the diagnostic.
+ for (const std::pair<unsigned, unsigned> &Range : D.getRanges()) {
+ unsigned Column = D.getColumnNo();
+ B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
+ Loc.getLocWithOffset(Range.second - Column));
+ }
+ }
+ return;
+ }
+ }
+
+ // Otherwise, report the backend issue as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the issue, it just gets no
+ // location info.
+ Diags.Report(Loc, DiagID).AddString(Message);
+ return;
+}
+
bool
BackendConsumer::InlineAsmDiagHandler(const llvm::DiagnosticInfoInlineAsm &D) {
unsigned DiagID;
@@ -589,7 +571,9 @@ BackendConsumer::StackSizeDiagHandler(const llvm::DiagnosticInfoStackSize &D) {
// FIXME: Shouldn't need to truncate to uint32_t
Diags.Report(ND->getASTContext().getFullLoc(ND->getLocation()),
diag::warn_fe_frame_larger_than)
- << static_cast<uint32_t>(D.getStackSize()) << Decl::castToDeclContext(ND);
+ << static_cast<uint32_t>(D.getStackSize())
+ << static_cast<uint32_t>(D.getStackLimit())
+ << Decl::castToDeclContext(ND);
return true;
}
@@ -722,15 +706,13 @@ void BackendConsumer::OptimizationRemarkHandler(
if (D.isPassed()) {
// Optimization remarks are active only if the -Rpass flag has a regular
// expression that matches the name of the pass name in \p D.
- if (CodeGenOpts.OptimizationRemarkPattern &&
- CodeGenOpts.OptimizationRemarkPattern->match(D.getPassName()))
+ if (CodeGenOpts.OptimizationRemark.patternMatches(D.getPassName()))
EmitOptimizationMessage(D, diag::remark_fe_backend_optimization_remark);
} else if (D.isMissed()) {
// Missed optimization remarks are active only if the -Rpass-missed
// flag has a regular expression that matches the name of the pass
// name in \p D.
- if (CodeGenOpts.OptimizationRemarkMissedPattern &&
- CodeGenOpts.OptimizationRemarkMissedPattern->match(D.getPassName()))
+ if (CodeGenOpts.OptimizationRemarkMissed.patternMatches(D.getPassName()))
EmitOptimizationMessage(
D, diag::remark_fe_backend_optimization_remark_missed);
} else {
@@ -741,8 +723,7 @@ void BackendConsumer::OptimizationRemarkHandler(
ShouldAlwaysPrint = ORA->shouldAlwaysPrint();
if (ShouldAlwaysPrint ||
- (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
- CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(D.getPassName()))
EmitOptimizationMessage(
D, diag::remark_fe_backend_optimization_remark_analysis);
}
@@ -755,8 +736,7 @@ void BackendConsumer::OptimizationRemarkHandler(
// regular expression that matches the name of the pass name in \p D.
if (D.shouldAlwaysPrint() ||
- (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
- CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(D.getPassName()))
EmitOptimizationMessage(
D, diag::remark_fe_backend_optimization_remark_analysis_fpcommute);
}
@@ -768,8 +748,7 @@ void BackendConsumer::OptimizationRemarkHandler(
// regular expression that matches the name of the pass name in \p D.
if (D.shouldAlwaysPrint() ||
- (CodeGenOpts.OptimizationRemarkAnalysisPattern &&
- CodeGenOpts.OptimizationRemarkAnalysisPattern->match(D.getPassName())))
+ CodeGenOpts.OptimizationRemarkAnalysis.patternMatches(D.getPassName()))
EmitOptimizationMessage(
D, diag::remark_fe_backend_optimization_remark_analysis_aliasing);
}
@@ -791,6 +770,9 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
return;
ComputeDiagID(Severity, inline_asm, DiagID);
break;
+ case llvm::DK_SrcMgr:
+ SrcMgrDiagHandler(cast<DiagnosticInfoSrcMgr>(DI));
+ return;
case llvm::DK_StackSize:
if (StackSizeDiagHandler(cast<DiagnosticInfoStackSize>(DI)))
return;
@@ -905,6 +887,10 @@ llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
return VMContext;
}
+CodeGenerator *CodeGenAction::getCodeGenerator() const {
+ return BEConsumer->getCodeGenerator();
+}
+
static std::unique_ptr<raw_pwrite_stream>
GetOutputStream(CompilerInstance &CI, StringRef InFile, BackendAction Action) {
switch (Action) {
@@ -987,30 +973,6 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
return std::move(Result);
}
-static void BitcodeInlineAsmDiagHandler(const llvm::SMDiagnostic &SM,
- void *Context,
- unsigned LocCookie) {
- SM.print(nullptr, llvm::errs());
-
- auto Diags = static_cast<DiagnosticsEngine *>(Context);
- unsigned DiagID;
- switch (SM.getKind()) {
- case llvm::SourceMgr::DK_Error:
- DiagID = diag::err_fe_inline_asm;
- break;
- case llvm::SourceMgr::DK_Warning:
- DiagID = diag::warn_fe_inline_asm;
- break;
- case llvm::SourceMgr::DK_Note:
- DiagID = diag::note_fe_inline_asm;
- break;
- case llvm::SourceMgr::DK_Remark:
- llvm_unreachable("remarks unexpected");
- }
-
- Diags->Report(DiagID).AddString("cannot compile inline asm");
-}
-
std::unique_ptr<llvm::Module>
CodeGenAction::loadModule(MemoryBufferRef MBRef) {
CompilerInstance &CI = getCompilerInstance();
@@ -1113,7 +1075,14 @@ void CodeGenAction::ExecuteAction() {
EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile);
LLVMContext &Ctx = TheModule->getContext();
- Ctx.setInlineAsmDiagnosticHandler(BitcodeInlineAsmDiagHandler, &Diagnostics);
+
+ // Restore any diagnostic handler previously set before returning from this
+ // function.
+ struct RAII {
+ LLVMContext &Ctx;
+ std::unique_ptr<DiagnosticHandler> PrevHandler = Ctx.getDiagnosticHandler();
+ ~RAII() { Ctx.setDiagnosticHandler(std::move(PrevHandler)); }
+ } _{Ctx};
// Set clang diagnostic handler. To do this we need to create a fake
// BackendConsumer.
@@ -1142,7 +1111,7 @@ void CodeGenAction::ExecuteAction() {
EmitBackendOutput(Diagnostics, CI.getHeaderSearchOpts(), CodeGenOpts,
TargetOpts, CI.getLangOpts(),
- CI.getTarget().getDataLayout(), TheModule.get(), BA,
+ CI.getTarget().getDataLayoutString(), TheModule.get(), BA,
std::move(OS));
if (OptRecordFile)
OptRecordFile->keep();
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index b393c88f7751..a2384456ea94 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -75,6 +75,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
if (!suppressNewContext)
CGM.getCXXABI().getMangleContext().startNewFunction();
+ EHStack.setCGF(this);
SetFastMathFlags(CurFPFeatures);
SetFPModel();
@@ -91,8 +92,8 @@ CodeGenFunction::~CodeGenFunction() {
// seems to be a reasonable spot. We do it here, as opposed to the deletion
// time of the CodeGenModule, because we have to ensure the IR has not yet
// been "emitted" to the outside, thus, modifications are still sensible.
- if (CGM.getLangOpts().OpenMPIRBuilder)
- CGM.getOpenMPRuntime().getOMPBuilder().finalize();
+ if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
+ CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}
// Map the LangOption for exception behavior into
@@ -174,7 +175,7 @@ void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
auto OldValue =
- CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true";
+ CGF.CurFn->getFnAttribute(Name).getValueAsBool();
auto NewValue = OldValue & Value;
if (OldValue != NewValue)
CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
@@ -452,13 +453,13 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
if (CGM.getCodeGenOpts().EmitDeclMetadata)
EmitDeclMetadata();
- for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
- I = DeferredReplacements.begin(),
- E = DeferredReplacements.end();
- I != E; ++I) {
- I->first->replaceAllUsesWith(I->second);
- I->first->eraseFromParent();
+ for (const auto &R : DeferredReplacements) {
+ if (llvm::Value *Old = R.first) {
+ Old->replaceAllUsesWith(R.second);
+ cast<llvm::Instruction>(Old)->eraseFromParent();
+ }
}
+ DeferredReplacements.clear();
// Eliminate CleanupDestSlot alloca by replacing it with SSA values and
// PHIs if the current function is a coroutine. We don't do it for all
@@ -495,6 +496,13 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
// function.
CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
+ // Add vscale attribute if appropriate.
+ if (getLangOpts().ArmSveVectorBits) {
+ unsigned VScale = getLangOpts().ArmSveVectorBits / 128;
+ CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(getLLVMContext(),
+ VScale, VScale));
+ }
+
// If we generated an unreachable return block, delete it now.
if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
Builder.ClearInsertionPoint();
@@ -702,23 +710,23 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
DidCallStackSave = false;
CurCodeDecl = D;
- if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
- if (FD->usesSEHTry())
- CurSEHParent = FD;
+ const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD && FD->usesSEHTry())
+ CurSEHParent = FD;
CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
FnRetTy = RetTy;
CurFn = Fn;
CurFnInfo = &FnInfo;
assert(CurFn->isDeclaration() && "Function already has body?");
- // If this function has been blacklisted for any of the enabled sanitizers,
+ // If this function is ignored for any of the enabled sanitizers,
// disable the sanitizer for the function.
do {
#define SANITIZER(NAME, ID) \
if (SanOpts.empty()) \
break; \
if (SanOpts.has(SanitizerKind::ID)) \
- if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \
+ if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc)) \
SanOpts.set(SanitizerKind::ID, false);
#include "clang/Basic/Sanitizers.def"
@@ -726,8 +734,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
} while (0);
if (D) {
- // Apply the no_sanitize* attributes to SanOpts.
+ bool NoSanitizeCoverage = false;
+
for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
+ // Apply the no_sanitize* attributes to SanOpts.
SanitizerMask mask = Attr->getMask();
SanOpts.Mask &= ~mask;
if (mask & SanitizerKind::Address)
@@ -738,7 +748,14 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
SanOpts.set(SanitizerKind::KernelHWAddress, false);
if (mask & SanitizerKind::KernelHWAddress)
SanOpts.set(SanitizerKind::HWAddress, false);
+
+ // SanitizeCoverage is not handled by SanOpts.
+ if (Attr->hasCoverage())
+ NoSanitizeCoverage = true;
}
+
+ if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
+ Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
}
// Apply sanitizer attributes to the function.
@@ -786,10 +803,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// are not aware of how to move the extra UBSan instructions across the split
// coroutine boundaries.
if (D && SanOpts.has(SanitizerKind::Null))
- if (const auto *FD = dyn_cast<FunctionDecl>(D))
- if (FD->getBody() &&
- FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
- SanOpts.Mask &= ~SanitizerKind::Null;
+ if (FD && FD->getBody() &&
+ FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
+ SanOpts.Mask &= ~SanitizerKind::Null;
// Apply xray attributes to the function (as a string, for now)
bool AlwaysXRayAttr = false;
@@ -859,8 +875,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
// Add no-jump-tables value.
- Fn->addFnAttr("no-jump-tables",
- llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
+ if (CGM.getCodeGenOpts().NoUseJumpTables)
+ Fn->addFnAttr("no-jump-tables", "true");
// Add no-inline-line-tables value.
if (CGM.getCodeGenOpts().NoInlineLineTables)
@@ -876,32 +892,30 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
Fn->addFnAttr("cfi-canonical-jump-table");
- if (getLangOpts().OpenCL) {
+ if (D && D->hasAttr<NoProfileFunctionAttr>())
+ Fn->addFnAttr(llvm::Attribute::NoProfile);
+
+ if (FD && getLangOpts().OpenCL) {
// Add metadata for a kernel function.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- EmitOpenCLKernelMetadata(FD, Fn);
+ EmitOpenCLKernelMetadata(FD, Fn);
}
// If we are checking function types, emit a function type signature as
// prologue data.
- if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
- // Remove any (C++17) exception specifications, to allow calling e.g. a
- // noexcept function through a non-noexcept pointer.
- auto ProtoTy =
- getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
- EST_None);
- llvm::Constant *FTRTTIConst =
- CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
- llvm::Constant *FTRTTIConstEncoded =
- EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
- llvm::Constant *PrologueStructElems[] = {PrologueSig,
- FTRTTIConstEncoded};
- llvm::Constant *PrologueStructConst =
- llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
- Fn->setPrologueData(PrologueStructConst);
- }
+ if (FD && getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
+ if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
+ // Remove any (C++17) exception specifications, to allow calling e.g. a
+ // noexcept function through a non-noexcept pointer.
+ auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
+ FD->getType(), EST_None);
+ llvm::Constant *FTRTTIConst =
+ CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
+ llvm::Constant *FTRTTIConstEncoded =
+ EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
+ llvm::Constant *PrologueStructElems[] = {PrologueSig, FTRTTIConstEncoded};
+ llvm::Constant *PrologueStructConst =
+ llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
+ Fn->setPrologueData(PrologueStructConst);
}
}
@@ -928,14 +942,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// kernels cannot include RTTI information, exception classes,
// recursive code, virtual functions or make use of C++ libraries that
// are not compiled for the device.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
- if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
- getLangOpts().SYCLIsDevice ||
- (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
- Fn->addFnAttr(llvm::Attribute::NoRecurse);
- }
+ if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
+ getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
+ (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
+ Fn->addFnAttr(llvm::Attribute::NoRecurse);
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (FD) {
Builder.setIsFPConstrained(FD->hasAttr<StrictFPAttr>());
if (FD->hasAttr<StrictFPAttr>())
Fn->addFnAttr(llvm::Attribute::StrictFP);
@@ -943,10 +955,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// If a custom alignment is used, force realigning to this alignment on
// any main function which certainly will need it.
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
- if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
- CGM.getCodeGenOpts().StackAlignment)
- Fn->addFnAttr("stackrealign");
+ if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
+ CGM.getCodeGenOpts().StackAlignment))
+ Fn->addFnAttr("stackrealign");
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
@@ -973,7 +984,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// such as 'this' and 'vtt', show up in the debug info. Preserve the calling
// convention.
CallingConv CC = CallingConv::CC_C;
- if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD)
if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
CC = SrcFnTy->getCallConv();
SmallVector<QualType, 16> ArgTypes;
@@ -1033,6 +1044,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Fn->addFnAttr("packed-stack");
}
+ if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX)
+ Fn->addFnAttr("warn-stack-size",
+ std::to_string(CGM.getCodeGenOpts().WarnStackSize));
+
if (RetTy->isVoidType()) {
// Void type; nothing to return.
ReturnValue = Address::invalid();
@@ -1060,9 +1075,12 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
- llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
+ llvm::Value *Addr = Builder.CreateStructGEP(
+ EI->getType()->getPointerElementType(), &*EI, Idx);
+ llvm::Type *Ty =
+ cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
ReturnValuePointer = Address(Addr, getPointerAlign());
- Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
+ Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1177,9 +1195,6 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
incrementProfileCounter(Body);
- if (CPlusPlusWithProgress())
- FnIsMustProgress = true;
-
if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
EmitCompoundStmtWithoutScope(*S);
else
@@ -1187,7 +1202,7 @@ void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
// This is checked after emitting the function body so we know if there
// are any permitted infinite loops.
- if (FnIsMustProgress)
+ if (checkIfFunctionMustProgress())
CurFn->addFnAttr(llvm::Attribute::MustProgress);
}
@@ -1272,19 +1287,6 @@ QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
return ResTy;
}
-static bool
-shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
- const ASTContext &Context) {
- QualType T = FD->getReturnType();
- // Avoid the optimization for functions that return a record type with a
- // trivial destructor or another trivially copyable type.
- if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
- if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return !ClassDecl->hasTrivialDestructor();
- }
- return !T.isTriviallyCopyableType(Context);
-}
-
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
const CGFunctionInfo &FnInfo) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
@@ -1294,8 +1296,14 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
QualType ResTy = BuildFunctionArgList(GD, Args);
// Check if we should generate debug info for this function.
- if (FD->hasAttr<NoDebugAttr>())
- DebugInfo = nullptr; // disable debug info indefinitely for this function
+ if (FD->hasAttr<NoDebugAttr>()) {
+ // Clear non-distinct debug info that was possibly attached to the function
+ // due to an earlier declaration without the nodebug attribute
+ if (Fn)
+ Fn->setSubprogram(nullptr);
+ // Disable debug info indefinitely for this function
+ DebugInfo = nullptr;
+ }
// The function might not have a body if we're generating thunks for a
// function declaration.
@@ -1321,14 +1329,25 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
Stmt *Body = FD->getBody();
- // Initialize helper which will detect jumps which can cause invalid lifetime
- // markers.
- if (Body && ShouldEmitLifetimeMarkers)
- Bypasses.Init(Body);
+ if (Body) {
+ // Coroutines always emit lifetime markers.
+ if (isa<CoroutineBodyStmt>(Body))
+ ShouldEmitLifetimeMarkers = true;
+
+ // Initialize helper which will detect jumps which can cause invalid
+ // lifetime markers.
+ if (ShouldEmitLifetimeMarkers)
+ Bypasses.Init(Body);
+ }
// Emit the standard function prologue.
StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
+ // Save parameters for coroutine function.
+ if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
+ for (const auto *ParamDecl : FD->parameters())
+ FnArgs.push_back(ParamDecl);
+
// Generate the body of the function.
PGO.assignRegionCounters(GD, CurFn);
if (isa<CXXDestructorDecl>(FD))
@@ -1365,7 +1384,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
!FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
bool ShouldEmitUnreachable =
CGM.getCodeGenOpts().StrictReturn ||
- shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
+ !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
if (SanOpts.has(SanitizerKind::Return)) {
SanitizerScope SanScope(this);
llvm::Value *IsFalse = Builder.getFalse();
@@ -1774,10 +1793,19 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
return;
}
+ // Emit the code with the fully general case.
+ llvm::Value *CondV;
+ {
+ ApplyDebugLocation DL(*this, Cond);
+ CondV = EvaluateExprAsBool(Cond);
+ }
+
+ llvm::MDNode *Weights = nullptr;
+ llvm::MDNode *Unpredictable = nullptr;
+
// If the branch has a condition wrapped by __builtin_unpredictable,
// create metadata that specifies that the branch is unpredictable.
// Don't bother if not optimizing because that metadata would not be used.
- llvm::MDNode *Unpredictable = nullptr;
auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
@@ -1787,18 +1815,17 @@ void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
}
}
- llvm::MDNode *Weights = createBranchWeights(LH);
- if (!Weights) {
+ // If there is Likelihood knowledge for the cond, lower it.
+ // Note that if not optimizing this won't emit anything.
+ llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
+ if (CondV != NewCondV)
+ CondV = NewCondV;
+ else {
+ // Otherwise, lower profile counts. Note that we do this even at -O0.
uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
}
- // Emit the code with the fully general case.
- llvm::Value *CondV;
- {
- ApplyDebugLocation DL(*this, Cond);
- CondV = EvaluateExprAsBool(Cond);
- }
Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}
@@ -1826,8 +1853,8 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
Address begin =
Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
- llvm::Value *end =
- Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
+ llvm::Value *end = Builder.CreateInBoundsGEP(
+ begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
@@ -2034,9 +2061,9 @@ llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
} else {
// Create the actual GEP.
- addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
- gepIndices, "array.begin"),
- addr.getAlignment());
+ addr = Address(Builder.CreateInBoundsGEP(
+ addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
+ addr.getAlignment());
}
baseType = eltType;
@@ -2642,35 +2669,26 @@ llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
return llvm::DebugLoc();
}
-static Optional<std::pair<uint32_t, uint32_t>>
-getLikelihoodWeights(Stmt::Likelihood LH) {
+llvm::Value *
+CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
+ Stmt::Likelihood LH) {
switch (LH) {
- case Stmt::LH_Unlikely:
- return std::pair<uint32_t, uint32_t>(llvm::UnlikelyBranchWeight,
- llvm::LikelyBranchWeight);
case Stmt::LH_None:
- return None;
+ return Cond;
case Stmt::LH_Likely:
- return std::pair<uint32_t, uint32_t>(llvm::LikelyBranchWeight,
- llvm::UnlikelyBranchWeight);
+ case Stmt::LH_Unlikely:
+ // Don't generate llvm.expect on -O0 as the backend won't use it for
+ // anything.
+ if (CGM.getCodeGenOpts().OptimizationLevel == 0)
+ return Cond;
+ llvm::Type *CondTy = Cond->getType();
+ assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
+ llvm::Function *FnExpect =
+ CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
+ llvm::Value *ExpectedValueOfCond =
+ llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
+ return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
+ Cond->getName() + ".expval");
}
llvm_unreachable("Unknown Likelihood");
}
-
-llvm::MDNode *CodeGenFunction::createBranchWeights(Stmt::Likelihood LH) const {
- Optional<std::pair<uint32_t, uint32_t>> LHW = getLikelihoodWeights(LH);
- if (!LHW)
- return nullptr;
-
- llvm::MDBuilder MDHelper(CGM.getLLVMContext());
- return MDHelper.createBranchWeights(LHW->first, LHW->second);
-}
-
-llvm::MDNode *CodeGenFunction::createProfileOrBranchWeightsForLoop(
- const Stmt *Cond, uint64_t LoopCount, const Stmt *Body) const {
- llvm::MDNode *Weights = createProfileWeightsForLoop(Cond, LoopCount);
- if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
- Weights = createBranchWeights(Stmt::getLikelihood(Body));
-
- return Weights;
-}
diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h
index 8eb7adbc8fcb..4e087ce51e37 100644
--- a/clang/lib/CodeGen/CodeGenFunction.h
+++ b/clang/lib/CodeGen/CodeGenFunction.h
@@ -50,6 +50,7 @@ class Module;
class SwitchInst;
class Twine;
class Value;
+class CanonicalLoopInfo;
}
namespace clang {
@@ -276,6 +277,20 @@ public:
// because of jumps.
VarBypassDetector Bypasses;
+ /// List of recently emitted OMPCanonicalLoops.
+ ///
+ /// Since OMPCanonicalLoops are nested inside other statements (in particular
+ /// CapturedStmt generated by OMPExecutableDirective and non-perfectly nested
+ /// loops), we cannot directly call OMPEmitOMPCanonicalLoop and receive its
+ /// llvm::CanonicalLoopInfo. Instead, we call EmitStmt and any
+ /// OMPEmitOMPCanonicalLoop called by it will add its CanonicalLoopInfo to
+ /// this stack when done. Entering a new loop requires clearing this list; it
+ /// either means we start parsing a new loop nest (in which case the previous
+ /// loop nest goes out of scope) or a second loop in the same level in which
+ /// case it would be ambiguous into which of the two (or more) loops the loop
+ /// nest would extend.
+ SmallVector<llvm::CanonicalLoopInfo *, 4> OMPLoopNestStack;
+
// CodeGen lambda for loops and support for ordered clause
typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
JumpDest)>
@@ -310,6 +325,9 @@ public:
QualType FnRetTy;
llvm::Function *CurFn = nullptr;
+ /// Save Parameter Decl for coroutine.
+ llvm::SmallVector<const ParmVarDecl *, 4> FnArgs;
+
// Holds coroutine data if the current function is a coroutine. We use a
// wrapper to manage its lifetime, so that we don't have to define CGCoroData
// in this header.
@@ -502,24 +520,52 @@ public:
/// True if the current statement has nomerge attribute.
bool InNoMergeAttributedStmt = false;
- /// True if the current function should be marked mustprogress.
- bool FnIsMustProgress = false;
+ // The CallExpr within the current statement that the musttail attribute
+ // applies to. nullptr if there is no 'musttail' on the current statement.
+ const CallExpr *MustTailCall = nullptr;
- /// True if the C++ Standard Requires Progress.
- bool CPlusPlusWithProgress() {
- return getLangOpts().CPlusPlus11 || getLangOpts().CPlusPlus14 ||
- getLangOpts().CPlusPlus17 || getLangOpts().CPlusPlus20;
- }
+ /// Returns true if a function must make progress, which means the
+ /// mustprogress attribute can be added.
+ bool checkIfFunctionMustProgress() {
+ if (CGM.getCodeGenOpts().getFiniteLoops() ==
+ CodeGenOptions::FiniteLoopsKind::Never)
+ return false;
- /// True if the C Standard Requires Progress.
- bool CWithProgress() {
- return getLangOpts().C11 || getLangOpts().C17 || getLangOpts().C2x;
+ // C++11 and later guarantees that a thread eventually will do one of the
+ // following (6.9.2.3.1 in C++11):
+ // - terminate,
+ // - make a call to a library I/O function,
+ // - perform an access through a volatile glvalue, or
+ // - perform a synchronization operation or an atomic operation.
+ //
+ // Hence each function is 'mustprogress' in C++11 or later.
+ return getLangOpts().CPlusPlus11;
}
- /// True if the language standard requires progress in functions or
- /// in infinite loops with non-constant conditionals.
- bool LanguageRequiresProgress() {
- return CWithProgress() || CPlusPlusWithProgress();
+ /// Returns true if a loop must make progress, which means the mustprogress
+ /// attribute can be added. \p HasConstantCond indicates whether the branch
+ /// condition is a known constant.
+ bool checkIfLoopMustProgress(bool HasConstantCond) {
+ if (CGM.getCodeGenOpts().getFiniteLoops() ==
+ CodeGenOptions::FiniteLoopsKind::Always)
+ return true;
+ if (CGM.getCodeGenOpts().getFiniteLoops() ==
+ CodeGenOptions::FiniteLoopsKind::Never)
+ return false;
+
+ // If the containing function must make progress, loops also must make
+ // progress (as in C++11 and later).
+ if (checkIfFunctionMustProgress())
+ return true;
+
+ // Now apply rules for plain C (see 6.8.5.6 in C11).
+ // Loops with constant conditions do not have to make progress in any C
+ // version.
+ if (HasConstantCond)
+ return false;
+
+ // Loops with non-constant conditions must make progress in C11 and later.
+ return getLangOpts().C11;
}
const CodeGen::CGBlockInfo *BlockInfo = nullptr;
@@ -539,6 +585,8 @@ public:
llvm::Instruction *CurrentFuncletPad = nullptr;
class CallLifetimeEnd final : public EHScopeStack::Cleanup {
+ bool isRedundantBeforeReturn() override { return true; }
+
llvm::Value *Addr;
llvm::Value *Size;
@@ -1419,8 +1467,9 @@ private:
};
OpenMPCancelExitStack OMPCancelStack;
- /// Calculate branch weights for the likelihood attribute
- llvm::MDNode *createBranchWeights(Stmt::Likelihood LH) const;
+ /// Lower the Likelihood knowledge about the \p Cond via llvm.expect intrin.
+ llvm::Value *emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
+ Stmt::Likelihood LH);
CodeGenPGO PGO;
@@ -1431,13 +1480,6 @@ private:
llvm::MDNode *createProfileWeightsForLoop(const Stmt *Cond,
uint64_t LoopCount) const;
- /// Calculate the branch weight for PGO data or the likelihood attribute.
- /// The function tries to get the weight of \ref createProfileWeightsForLoop.
- /// If that fails it gets the weight of \ref createBranchWeights.
- llvm::MDNode *createProfileOrBranchWeightsForLoop(const Stmt *Cond,
- uint64_t LoopCount,
- const Stmt *Body) const;
-
public:
/// Increment the profiler's counter for the given statement by \p StepV.
/// If \p StepV is null, the default increment is 1.
@@ -1866,8 +1908,9 @@ private:
/// function attribute.
unsigned LargestVectorWidth = 0;
- /// True if we need emit the life-time markers.
- const bool ShouldEmitLifetimeMarkers;
+ /// True if we need to emit the life-time markers. This is initially set in
+ /// the constructor, but could be overwritten to true if this is a coroutine.
+ bool ShouldEmitLifetimeMarkers;
/// Add OpenCL kernel arg metadata and the kernel attribute metadata to
/// the function metadata.
@@ -2824,7 +2867,12 @@ public:
void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
Address Ptr);
- llvm::Value *EmitLifetimeStart(uint64_t Size, llvm::Value *Addr);
+ void EmitSehCppScopeBegin();
+ void EmitSehCppScopeEnd();
+ void EmitSehTryScopeBegin();
+ void EmitSehTryScopeEnd();
+
+ llvm::Value *EmitLifetimeStart(llvm::TypeSize Size, llvm::Value *Addr);
void EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
@@ -3174,6 +3222,8 @@ public:
void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
void EnterSEHTryStmt(const SEHTryStmt &S);
void ExitSEHTryStmt(const SEHTryStmt &S);
+ void VolatilizeTryBlocks(llvm::BasicBlock *BB,
+ llvm::SmallPtrSet<llvm::BasicBlock *, 10> &V);
void pushSEHCleanup(CleanupKind kind,
llvm::Function *FinallyFunc);
@@ -3390,12 +3440,15 @@ public:
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
+ void EmitOMPTileDirective(const OMPTileDirective &S);
+ void EmitOMPUnrollDirective(const OMPUnrollDirective &S);
void EmitOMPForDirective(const OMPForDirective &S);
void EmitOMPForSimdDirective(const OMPForSimdDirective &S);
void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
void EmitOMPSectionDirective(const OMPSectionDirective &S);
void EmitOMPSingleDirective(const OMPSingleDirective &S);
void EmitOMPMasterDirective(const OMPMasterDirective &S);
+ void EmitOMPMaskedDirective(const OMPMaskedDirective &S);
void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
void EmitOMPParallelForSimdDirective(const OMPParallelForSimdDirective &S);
@@ -3499,6 +3552,18 @@ public:
static void EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
CodeGenModule &CGM, StringRef ParentName,
const OMPTargetTeamsDistributeParallelForDirective &S);
+
+ /// Emit the Stmt \p S and return its topmost canonical loop, if any.
+ /// TODO: The \p Depth parameter is not yet implemented and must be 1. In the
+ /// future it is meant to be the number of loops expected in the loop nests
+ /// (usually specified by the "collapse" clause) that are collapsed to a
+ /// single loop by this function.
+ llvm::CanonicalLoopInfo *EmitOMPCollapsedCanonicalLoopNest(const Stmt *S,
+ int Depth);
+
+ /// Emit an OMPCanonicalLoop using the OpenMPIRBuilder.
+ void EmitOMPCanonicalLoop(const OMPCanonicalLoop *S);
+
/// Emit inner loop of the worksharing/simd construct.
///
/// \param S Directive, for which the inner loop must be emitted.
@@ -3535,7 +3600,7 @@ public:
const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
/// Helpers for the OpenMP loop directives.
- void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
+ void EmitOMPSimdInit(const OMPLoopDirective &D);
void EmitOMPSimdFinal(
const OMPLoopDirective &D,
const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen);
@@ -3875,12 +3940,14 @@ public:
/// LLVM arguments and the types they were derived from.
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::CallBase **callOrInvoke, SourceLocation Loc);
+ llvm::CallBase **callOrInvoke, bool IsMustTail,
+ SourceLocation Loc);
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee,
ReturnValueSlot ReturnValue, const CallArgList &Args,
- llvm::CallBase **callOrInvoke = nullptr) {
+ llvm::CallBase **callOrInvoke = nullptr,
+ bool IsMustTail = false) {
return EmitCall(CallInfo, Callee, ReturnValue, Args, callOrInvoke,
- SourceLocation());
+ IsMustTail, SourceLocation());
}
RValue EmitCall(QualType FnType, const CGCallee &Callee, const CallExpr *E,
ReturnValueSlot ReturnValue, llvm::Value *Chain = nullptr);
@@ -4117,6 +4184,8 @@ public:
llvm::Value *EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
const CallExpr *E);
llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue);
bool ProcessOrderScopeAMDGCN(llvm::Value *Order, llvm::Value *Scope,
llvm::AtomicOrdering &AO,
llvm::SyncScope::ID &SSID);
@@ -4202,6 +4271,8 @@ public:
void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
+ void EmitARCNoopIntrinsicUse(ArrayRef<llvm::Value *> values);
+
static Destroyer destroyARCStrongImprecise;
static Destroyer destroyARCStrongPrecise;
static Destroyer destroyARCWeak;
@@ -4293,6 +4364,11 @@ public:
llvm::Function *createAtExitStub(const VarDecl &VD, llvm::FunctionCallee Dtor,
llvm::Constant *Addr);
+ llvm::Function *createTLSAtExitStub(const VarDecl &VD,
+ llvm::FunctionCallee Dtor,
+ llvm::Constant *Addr,
+ llvm::FunctionCallee &AtExit);
+
/// Call atexit() with a function that passes the given argument to
/// the given function.
void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::FunctionCallee fn,
@@ -4331,8 +4407,9 @@ public:
/// variables.
void GenerateCXXGlobalCleanUpFunc(
llvm::Function *Fn,
- const std::vector<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
- llvm::Constant *>> &DtorsOrStermFinalizers);
+ ArrayRef<std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
+ llvm::Constant *>>
+ DtorsOrStermFinalizers);
void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
const VarDecl *D,
@@ -4523,8 +4600,8 @@ private:
void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
- llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
- DeferredReplacements;
+ llvm::SmallVector<std::pair<llvm::WeakTrackingVH, llvm::Value *>, 4>
+ DeferredReplacements;
/// Set the address of a local variable.
void setAddrOfLocalVar(const VarDecl *VD, Address Addr) {
@@ -4630,7 +4707,6 @@ public:
struct MultiVersionResolverOption {
llvm::Function *Function;
- FunctionDecl *FD;
struct Conds {
StringRef Architecture;
llvm::SmallVector<StringRef, 8> Features;
@@ -4764,7 +4840,8 @@ inline llvm::Value *DominatingLLVMValue::restore(CodeGenFunction &CGF,
// Otherwise, it should be an alloca instruction, as set up in save().
auto alloca = cast<llvm::AllocaInst>(value.getPointer());
- return CGF.Builder.CreateAlignedLoad(alloca, alloca->getAlign());
+ return CGF.Builder.CreateAlignedLoad(alloca->getAllocatedType(), alloca,
+ alloca->getAlign());
}
} // end namespace CodeGen
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 31afbc6b4262..9b40b88ea3c9 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -75,7 +75,7 @@ static llvm::cl::opt<bool> LimitedCoverage(
static const char AnnotationSection[] = "llvm.metadata";
static CGCXXABI *createCXXABI(CodeGenModule &CGM) {
- switch (CGM.getTarget().getCXXABI().getKind()) {
+ switch (CGM.getContext().getCXXABIKind()) {
case TargetCXXABI::AppleARM64:
case TargetCXXABI::Fuchsia:
case TargetCXXABI::GenericAArch64:
@@ -180,6 +180,34 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
// CoverageMappingModuleGen object.
if (CodeGenOpts.CoverageMapping)
CoverageMapping.reset(new CoverageMappingModuleGen(*this, *CoverageInfo));
+
+ // Generate the module name hash here if needed.
+ if (CodeGenOpts.UniqueInternalLinkageNames &&
+ !getModule().getSourceFileName().empty()) {
+ std::string Path = getModule().getSourceFileName();
+ // Check if a path substitution is needed from the MacroPrefixMap.
+ for (const auto &Entry : PPO.MacroPrefixMap)
+ if (Path.rfind(Entry.first, 0) != std::string::npos) {
+ Path = Entry.second + Path.substr(Entry.first.size());
+ break;
+ }
+ llvm::MD5 Md5;
+ Md5.update(Path);
+ llvm::MD5::MD5Result R;
+ Md5.final(R);
+ SmallString<32> Str;
+ llvm::MD5::stringifyResult(R, Str);
+ // Convert MD5hash to Decimal. Demangler suffixes can either contain
+ // numbers or characters but not both.
+ llvm::APInt IntHash(128, Str.str(), 16);
+ // Prepend "__uniq" before the hash for tools like profilers to understand
+ // that this symbol is of internal linkage type. The "__uniq" is the
+ // pre-determined prefix that is used to tell tools that this symbol was
+ // created with -funique-internal-linkage-symbols and the tools can strip or
+ // keep the prefix as needed.
+ ModuleNameHash = (Twine(".__uniq.") +
+ Twine(toString(IntHash, /* Radix = */ 10, /* Signed = */false))).str();
+ }
}
CodeGenModule::~CodeGenModule() {}
@@ -459,10 +487,8 @@ void CodeGenModule::Release() {
if (ObjCRuntime)
if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
AddGlobalCtor(ObjCInitFunction);
- if (Context.getLangOpts().CUDA && !Context.getLangOpts().CUDAIsDevice &&
- CUDARuntime) {
- if (llvm::Function *CudaCtorFunction =
- CUDARuntime->makeModuleCtorFunction())
+ if (Context.getLangOpts().CUDA && CUDARuntime) {
+ if (llvm::Function *CudaCtorFunction = CUDARuntime->finalizeModule())
AddGlobalCtor(CudaCtorFunction);
}
if (OpenMPRuntime) {
@@ -485,6 +511,7 @@ void CodeGenModule::Release() {
EmitGlobalAnnotations();
EmitStaticExternCAliases();
EmitDeferredUnusedCoverageMappings();
+ CodeGenPGO(*this).setValueProfilingFlag(getModule());
if (CoverageMapping)
CoverageMapping->emit();
if (CodeGenOpts.SanitizeCfiCrossDso) {
@@ -496,6 +523,22 @@ void CodeGenModule::Release() {
!Context.getTargetInfo().getTriple().isOSEmscripten()) {
EmitMainVoidAlias();
}
+
+ // Emit reference of __amdgpu_device_library_preserve_asan_functions to
+ // preserve ASAN functions in bitcode libraries.
+ if (LangOpts.Sanitize.has(SanitizerKind::Address) && getTriple().isAMDGPU()) {
+ auto *FT = llvm::FunctionType::get(VoidTy, {});
+ auto *F = llvm::Function::Create(
+ FT, llvm::GlobalValue::ExternalLinkage,
+ "__amdgpu_device_library_preserve_asan_functions", &getModule());
+ auto *Var = new llvm::GlobalVariable(
+ getModule(), FT->getPointerTo(),
+ /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
+ "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
+ llvm::GlobalVariable::NotThreadLocal);
+ addCompilerUsedGlobal(Var);
+ }
+
emitLLVMUsed();
if (SanStats)
SanStats->finish();
@@ -533,6 +576,9 @@ void CodeGenModule::Release() {
CodeGenOpts.DwarfVersion);
}
+ if (CodeGenOpts.Dwarf64)
+ getModule().addModuleFlag(llvm::Module::Max, "DWARF64", 1);
+
if (Context.getLangOpts().SemanticInterposition)
// Require various optimization to respect semantic interposition.
getModule().setSemanticInterposition(1);
@@ -551,6 +597,10 @@ void CodeGenModule::Release() {
// Function ID tables for Control Flow Guard (cfguard=1).
getModule().addModuleFlag(llvm::Module::Warning, "cfguard", 1);
}
+ if (CodeGenOpts.EHContGuard) {
+ // Function ID tables for EH Continuation Guard.
+ getModule().addModuleFlag(llvm::Module::Warning, "ehcontguard", 1);
+ }
if (CodeGenOpts.OptimizationLevel > 0 && CodeGenOpts.StrictVTablePointers) {
// We don't support LTO with 2 with different StrictVTablePointers
// FIXME: we could support it by stripping all the information introduced
@@ -664,6 +714,16 @@ void CodeGenModule::Release() {
llvm::DenormalMode::IEEE);
}
+ if (LangOpts.EHAsynch)
+ getModule().addModuleFlag(llvm::Module::Warning, "eh-asynch", 1);
+
+ // Indicate whether this Module was compiled with -fopenmp
+ if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd)
+ getModule().addModuleFlag(llvm::Module::Max, "openmp", LangOpts.OpenMP);
+ if (getLangOpts().OpenMPIsDevice)
+ getModule().addModuleFlag(llvm::Module::Max, "openmp-device",
+ LangOpts.OpenMP);
+
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
if (LangOpts.OpenCL) {
EmitOpenCLMetadata();
@@ -708,6 +768,20 @@ void CodeGenModule::Release() {
if (CodeGenOpts.NoPLT)
getModule().setRtLibUseGOT();
+ if (CodeGenOpts.UnwindTables)
+ getModule().setUwtable();
+
+ switch (CodeGenOpts.getFramePointer()) {
+ case CodeGenOptions::FramePointerKind::None:
+ // 0 ("none") is the default.
+ break;
+ case CodeGenOptions::FramePointerKind::NonLeaf:
+ getModule().setFramePointer(llvm::FramePointerKind::NonLeaf);
+ break;
+ case CodeGenOptions::FramePointerKind::All:
+ getModule().setFramePointer(llvm::FramePointerKind::All);
+ break;
+ }
SimplifyPersonality();
@@ -726,6 +800,17 @@ void CodeGenModule::Release() {
if (!getCodeGenOpts().RecordCommandLine.empty())
EmitCommandLineMetadata();
+ if (!getCodeGenOpts().StackProtectorGuard.empty())
+ getModule().setStackProtectorGuard(getCodeGenOpts().StackProtectorGuard);
+ if (!getCodeGenOpts().StackProtectorGuardReg.empty())
+ getModule().setStackProtectorGuardReg(
+ getCodeGenOpts().StackProtectorGuardReg);
+ if (getCodeGenOpts().StackProtectorGuardOffset != INT_MAX)
+ getModule().setStackProtectorGuardOffset(
+ getCodeGenOpts().StackProtectorGuardOffset);
+ if (getCodeGenOpts().StackAlignment)
+ getModule().setOverrideStackAlignment(getCodeGenOpts().StackAlignment);
+
getTargetCodeGenInfo().emitTargetMetadata(*this, MangledDeclNames);
EmitBackendOptionsMetadata(getCodeGenOpts());
@@ -926,8 +1011,13 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
// In MinGW, variables without DLLImport can still be automatically
// imported from a DLL by the linker; don't mark variables that
// potentially could come from another DLL as DSO local.
+
+ // With EmulatedTLS, TLS variables can be autoimported from other DLLs
+ // (and this actually happens in the public interface of libstdc++), so
+ // such variables can't be marked as DSO local. (Native TLS variables
+ // can't be dllimported at all, though.)
if (GV->isDeclarationForLinker() && isa<llvm::GlobalVariable>(GV) &&
- !GV->isThreadLocal())
+ (!GV->isThreadLocal() || CGM.getCodeGenOpts().EmulatedTLS))
return false;
}
@@ -945,27 +1035,21 @@ static bool shouldAssumeDSOLocal(const CodeGenModule &CGM,
if (TT.isOSBinFormatCOFF() || (TT.isOSWindows() && TT.isOSBinFormatMachO()))
return true;
- const auto &CGOpts = CGM.getCodeGenOpts();
- llvm::Reloc::Model RM = CGOpts.RelocationModel;
- const auto &LOpts = CGM.getLangOpts();
-
- if (TT.isOSBinFormatMachO()) {
- if (RM == llvm::Reloc::Static)
- return true;
- return GV->isStrongDefinitionForLinker();
- }
-
// Only handle COFF and ELF for now.
if (!TT.isOSBinFormatELF())
return false;
+ // If this is not an executable, don't assume anything is local.
+ const auto &CGOpts = CGM.getCodeGenOpts();
+ llvm::Reloc::Model RM = CGOpts.RelocationModel;
+ const auto &LOpts = CGM.getLangOpts();
if (RM != llvm::Reloc::Static && !LOpts.PIE) {
// On ELF, if -fno-semantic-interposition is specified and the target
// supports local aliases, there will be neither CC1
// -fsemantic-interposition nor -fhalf-no-semantic-interposition. Set
- // dso_local if using a local alias is preferable (can avoid GOT
- // indirection).
- if (!GV->canBenefitFromLocalAlias())
+ // dso_local on the function if using a local alias is preferable (can avoid
+ // PLT indirection).
+ if (!(isa<llvm::Function>(GV) && GV->canBenefitFromLocalAlias()))
return false;
return !(CGM.getLangOpts().SemanticInterposition ||
CGM.getLangOpts().HalfNoSemanticInterposition);
@@ -1142,13 +1226,25 @@ static void AppendTargetMangling(const CodeGenModule &CGM,
}
}
-static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
+// Returns true if GD is a function decl with internal linkage and
+// needs a unique suffix after the mangled name.
+static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
+ CodeGenModule &CGM) {
+ const Decl *D = GD.getDecl();
+ return !CGM.getModuleNameHash().empty() && isa<FunctionDecl>(D) &&
+ (CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
+}
+
+static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
const NamedDecl *ND,
bool OmitMultiVersionMangling = false) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
MangleContext &MC = CGM.getCXXABI().getMangleContext();
- if (MC.shouldMangleDeclName(ND))
+ if (!CGM.getModuleNameHash().empty())
+ MC.needsUniqueInternalLinkageNames();
+ bool ShouldMangle = MC.shouldMangleDeclName(ND);
+ if (ShouldMangle)
MC.mangleName(GD.getWithDecl(ND), Out);
else {
IdentifierInfo *II = ND->getIdentifier();
@@ -1166,6 +1262,20 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
}
}
+ // Check if the module name hash should be appended for internal linkage
+ // symbols. This should come before multi-version target suffixes are
+ // appended. This is to keep the name and module hash suffix of the
+ // internal linkage function together. The unique suffix should only be
+ // added when name mangling is done to make sure that the final name can
+ // be properly demangled. For example, for C functions without prototypes,
+ // name mangling is not done and the unique suffix should not be appeneded
+ // then.
+ if (ShouldMangle && isUniqueInternalLinkageDecl(GD, CGM)) {
+ assert(CGM.getCodeGenOpts().UniqueInternalLinkageNames &&
+ "Hash computed when not explicitly requested");
+ Out << CGM.getModuleNameHash();
+ }
+
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
if (FD->isMultiVersion() && !OmitMultiVersionMangling) {
switch (FD->getMultiVersionKind()) {
@@ -1183,6 +1293,11 @@ static std::string getMangledNameImpl(const CodeGenModule &CGM, GlobalDecl GD,
}
}
+ // Make unique name for device side static file-scope variable for HIP.
+ if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
+ CGM.getLangOpts().GPURelocatableDeviceCode &&
+ CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
+ CGM.printPostfixForExternalizedStaticVar(Out);
return std::string(Out.str());
}
@@ -1240,9 +1355,16 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
}
}
- auto FoundName = MangledDeclNames.find(CanonicalGD);
- if (FoundName != MangledDeclNames.end())
- return FoundName->second;
+ // In CUDA/HIP device compilation with -fgpu-rdc, the mangled name of a
+ // static device variable depends on whether the variable is referenced by
+ // a host or device host function. Therefore the mangled name cannot be
+ // cached.
+ if (!LangOpts.CUDAIsDevice ||
+ !getContext().mayExternalizeStaticVar(GD.getDecl())) {
+ auto FoundName = MangledDeclNames.find(CanonicalGD);
+ if (FoundName != MangledDeclNames.end())
+ return FoundName->second;
+ }
// Keep the first result in the case of a mangling collision.
const auto *ND = cast<NamedDecl>(GD.getDecl());
@@ -1387,10 +1509,11 @@ llvm::ConstantInt *CodeGenModule::CreateCrossDsoCfiTypeId(llvm::Metadata *MD) {
void CodeGenModule::SetLLVMFunctionAttributes(GlobalDecl GD,
const CGFunctionInfo &Info,
- llvm::Function *F) {
+ llvm::Function *F, bool IsThunk) {
unsigned CallingConv;
llvm::AttributeList PAL;
- ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv, false);
+ ConstructAttributeList(F->getName(), Info, GD, PAL, CallingConv,
+ /*AttrOnCallSite=*/false, IsThunk);
F->setAttributes(PAL);
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
}
@@ -1475,6 +1598,39 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
QualType ty = parm->getType();
std::string typeQuals;
+ // Get image and pipe access qualifier:
+ if (ty->isImageType() || ty->isPipeType()) {
+ const Decl *PDecl = parm;
+ if (auto *TD = dyn_cast<TypedefType>(ty))
+ PDecl = TD->getDecl();
+ const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
+ if (A && A->isWriteOnly())
+ accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
+ else if (A && A->isReadWrite())
+ accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
+ else
+ accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
+ } else
+ accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
+
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
+
+ auto getTypeSpelling = [&](QualType Ty) {
+ auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
+
+ if (Ty.isCanonical()) {
+ StringRef typeNameRef = typeName;
+ // Turn "unsigned type" to "utype"
+ if (typeNameRef.consume_front("unsigned "))
+ return std::string("u") + typeNameRef.str();
+ if (typeNameRef.consume_front("signed "))
+ return typeNameRef.str();
+ }
+
+ return typeName;
+ };
+
if (ty->isPointerType()) {
QualType pointeeTy = ty->getPointeeType();
@@ -1484,26 +1640,10 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));
// Get argument type name.
- std::string typeName =
- pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";
-
- // Turn "unsigned type" to "utype"
- std::string::size_type pos = typeName.find("unsigned");
- if (pointeeTy.isCanonical() && pos != std::string::npos)
- typeName.erase(pos + 1, 8);
-
- argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
-
+ std::string typeName = getTypeSpelling(pointeeTy) + "*";
std::string baseTypeName =
- pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
- Policy) +
- "*";
-
- // Turn "unsigned type" to "utype"
- pos = baseTypeName.find("unsigned");
- if (pos != std::string::npos)
- baseTypeName.erase(pos + 1, 8);
-
+ getTypeSpelling(pointeeTy.getCanonicalType()) + "*";
+ argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
argBaseTypeNames.push_back(
llvm::MDString::get(VMContext, baseTypeName));
@@ -1525,30 +1665,9 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
llvm::ConstantAsMetadata::get(CGF->Builder.getInt32(AddrSpc)));
// Get argument type name.
- std::string typeName;
- if (isPipe)
- typeName = ty.getCanonicalType()
- ->castAs<PipeType>()
- ->getElementType()
- .getAsString(Policy);
- else
- typeName = ty.getUnqualifiedType().getAsString(Policy);
-
- // Turn "unsigned type" to "utype"
- std::string::size_type pos = typeName.find("unsigned");
- if (ty.isCanonical() && pos != std::string::npos)
- typeName.erase(pos + 1, 8);
-
- std::string baseTypeName;
- if (isPipe)
- baseTypeName = ty.getCanonicalType()
- ->castAs<PipeType>()
- ->getElementType()
- .getCanonicalType()
- .getAsString(Policy);
- else
- baseTypeName =
- ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);
+ ty = isPipe ? ty->castAs<PipeType>()->getElementType() : ty;
+ std::string typeName = getTypeSpelling(ty);
+ std::string baseTypeName = getTypeSpelling(ty.getCanonicalType());
// Remove access qualifiers on images
// (as they are inseparable from type in clang implementation,
@@ -1560,38 +1679,13 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
}
argTypeNames.push_back(llvm::MDString::get(VMContext, typeName));
-
- // Turn "unsigned type" to "utype"
- pos = baseTypeName.find("unsigned");
- if (pos != std::string::npos)
- baseTypeName.erase(pos + 1, 8);
-
argBaseTypeNames.push_back(
llvm::MDString::get(VMContext, baseTypeName));
if (isPipe)
typeQuals = "pipe";
}
-
argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
-
- // Get image and pipe access qualifier:
- if (ty->isImageType() || ty->isPipeType()) {
- const Decl *PDecl = parm;
- if (auto *TD = dyn_cast<TypedefType>(ty))
- PDecl = TD->getDecl();
- const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
- if (A && A->isWriteOnly())
- accessQuals.push_back(llvm::MDString::get(VMContext, "write_only"));
- else if (A && A->isReadWrite())
- accessQuals.push_back(llvm::MDString::get(VMContext, "read_write"));
- else
- accessQuals.push_back(llvm::MDString::get(VMContext, "read_only"));
- } else
- accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
-
- // Get argument name.
- argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
}
Fn->setMetadata("kernel_arg_addr_space",
@@ -1836,13 +1930,13 @@ void CodeGenModule::SetCommonAttributes(GlobalDecl GD, llvm::GlobalValue *GV) {
GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
if (D && D->hasAttr<UsedAttr>())
- addUsedGlobal(GV);
+ addUsedOrCompilerUsedGlobal(GV);
if (CodeGenOpts.KeepStaticConsts && D && isa<VarDecl>(D)) {
const auto *VD = cast<VarDecl>(D);
if (VD->getType().isConstQualified() &&
VD->getStorageDuration() == SD_Static)
- addUsedGlobal(GV);
+ addUsedOrCompilerUsedGlobal(GV);
}
}
@@ -1912,6 +2006,8 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
if (D) {
if (auto *GV = dyn_cast<llvm::GlobalVariable>(GO)) {
+ if (D->hasAttr<RetainAttr>())
+ addUsedGlobal(GV);
if (auto *SA = D->getAttr<PragmaClangBSSSectionAttr>())
GV->addAttribute("bss-section", SA->getName());
if (auto *SA = D->getAttr<PragmaClangDataSectionAttr>())
@@ -1923,6 +2019,8 @@ void CodeGenModule::setNonAliasAttributes(GlobalDecl GD,
}
if (auto *F = dyn_cast<llvm::Function>(GO)) {
+ if (D->hasAttr<RetainAttr>())
+ addUsedGlobal(F);
if (auto *SA = D->getAttr<PragmaClangTextSectionAttr>())
if (!D->getAttr<SectionAttr>())
F->addFnAttr("implicit-section-name", SA->getName());
@@ -1954,7 +2052,7 @@ void CodeGenModule::SetInternalFunctionAttributes(GlobalDecl GD,
llvm::Function *F,
const CGFunctionInfo &FI) {
const Decl *D = GD.getDecl();
- SetLLVMFunctionAttributes(GD, FI, F);
+ SetLLVMFunctionAttributes(GD, FI, F, /*IsThunk=*/false);
SetLLVMFunctionAttributesForDefinition(D, F);
F->setLinkage(llvm::Function::InternalLinkage);
@@ -2008,7 +2106,8 @@ void CodeGenModule::SetFunctionAttributes(GlobalDecl GD, llvm::Function *F,
const auto *FD = cast<FunctionDecl>(GD.getDecl());
if (!IsIncompleteFunction)
- SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F);
+ SetLLVMFunctionAttributes(GD, getTypes().arrangeGlobalDeclaration(GD), F,
+ IsThunk);
// Add the Returned attribute for "this", except for iOS 5 and earlier
// where substantial code, including the libstdc++ dylib, was compiled with
@@ -2103,6 +2202,15 @@ void CodeGenModule::addCompilerUsedGlobal(llvm::GlobalValue *GV) {
LLVMCompilerUsed.emplace_back(GV);
}
+void CodeGenModule::addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV) {
+ assert((isa<llvm::Function>(GV) || !GV->isDeclaration()) &&
+ "Only globals with definition can force usage.");
+ if (getTriple().isOSBinFormatELF())
+ LLVMCompilerUsed.emplace_back(GV);
+ else
+ LLVMUsed.emplace_back(GV);
+}
+
static void emitUsed(CodeGenModule &CGM, StringRef Name,
std::vector<llvm::WeakTrackingVH> &List) {
// Don't create llvm.used if there is no need.
@@ -2299,8 +2407,10 @@ void CodeGenModule::EmitDeferred() {
}
// Emit CUDA/HIP static device variables referenced by host code only.
- if (getLangOpts().CUDA)
- for (auto V : getContext().CUDAStaticDeviceVarReferencedByHost)
+ // Note we should not clear CUDADeviceVarODRUsedByHost since it is still
+ // needed for further handling.
+ if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
+ for (const auto *V : getContext().CUDADeviceVarODRUsedByHost)
DeferredDeclsToEmit.push_back(V);
// Stop if we're out of both deferred vtables and deferred declarations.
@@ -2485,29 +2595,28 @@ void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
Annotations.push_back(EmitAnnotateAttr(GV, I, D->getLocation()));
}
-bool CodeGenModule::isInSanitizerBlacklist(SanitizerMask Kind,
- llvm::Function *Fn,
- SourceLocation Loc) const {
- const auto &SanitizerBL = getContext().getSanitizerBlacklist();
- // Blacklist by function name.
- if (SanitizerBL.isBlacklistedFunction(Kind, Fn->getName()))
+bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
+ SourceLocation Loc) const {
+ const auto &NoSanitizeL = getContext().getNoSanitizeList();
+ // NoSanitize by function name.
+ if (NoSanitizeL.containsFunction(Kind, Fn->getName()))
return true;
- // Blacklist by location.
+ // NoSanitize by location.
if (Loc.isValid())
- return SanitizerBL.isBlacklistedLocation(Kind, Loc);
+ return NoSanitizeL.containsLocation(Kind, Loc);
// If location is unknown, this may be a compiler-generated function. Assume
// it's located in the main file.
auto &SM = Context.getSourceManager();
if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- return SanitizerBL.isBlacklistedFile(Kind, MainFile->getName());
+ return NoSanitizeL.containsFile(Kind, MainFile->getName());
}
return false;
}
-bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
- SourceLocation Loc, QualType Ty,
- StringRef Category) const {
- // For now globals can be blacklisted only in ASan and KASan.
+bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
+ SourceLocation Loc, QualType Ty,
+ StringRef Category) const {
+ // For now globals can be ignored only in ASan and KASan.
const SanitizerMask EnabledAsanMask =
LangOpts.Sanitize.Mask &
(SanitizerKind::Address | SanitizerKind::KernelAddress |
@@ -2515,22 +2624,22 @@ bool CodeGenModule::isInSanitizerBlacklist(llvm::GlobalVariable *GV,
SanitizerKind::MemTag);
if (!EnabledAsanMask)
return false;
- const auto &SanitizerBL = getContext().getSanitizerBlacklist();
- if (SanitizerBL.isBlacklistedGlobal(EnabledAsanMask, GV->getName(), Category))
+ const auto &NoSanitizeL = getContext().getNoSanitizeList();
+ if (NoSanitizeL.containsGlobal(EnabledAsanMask, GV->getName(), Category))
return true;
- if (SanitizerBL.isBlacklistedLocation(EnabledAsanMask, Loc, Category))
+ if (NoSanitizeL.containsLocation(EnabledAsanMask, Loc, Category))
return true;
// Check global type.
if (!Ty.isNull()) {
// Drill down the array types: if global variable of a fixed type is
- // blacklisted, we also don't instrument arrays of them.
+ // not sanitized, we also don't instrument arrays of them.
while (auto AT = dyn_cast<ArrayType>(Ty.getTypePtr()))
Ty = AT->getElementType();
Ty = Ty.getCanonicalType().getUnqualifiedType();
- // We allow to blacklist only record types (classes, structs etc.)
+ // Only record types (classes, structs etc.) are ignored.
if (Ty->isRecordType()) {
std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
- if (SanitizerBL.isBlacklistedType(EnabledAsanMask, TypeStr, Category))
+ if (NoSanitizeL.containsType(EnabledAsanMask, TypeStr, Category))
return true;
}
}
@@ -2607,19 +2716,24 @@ bool CodeGenModule::MustBeEmitted(const ValueDecl *Global) {
}
bool CodeGenModule::MayBeEmittedEagerly(const ValueDecl *Global) {
+ // In OpenMP 5.0 variables and function may be marked as
+ // device_type(host/nohost) and we should not emit them eagerly unless we sure
+ // that they must be emitted on the host/device. To be sure we need to have
+ // seen a declare target with an explicit mentioning of the function, we know
+ // we have if the level of the declare target attribute is -1. Note that we
+ // check somewhere else if we should emit this at all.
+ if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd) {
+ llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ OMPDeclareTargetDeclAttr::getActiveAttr(Global);
+ if (!ActiveAttr || (*ActiveAttr)->getLevel() != (unsigned)-1)
+ return false;
+ }
+
if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
// Implicit template instantiations may change linkage if they are later
// explicitly instantiated, so they should not be emitted eagerly.
return false;
- // In OpenMP 5.0 function may be marked as device_type(nohost) and we should
- // not emit them eagerly unless we sure that the function must be emitted on
- // the host.
- if (LangOpts.OpenMP >= 50 && !LangOpts.OpenMPSimd &&
- !LangOpts.OpenMPIsDevice &&
- !OMPDeclareTargetDeclAttr::getDeviceType(FD) &&
- !FD->isUsed(/*CheckUsedAttr=*/false) && !FD->isReferenced())
- return false;
}
if (const auto *VD = dyn_cast<VarDecl>(Global))
if (Context.getInlineVariableDefinitionKind(VD) ==
@@ -2739,9 +2853,7 @@ ConstantAddress CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
GlobalDecl(cast<FunctionDecl>(VD)),
/*ForVTable=*/false);
else
- Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
- llvm::PointerType::getUnqual(DeclTy),
- nullptr);
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, 0, nullptr);
auto *F = cast<llvm::GlobalValue>(Aliasee);
F->setLinkage(llvm::Function::ExternalWeakLinkage);
@@ -3031,7 +3143,7 @@ bool CodeGenModule::shouldEmitFunction(GlobalDecl GD) {
if (CodeGenOpts.OptimizationLevel == 0 && !F->hasAttr<AlwaysInlineAttr>())
return false;
- if (F->hasAttr<DLLImportAttr>()) {
+ if (F->hasAttr<DLLImportAttr>() && !F->hasAttr<AlwaysInlineAttr>()) {
// Check whether it would be safe to inline this dllimport function.
DLLImportFunctionVisitor Visitor;
Visitor.TraverseFunctionDecl(const_cast<FunctionDecl*>(F));
@@ -3141,7 +3253,9 @@ TargetMVPriority(const TargetInfo &TI,
}
void CodeGenModule::emitMultiVersionFunctions() {
- for (GlobalDecl GD : MultiVersionFuncs) {
+ std::vector<GlobalDecl> MVFuncsToEmit;
+ MultiVersionFuncs.swap(MVFuncsToEmit);
+ for (GlobalDecl GD : MVFuncsToEmit) {
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
getContext().forEachMultiversionedFunctionVersion(
@@ -3195,6 +3309,17 @@ void CodeGenModule::emitMultiVersionFunctions() {
CodeGenFunction CGF(*this);
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
+
+ // Ensure that any additions to the deferred decls list caused by emitting a
+ // variant are emitted. This can happen when the variant itself is inline and
+ // calls a function without linkage.
+ if (!MVFuncsToEmit.empty())
+ EmitDeferred();
+
+ // Ensure that any additions to the multiversion funcs list from either the
+ // deferred decls or the multiversion functions themselves are emitted.
+ if (!MultiVersionFuncs.empty())
+ emitMultiVersionFunctions();
}
void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
@@ -3269,7 +3394,7 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
++Index;
}
- llvm::sort(
+ llvm::stable_sort(
Options, [](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
return CodeGenFunction::GetX86CpuSupportsMask(LHS.Conditions.Features) >
@@ -3575,9 +3700,19 @@ llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
}
StringRef MangledName = getMangledName(GD);
- return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
- /*IsThunk=*/false, llvm::AttributeList(),
- IsForDefinition);
+ auto *F = GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable, DontDefer,
+ /*IsThunk=*/false, llvm::AttributeList(),
+ IsForDefinition);
+ // Returns kernel handle for HIP kernel stub function.
+ if (LangOpts.CUDA && !LangOpts.CUDAIsDevice &&
+ cast<FunctionDecl>(GD.getDecl())->hasAttr<CUDAGlobalAttr>()) {
+ auto *Handle = getCUDARuntime().getKernelHandle(
+ cast<llvm::Function>(F->stripPointerCasts()), GD);
+ if (IsForDefinition)
+ return F;
+ return llvm::ConstantExpr::getBitCast(Handle, Ty->getPointerTo());
+ }
+ return F;
}
static const FunctionDecl *
@@ -3586,8 +3721,8 @@ GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
IdentifierInfo &CII = C.Idents.get(Name);
- for (const auto &Result : DC->lookup(&CII))
- if (const auto FD = dyn_cast<FunctionDecl>(Result))
+ for (const auto *Result : DC->lookup(&CII))
+ if (const auto *FD = dyn_cast<FunctionDecl>(Result))
return FD;
if (!C.getLangOpts().CPlusPlus)
@@ -3601,15 +3736,15 @@ GetRuntimeFunctionDecl(ASTContext &C, StringRef Name) {
for (const auto &N : {"__cxxabiv1", "std"}) {
IdentifierInfo &NS = C.Idents.get(N);
- for (const auto &Result : DC->lookup(&NS)) {
- NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
- if (auto LSD = dyn_cast<LinkageSpecDecl>(Result))
- for (const auto &Result : LSD->lookup(&NS))
+ for (const auto *Result : DC->lookup(&NS)) {
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(Result);
+ if (auto *LSD = dyn_cast<LinkageSpecDecl>(Result))
+ for (const auto *Result : LSD->lookup(&NS))
if ((ND = dyn_cast<NamespaceDecl>(Result)))
break;
if (ND)
- for (const auto &Result : ND->lookup(&CXXII))
+ for (const auto *Result : ND->lookup(&CXXII))
if (const auto *FD = dyn_cast<FunctionDecl>(Result))
return FD;
}
@@ -3680,9 +3815,9 @@ bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
}
/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
-/// create and return an llvm GlobalVariable with the specified type. If there
-/// is something in the module with the specified name, return it potentially
-/// bitcasted to the right type.
+/// create and return an llvm GlobalVariable with the specified type and address
+/// space. If there is something in the module with the specified name, return
+/// it potentially bitcasted to the right type.
///
/// If D is non-null, it specifies a decl that correspond to this. This is used
/// to set the attributes on the global when it is first created.
@@ -3691,9 +3826,8 @@ bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
/// type Ty will be returned, not conversion of a variable with the same
/// mangled name but some other type.
llvm::Constant *
-CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
- llvm::PointerType *Ty,
- const VarDecl *D,
+CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
+ unsigned AddrSpace, const VarDecl *D,
ForDefinition_t IsForDefinition) {
// Lookup the entry, lazily creating it if necessary.
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
@@ -3710,7 +3844,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
getOpenMPRuntime().registerTargetGlobalVariable(D, Entry);
- if (Entry->getType() == Ty)
+ if (Entry->getValueType() == Ty && Entry->getAddressSpace() == AddrSpace)
return Entry;
// If there are two attempts to define the same mangled name, issue an
@@ -3734,22 +3868,24 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
}
// Make sure the result is of the correct type.
- if (Entry->getType()->getAddressSpace() != Ty->getAddressSpace())
- return llvm::ConstantExpr::getAddrSpaceCast(Entry, Ty);
+ if (Entry->getType()->getAddressSpace() != AddrSpace) {
+ return llvm::ConstantExpr::getAddrSpaceCast(Entry,
+ Ty->getPointerTo(AddrSpace));
+ }
// (If global is requested for a definition, we always need to create a new
// global, not just return a bitcast.)
if (!IsForDefinition)
- return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo(AddrSpace));
}
- auto AddrSpace = GetGlobalVarAddressSpace(D);
- auto TargetAddrSpace = getContext().getTargetAddressSpace(AddrSpace);
+ auto DAddrSpace = GetGlobalVarAddressSpace(D);
+ auto TargetAddrSpace = getContext().getTargetAddressSpace(DAddrSpace);
auto *GV = new llvm::GlobalVariable(
- getModule(), Ty->getElementType(), false,
- llvm::GlobalValue::ExternalLinkage, nullptr, MangledName, nullptr,
- llvm::GlobalVariable::NotThreadLocal, TargetAddrSpace);
+ getModule(), Ty, false, llvm::GlobalValue::ExternalLinkage, nullptr,
+ MangledName, nullptr, llvm::GlobalVariable::NotThreadLocal,
+ TargetAddrSpace);
// If we already created a global with the same mangled name (but different
// type) before, take its name and remove it from its parent.
@@ -3860,17 +3996,23 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
}
}
- if (GV->isDeclaration())
+ if (GV->isDeclaration()) {
getTargetCodeGenInfo().setTargetAttributes(D, GV, *this);
+ // External HIP managed variables needed to be recorded for transformation
+ // in both device and host compilations.
+ if (getLangOpts().CUDA && D && D->hasAttr<HIPManagedAttr>() &&
+ D->hasExternalStorage())
+ getCUDARuntime().handleVarRegistration(D, *GV);
+ }
LangAS ExpectedAS =
D ? D->getType().getAddressSpace()
: (LangOpts.OpenCL ? LangAS::opencl_global : LangAS::Default);
- assert(getContext().getTargetAddressSpace(ExpectedAS) ==
- Ty->getPointerAddressSpace());
- if (AddrSpace != ExpectedAS)
- return getTargetCodeGenInfo().performAddrSpaceCast(*this, GV, AddrSpace,
- ExpectedAS, Ty);
+ assert(getContext().getTargetAddressSpace(ExpectedAS) == AddrSpace);
+ if (DAddrSpace != ExpectedAS) {
+ return getTargetCodeGenInfo().performAddrSpaceCast(
+ *this, GV, DAddrSpace, ExpectedAS, Ty->getPointerTo(AddrSpace));
+ }
return GV;
}
@@ -3958,11 +4100,10 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
if (!Ty)
Ty = getTypes().ConvertTypeForMem(ASTTy);
- llvm::PointerType *PTy =
- llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
-
StringRef MangledName = getMangledName(D);
- return GetOrCreateLLVMGlobal(MangledName, PTy, D, IsForDefinition);
+ return GetOrCreateLLVMGlobal(MangledName, Ty,
+ getContext().getTargetAddressSpace(ASTTy), D,
+ IsForDefinition);
}
/// CreateRuntimeVariable - Create a new runtime global variable with the
@@ -3970,12 +4111,11 @@ llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
llvm::Constant *
CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
StringRef Name) {
- auto PtrTy =
+ auto AddrSpace =
getContext().getLangOpts().OpenCL
- ? llvm::PointerType::get(
- Ty, getContext().getTargetAddressSpace(LangAS::opencl_global))
- : llvm::PointerType::getUnqual(Ty);
- auto *Ret = GetOrCreateLLVMGlobal(Name, PtrTy, nullptr);
+ ? getContext().getTargetAddressSpace(LangAS::opencl_global)
+ : 0;
+ auto *Ret = GetOrCreateLLVMGlobal(Name, Ty, AddrSpace, nullptr);
setDSOLocal(cast<llvm::GlobalValue>(Ret->stripPointerCasts()));
return Ret;
}
@@ -4025,6 +4165,10 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return AddrSpace;
}
+ if (LangOpts.SYCLIsDevice &&
+ (!D || D->getType().getAddressSpace() == LangAS::Default))
+ return LangAS::sycl_global;
+
if (LangOpts.CUDA && LangOpts.CUDAIsDevice) {
if (D && D->hasAttr<CUDAConstantAttr>())
return LangAS::cuda_constant;
@@ -4046,10 +4190,12 @@ LangAS CodeGenModule::GetGlobalVarAddressSpace(const VarDecl *D) {
return getTargetCodeGenInfo().getGlobalVarAddressSpace(*this, D);
}
-LangAS CodeGenModule::getStringLiteralAddressSpace() const {
+LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
// OpenCL v1.2 s6.5.3: a string literal is in the constant address space.
if (LangOpts.OpenCL)
return LangAS::opencl_constant;
+ if (LangOpts.SYCLIsDevice)
+ return LangAS::sycl_global;
if (auto AS = getTarget().getConstantAddressSpace())
return AS.getValue();
return LangAS::Default;
@@ -4068,13 +4214,12 @@ castStringLiteralToDefaultAddressSpace(CodeGenModule &CGM,
llvm::GlobalVariable *GV) {
llvm::Constant *Cast = GV;
if (!CGM.getLangOpts().OpenCL) {
- if (auto AS = CGM.getTarget().getConstantAddressSpace()) {
- if (AS != LangAS::Default)
- Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
- CGM, GV, AS.getValue(), LangAS::Default,
- GV->getValueType()->getPointerTo(
- CGM.getContext().getTargetAddressSpace(LangAS::Default)));
- }
+ auto AS = CGM.GetGlobalConstantAddressSpace();
+ if (AS != LangAS::Default)
+ Cast = CGM.getTargetCodeGenInfo().performAddrSpaceCast(
+ CGM, GV, AS, LangAS::Default,
+ GV->getValueType()->getPointerTo(
+ CGM.getContext().getTargetAddressSpace(LangAS::Default)));
}
return Cast;
}
@@ -4164,7 +4309,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
OpenMPRuntime->emitTargetGlobalVariable(D))
return;
- llvm::Constant *Init = nullptr;
+ llvm::TrackingVH<llvm::Constant> Init;
bool NeedsGlobalCtor = false;
bool NeedsGlobalDtor =
D->needsDestruction(getContext()) == QualType::DK_cxx_destructor;
@@ -4181,22 +4326,20 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
getLangOpts().CUDAIsDevice && D->hasAttr<CUDASharedAttr>();
// Shadows of initialized device-side global variables are also left
// undefined.
+ // Managed Variables should be initialized on both host side and device side.
bool IsCUDAShadowVar =
!getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
(D->hasAttr<CUDAConstantAttr>() || D->hasAttr<CUDADeviceAttr>() ||
D->hasAttr<CUDASharedAttr>());
bool IsCUDADeviceShadowVar =
- getLangOpts().CUDAIsDevice &&
+ getLangOpts().CUDAIsDevice && !D->hasAttr<HIPManagedAttr>() &&
(D->getType()->isCUDADeviceBuiltinSurfaceType() ||
- D->getType()->isCUDADeviceBuiltinTextureType() ||
- D->hasAttr<HIPManagedAttr>());
- // HIP pinned shadow of initialized host-side global variables are also
- // left undefined.
+ D->getType()->isCUDADeviceBuiltinTextureType());
if (getLangOpts().CUDA &&
(IsCUDASharedVar || IsCUDAShadowVar || IsCUDADeviceShadowVar))
- Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
else if (D->hasAttr<LoaderUninitializedAttr>())
- Init = llvm::UndefValue::get(getTypes().ConvertType(ASTTy));
+ Init = llvm::UndefValue::get(getTypes().ConvertTypeForMem(ASTTy));
else if (!InitExpr) {
// This is a tentative definition; tentative definitions are
// implicitly initialized with { 0 }.
@@ -4212,9 +4355,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
} else {
initializedGlobalDecl = GlobalDecl(D);
emitter.emplace(*this);
- Init = emitter->tryEmitForInitializer(*InitDecl);
-
- if (!Init) {
+ llvm::Constant *Initializer = emitter->tryEmitForInitializer(*InitDecl);
+ if (!Initializer) {
QualType T = InitExpr->getType();
if (D->getType()->isReferenceType())
T = D->getType();
@@ -4227,6 +4369,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
Init = llvm::UndefValue::get(getTypes().ConvertType(T));
}
} else {
+ Init = Initializer;
// We don't need an initializer, so remove the entry for the delayed
// initializer position (just in case this entry was delayed) if we
// also don't need to register a destructor.
@@ -4268,7 +4411,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// Replace all uses of the old global with the new global
llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV,
+ Entry->getType());
Entry->replaceAllUsesWith(NewPtrForOldDecl);
// Erase the old global, since it is no longer used.
@@ -4297,60 +4441,9 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
(D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()))
GV->setExternallyInitialized(true);
} else {
- // Host-side shadows of external declarations of device-side
- // global variables become internal definitions. These have to
- // be internal in order to prevent name conflicts with global
- // host variables with the same name in a different TUs.
- if (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>()) {
- Linkage = llvm::GlobalValue::InternalLinkage;
- // Shadow variables and their properties must be registered with CUDA
- // runtime. Skip Extern global variables, which will be registered in
- // the TU where they are defined.
- //
- // Don't register a C++17 inline variable. The local symbol can be
- // discarded and referencing a discarded local symbol from outside the
- // comdat (__cuda_register_globals) is disallowed by the ELF spec.
- // TODO: Reject __device__ constexpr and __device__ inline in Sema.
- if (!D->hasExternalStorage() && !D->isInline())
- getCUDARuntime().registerDeviceVar(D, *GV, !D->hasDefinition(),
- D->hasAttr<CUDAConstantAttr>());
- } else if (D->hasAttr<CUDASharedAttr>()) {
- // __shared__ variables are odd. Shadows do get created, but
- // they are not registered with the CUDA runtime, so they
- // can't really be used to access their device-side
- // counterparts. It's not clear yet whether it's nvcc's bug or
- // a feature, but we've got to do the same for compatibility.
- Linkage = llvm::GlobalValue::InternalLinkage;
- } else if (D->getType()->isCUDADeviceBuiltinSurfaceType() ||
- D->getType()->isCUDADeviceBuiltinTextureType()) {
- // Builtin surfaces and textures and their template arguments are
- // also registered with CUDA runtime.
- Linkage = llvm::GlobalValue::InternalLinkage;
- const ClassTemplateSpecializationDecl *TD =
- cast<ClassTemplateSpecializationDecl>(
- D->getType()->getAs<RecordType>()->getDecl());
- const TemplateArgumentList &Args = TD->getTemplateArgs();
- if (TD->hasAttr<CUDADeviceBuiltinSurfaceTypeAttr>()) {
- assert(Args.size() == 2 &&
- "Unexpected number of template arguments of CUDA device "
- "builtin surface type.");
- auto SurfType = Args[1].getAsIntegral();
- if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceSurf(D, *GV, !D->hasDefinition(),
- SurfType.getSExtValue());
- } else {
- assert(Args.size() == 3 &&
- "Unexpected number of template arguments of CUDA device "
- "builtin texture type.");
- auto TexType = Args[1].getAsIntegral();
- auto Normalized = Args[2].getAsIntegral();
- if (!D->hasExternalStorage())
- getCUDARuntime().registerDeviceTex(D, *GV, !D->hasDefinition(),
- TexType.getSExtValue(),
- Normalized.getZExtValue());
- }
- }
+ getCUDARuntime().internalizeDeviceSideVar(D, Linkage);
}
+ getCUDARuntime().handleVarRegistration(D, *GV);
}
GV->setInitializer(Init);
@@ -4436,9 +4529,8 @@ void CodeGenModule::EmitExternalVarDeclaration(const VarDecl *D) {
if (getCodeGenOpts().hasReducedDebugInfo()) {
QualType ASTTy = D->getType();
llvm::Type *Ty = getTypes().ConvertTypeForMem(D->getType());
- llvm::PointerType *PTy =
- llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
- llvm::Constant *GV = GetOrCreateLLVMGlobal(D->getName(), PTy, D);
+ llvm::Constant *GV = GetOrCreateLLVMGlobal(
+ D->getName(), Ty, getContext().getTargetAddressSpace(ASTTy), D);
DI->EmitExternalVariable(
cast<llvm::GlobalVariable>(GV->stripPointerCasts()), D);
}
@@ -4610,7 +4702,6 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
llvm::Type *newRetTy = newFn->getReturnType();
SmallVector<llvm::Value*, 4> newArgs;
- SmallVector<llvm::OperandBundleDef, 1> newBundles;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
ui != ue; ) {
@@ -4667,6 +4758,7 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
newArgs.append(callSite->arg_begin(), callSite->arg_begin() + argNo);
// Copy over any operand bundles.
+ SmallVector<llvm::OperandBundleDef, 1> newBundles;
callSite->getOperandBundlesAsDefs(newBundles);
llvm::CallBase *newCall;
@@ -4810,8 +4902,7 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
/*ForVTable=*/false);
LT = getFunctionLinkage(GD);
} else {
- Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
- llvm::PointerType::getUnqual(DeclTy),
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(), DeclTy, 0,
/*D=*/nullptr);
if (const auto *VD = dyn_cast<VarDecl>(GD.getDecl()))
LT = getLLVMLinkageVarDefinition(VD, D->getType().isConstQualified());
@@ -5027,7 +5118,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
const VarDecl *VD = nullptr;
- for (const auto &Result : DC->lookup(&II))
+ for (const auto *Result : DC->lookup(&II))
if ((VD = dyn_cast<VarDecl>(Result)))
break;
@@ -5239,7 +5330,7 @@ GenerateStringLiteral(llvm::Constant *C, llvm::GlobalValue::LinkageTypes LT,
CodeGenModule &CGM, StringRef GlobalName,
CharUnits Alignment) {
unsigned AddrSpace = CGM.getContext().getTargetAddressSpace(
- CGM.getStringLiteralAddressSpace());
+ CGM.GetGlobalConstantAddressSpace());
llvm::Module &M = CGM.getModule();
// Create a global variable for this string
@@ -5366,8 +5457,21 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
CharUnits Align = getContext().getTypeAlignInChars(MaterializedType);
- if (llvm::Constant *Slot = MaterializedGlobalTemporaryMap[E])
- return ConstantAddress(Slot, Align);
+ auto InsertResult = MaterializedGlobalTemporaryMap.insert({E, nullptr});
+ if (!InsertResult.second) {
+ // We've seen this before: either we already created it or we're in the
+ // process of doing so.
+ if (!InsertResult.first->second) {
+ // We recursively re-entered this function, probably during emission of
+ // the initializer. Create a placeholder. We'll clean this up in the
+ // outer call, at the end of this function.
+ llvm::Type *Type = getTypes().ConvertTypeForMem(MaterializedType);
+ InsertResult.first->second = new llvm::GlobalVariable(
+ getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
+ nullptr);
+ }
+ return ConstantAddress(InsertResult.first->second, Align);
+ }
// FIXME: If an externally-visible declaration extends multiple temporaries,
// we need to give each temporary the same name in every translation unit (and
@@ -5446,7 +5550,17 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
*this, GV, AddrSpace, LangAS::Default,
Type->getPointerTo(
getContext().getTargetAddressSpace(LangAS::Default)));
- MaterializedGlobalTemporaryMap[E] = CV;
+
+ // Update the map with the new temporary. If we created a placeholder above,
+ // replace it with the new global now.
+ llvm::Constant *&Entry = MaterializedGlobalTemporaryMap[E];
+ if (Entry) {
+ Entry->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(CV, Entry->getType()));
+ llvm::cast<llvm::GlobalVariable>(Entry)->eraseFromParent();
+ }
+ Entry = CV;
+
return ConstantAddress(CV, Align);
}
@@ -5649,6 +5763,10 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(*D));
break;
+ case Decl::UsingEnum: // using enum X; [C++]
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitUsingEnumDecl(cast<UsingEnumDecl>(*D));
+ break;
case Decl::NamespaceAlias:
if (CGDebugInfo *DI = getModuleDebugInfo())
DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(*D));
@@ -5747,6 +5865,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
// File-scope asm is ignored during device-side OpenMP compilation.
if (LangOpts.OpenMPIsDevice)
break;
+ // File-scope asm is ignored during device-side SYCL compilation.
+ if (LangOpts.SYCLIsDevice)
+ break;
auto *AD = cast<FileScopeAsmDecl>(D);
getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
break;
@@ -5804,6 +5925,7 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
break;
case Decl::OMPAllocate:
+ EmitOMPAllocateDecl(cast<OMPAllocateDecl>(D));
break;
case Decl::OMPDeclareReduction:
@@ -5973,7 +6095,7 @@ void CodeGenModule::EmitStaticExternCAliases() {
IdentifierInfo *Name = I.first;
llvm::GlobalValue *Val = I.second;
if (Val && !getModule().getNamedValue(Name->getName()))
- addUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
+ addCompilerUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
}
}
@@ -6215,15 +6337,16 @@ llvm::SanitizerStatReport &CodeGenModule::getSanStats() {
return *SanStats;
}
+
llvm::Value *
CodeGenModule::createOpenCLIntToSamplerConversion(const Expr *E,
CodeGenFunction &CGF) {
llvm::Constant *C = ConstantEmitter(CGF).emitAbstract(E, E->getType());
- auto SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
- auto FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
- return CGF.Builder.CreateCall(CreateRuntimeFunction(FTy,
- "__translate_sampler_initializer"),
- {C});
+ auto *SamplerT = getOpenCLRuntime().getSamplerType(E->getType().getTypePtr());
+ auto *FTy = llvm::FunctionType::get(SamplerT, {C->getType()}, false);
+ auto *Call = CGF.EmitRuntimeCall(
+ CreateRuntimeFunction(FTy, "__translate_sampler_initializer"), {C});
+ return Call;
}
CharUnits CodeGenModule::getNaturalPointeeTypeAlignment(
@@ -6322,3 +6445,8 @@ bool CodeGenModule::stopAutoInit() {
}
return false;
}
+
+void CodeGenModule::printPostfixForExternalizedStaticVar(
+ llvm::raw_ostream &OS) const {
+ OS << ".static." << getContext().getCUIDHash();
+}
diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h
index 618e2f857b07..47dc6f415b60 100644
--- a/clang/lib/CodeGen/CodeGenModule.h
+++ b/clang/lib/CodeGen/CodeGenModule.h
@@ -25,9 +25,10 @@
#include "clang/Basic/ABI.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/Module.h"
-#include "clang/Basic/SanitizerBlacklist.h"
+#include "clang/Basic/NoSanitizeList.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/XRayLists.h"
+#include "clang/Lex/PreprocessorOptions.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -72,7 +73,6 @@ class VarDecl;
class LangOptions;
class CodeGenOptions;
class HeaderSearchOptions;
-class PreprocessorOptions;
class DiagnosticsEngine;
class AnnotateAttr;
class CXXDestructorDecl;
@@ -103,17 +103,17 @@ enum ForDefinition_t : bool {
ForDefinition = true
};
-struct OrderGlobalInits {
+struct OrderGlobalInitsOrStermFinalizers {
unsigned int priority;
unsigned int lex_order;
- OrderGlobalInits(unsigned int p, unsigned int l)
+ OrderGlobalInitsOrStermFinalizers(unsigned int p, unsigned int l)
: priority(p), lex_order(l) {}
- bool operator==(const OrderGlobalInits &RHS) const {
+ bool operator==(const OrderGlobalInitsOrStermFinalizers &RHS) const {
return priority == RHS.priority && lex_order == RHS.lex_order;
}
- bool operator<(const OrderGlobalInits &RHS) const {
+ bool operator<(const OrderGlobalInitsOrStermFinalizers &RHS) const {
return std::tie(priority, lex_order) <
std::tie(RHS.priority, RHS.lex_order);
}
@@ -210,6 +210,9 @@ struct ObjCEntrypoints {
/// void clang.arc.use(...);
llvm::Function *clang_arc_use;
+
+ /// void clang.arc.noop.use(...);
+ llvm::Function *clang_arc_noop_use;
};
/// This class records statistics on instrumentation based profiling.
@@ -308,6 +311,7 @@ private:
const TargetInfo &Target;
std::unique_ptr<CGCXXABI> ABI;
llvm::LLVMContext &VMContext;
+ std::string ModuleNameHash = "";
std::unique_ptr<CodeGenTBAA> TBAA;
@@ -453,7 +457,8 @@ private:
/// that we don't re-emit the initializer.
llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
- typedef std::pair<OrderGlobalInits, llvm::Function*> GlobalInitData;
+ typedef std::pair<OrderGlobalInitsOrStermFinalizers, llvm::Function *>
+ GlobalInitData;
struct GlobalInitPriorityCmp {
bool operator()(const GlobalInitData &LHS,
@@ -469,10 +474,26 @@ private:
/// Global destructor functions and arguments that need to run on termination.
/// When UseSinitAndSterm is set, it instead contains sterm finalizer
/// functions, which also run on unloading a shared library.
- std::vector<
- std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH, llvm::Constant *>>
+ typedef std::tuple<llvm::FunctionType *, llvm::WeakTrackingVH,
+ llvm::Constant *>
+ CXXGlobalDtorsOrStermFinalizer_t;
+ SmallVector<CXXGlobalDtorsOrStermFinalizer_t, 8>
CXXGlobalDtorsOrStermFinalizers;
+ typedef std::pair<OrderGlobalInitsOrStermFinalizers, llvm::Function *>
+ StermFinalizerData;
+
+ struct StermFinalizerPriorityCmp {
+ bool operator()(const StermFinalizerData &LHS,
+ const StermFinalizerData &RHS) const {
+ return LHS.first.priority < RHS.first.priority;
+ }
+ };
+
+ /// Global variables with sterm finalizers whose order of initialization is
+ /// set by init_priority attribute.
+ SmallVector<StermFinalizerData, 8> PrioritizedCXXStermFinalizers;
+
/// The complete set of modules that has been imported.
llvm::SetVector<clang::Module *> ImportedModules;
@@ -583,6 +604,8 @@ public:
/// Return true iff an Objective-C runtime has been configured.
bool hasObjCRuntime() { return !!ObjCRuntime; }
+ const std::string &getModuleNameHash() const { return ModuleNameHash; }
+
/// Return a reference to the configured OpenCL runtime.
CGOpenCLRuntime &getOpenCLRuntime() {
assert(OpenCLRuntime != nullptr);
@@ -832,6 +855,13 @@ public:
/// space, target-specific global or constant address space may be returned.
LangAS GetGlobalVarAddressSpace(const VarDecl *D);
+ /// Return the AST address space of constant literal, which is used to emit
+ /// the constant literal as global variable in LLVM IR.
+ /// Note: This is not necessarily the address space of the constant literal
+ /// in AST. For address space agnostic language, e.g. C++, constant literal
+ /// in AST is always in default address space.
+ LangAS GetGlobalConstantAddressSpace() const;
+
/// Return the llvm::Constant for the address of the given global variable.
/// If Ty is non-null and if the global doesn't exist, then it will be created
/// with the specified type instead of whatever the normal requested type
@@ -843,13 +873,6 @@ public:
ForDefinition_t IsForDefinition
= NotForDefinition);
- /// Return the AST address space of string literal, which is used to emit
- /// the string literal as global variable in LLVM IR.
- /// Note: This is not necessarily the address space of the string literal
- /// in AST. For address space agnostic language, e.g. C++, string literal
- /// in AST is always in default address space.
- LangAS getStringLiteralAddressSpace() const;
-
/// Return the address of the given function. If Ty is non-null, then this
/// function will use the specified type if it has to create it.
llvm::Constant *GetAddrOfFunction(GlobalDecl GD, llvm::Type *Ty = nullptr,
@@ -1051,6 +1074,9 @@ public:
/// Add a global to a list to be added to the llvm.compiler.used metadata.
void addCompilerUsedGlobal(llvm::GlobalValue *GV);
+ /// Add a global to a list to be added to the llvm.compiler.used metadata.
+ void addUsedOrCompilerUsedGlobal(llvm::GlobalValue *GV);
+
/// Add a destructor and object to add to the C++ global destructor function.
void AddCXXDtorEntry(llvm::FunctionCallee DtorFn, llvm::Constant *Object) {
CXXGlobalDtorsOrStermFinalizers.emplace_back(DtorFn.getFunctionType(),
@@ -1069,6 +1095,14 @@ public:
AddGlobalDtor(StermFinalizer, Priority);
}
+ void AddCXXPrioritizedStermFinalizerEntry(llvm::Function *StermFinalizer,
+ int Priority) {
+ OrderGlobalInitsOrStermFinalizers Key(Priority,
+ PrioritizedCXXStermFinalizers.size());
+ PrioritizedCXXStermFinalizers.push_back(
+ std::make_pair(Key, StermFinalizer));
+ }
+
/// Create or return a runtime function declaration with the specified type
/// and name. If \p AssumeConvergent is true, the call will have the
/// convergent attribute added.
@@ -1130,7 +1164,7 @@ public:
/// Set the LLVM function attributes (sext, zext, etc).
void SetLLVMFunctionAttributes(GlobalDecl GD, const CGFunctionInfo &Info,
- llvm::Function *F);
+ llvm::Function *F, bool IsThunk);
/// Set the LLVM function attributes which only apply to a function
/// definition.
@@ -1166,7 +1200,7 @@ public:
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info,
CGCalleeInfo CalleeInfo,
llvm::AttributeList &Attrs, unsigned &CallingConv,
- bool AttrOnCallSite);
+ bool AttrOnCallSite, bool IsThunk);
/// Adds attributes to F according to our CodeGenOptions and LangOptions, as
/// though we had emitted it ourselves. We remove any attributes on F that
@@ -1264,12 +1298,11 @@ public:
/// annotations are emitted during finalization of the LLVM code.
void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV);
- bool isInSanitizerBlacklist(SanitizerMask Kind, llvm::Function *Fn,
- SourceLocation Loc) const;
+ bool isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
+ SourceLocation Loc) const;
- bool isInSanitizerBlacklist(llvm::GlobalVariable *GV, SourceLocation Loc,
- QualType Ty,
- StringRef Category = StringRef()) const;
+ bool isInNoSanitizeList(llvm::GlobalVariable *GV, SourceLocation Loc,
+ QualType Ty, StringRef Category = StringRef()) const;
/// Imbue XRay attributes to a function, applying the always/never attribute
/// lists in the process. Returns true if we did imbue attributes this way,
@@ -1323,6 +1356,10 @@ public:
/// \param D Requires declaration
void EmitOMPRequiresDecl(const OMPRequiresDecl *D);
+ /// Emit a code for the allocate directive.
+ /// \param D The allocate declaration
+ void EmitOMPAllocateDecl(const OMPAllocateDecl *D);
+
/// Returns whether the given record has hidden LTO visibility and therefore
/// may participate in (single-module) CFI and whole-program vtable
/// optimization.
@@ -1369,6 +1406,10 @@ public:
void CreateFunctionTypeMetadataForIcall(const FunctionDecl *FD,
llvm::Function *F);
+ /// Whether this function's return type has no side effects, and thus may
+ /// be trivially discarded if it is unused.
+ bool MayDropFunctionReturn(const ASTContext &Context, QualType ReturnType);
+
/// Returns whether this module needs the "all-vtables" type identifier.
bool NeedAllVtablesTypeId() const;
@@ -1420,6 +1461,10 @@ public:
TBAAAccessInfo *TBAAInfo = nullptr);
bool stopAutoInit();
+ /// Print the postfix for externalized static variable for single source
+ /// offloading languages CUDA and HIP.
+ void printPostfixForExternalizedStaticVar(llvm::raw_ostream &OS) const;
+
private:
llvm::Constant *GetOrCreateLLVMFunction(
StringRef MangledName, llvm::Type *Ty, GlobalDecl D, bool ForVTable,
@@ -1432,11 +1477,10 @@ private:
const FunctionDecl *FD);
void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD);
- llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
- llvm::PointerType *PTy,
- const VarDecl *D,
- ForDefinition_t IsForDefinition
- = NotForDefinition);
+ llvm::Constant *
+ GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
+ unsigned AddrSpace, const VarDecl *D,
+ ForDefinition_t IsForDefinition = NotForDefinition);
bool GetCPUAndFeaturesAttributes(GlobalDecl GD,
llvm::AttrBuilder &AttrBuilder);
diff --git a/clang/lib/CodeGen/CodeGenPGO.cpp b/clang/lib/CodeGen/CodeGenPGO.cpp
index 08ae87785065..d828ac0eb5e9 100644
--- a/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -811,10 +811,10 @@ void CodeGenPGO::assignRegionCounters(GlobalDecl GD, llvm::Function *Fn) {
if (isa<CXXDestructorDecl>(D) && GD.getDtorType() != Dtor_Base)
return;
+ CGM.ClearUnusedCoverageMapping(D);
if (Fn->hasFnAttribute(llvm::Attribute::NoProfile))
return;
- CGM.ClearUnusedCoverageMapping(D);
setFuncName(Fn);
mapRegionCounters(D);
@@ -962,6 +962,12 @@ void CodeGenPGO::emitCounterIncrement(CGBuilderTy &Builder, const Stmt *S,
makeArrayRef(Args));
}
+void CodeGenPGO::setValueProfilingFlag(llvm::Module &M) {
+ if (CGM.getCodeGenOpts().hasProfileClangInstr())
+ M.addModuleFlag(llvm::Module::Warning, "EnableValueProfiling",
+ uint32_t(EnableValueProfiling));
+}
+
// This method either inserts a call to the profile run-time during
// instrumentation or puts profile data into metadata for PGO use.
void CodeGenPGO::valueProfile(CGBuilderTy &Builder, uint32_t ValueKind,
diff --git a/clang/lib/CodeGen/CodeGenPGO.h b/clang/lib/CodeGen/CodeGenPGO.h
index 906c5e406d77..f740692ac205 100644
--- a/clang/lib/CodeGen/CodeGenPGO.h
+++ b/clang/lib/CodeGen/CodeGenPGO.h
@@ -87,6 +87,10 @@ public:
// Insert instrumentation or attach profile metadata at value sites
void valueProfile(CGBuilderTy &Builder, uint32_t ValueKind,
llvm::Instruction *ValueSite, llvm::Value *ValuePtr);
+
+ // Set a module flag indicating if value profiling is enabled.
+ void setValueProfilingFlag(llvm::Module &M);
+
private:
void setFuncName(llvm::Function *Fn);
void setFuncName(StringRef Name, llvm::GlobalValue::LinkageTypes Linkage);
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index 7537ac12f1c8..9cb42941cb96 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -601,7 +601,16 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
llvm::FixedVectorType::get(ConvertType(Context.BoolTy), Size); \
break;
#include "clang/Basic/PPCTypes.def"
- case BuiltinType::Dependent:
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
+ {
+ ASTContext::BuiltinVectorTypeInfo Info =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(Ty));
+ return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
+ Info.EC.getKnownMinValue() *
+ Info.NumVectors);
+ }
+ case BuiltinType::Dependent:
#define BUILTIN_TYPE(Id, SingletonId)
#define PLACEHOLDER_TYPE(Id, SingletonId) \
case BuiltinType::Id:
diff --git a/clang/lib/CodeGen/CoverageMappingGen.cpp b/clang/lib/CodeGen/CoverageMappingGen.cpp
index 5c25c204cc0b..8a11da600e4a 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -104,26 +104,21 @@ class SourceMappingRegion {
/// The region's ending location.
Optional<SourceLocation> LocEnd;
- /// Whether this region should be emitted after its parent is emitted.
- bool DeferRegion;
-
/// Whether this region is a gap region. The count from a gap region is set
/// as the line execution count if there are no other regions on the line.
bool GapRegion;
public:
SourceMappingRegion(Counter Count, Optional<SourceLocation> LocStart,
- Optional<SourceLocation> LocEnd, bool DeferRegion = false,
- bool GapRegion = false)
- : Count(Count), LocStart(LocStart), LocEnd(LocEnd),
- DeferRegion(DeferRegion), GapRegion(GapRegion) {}
+ Optional<SourceLocation> LocEnd, bool GapRegion = false)
+ : Count(Count), LocStart(LocStart), LocEnd(LocEnd), GapRegion(GapRegion) {
+ }
SourceMappingRegion(Counter Count, Optional<Counter> FalseCount,
Optional<SourceLocation> LocStart,
- Optional<SourceLocation> LocEnd, bool DeferRegion = false,
- bool GapRegion = false)
+ Optional<SourceLocation> LocEnd, bool GapRegion = false)
: Count(Count), FalseCount(FalseCount), LocStart(LocStart),
- LocEnd(LocEnd), DeferRegion(DeferRegion), GapRegion(GapRegion) {}
+ LocEnd(LocEnd), GapRegion(GapRegion) {}
const Counter &getCounter() const { return Count; }
@@ -155,10 +150,6 @@ public:
return *LocEnd;
}
- bool isDeferred() const { return DeferRegion; }
-
- void setDeferred(bool Deferred) { DeferRegion = Deferred; }
-
bool isGap() const { return GapRegion; }
void setGap(bool Gap) { GapRegion = Gap; }
@@ -544,10 +535,6 @@ struct CounterCoverageMappingBuilder
/// A stack of currently live regions.
std::vector<SourceMappingRegion> RegionStack;
- /// The currently deferred region: its end location and count can be set once
- /// its parent has been popped from the region stack.
- Optional<SourceMappingRegion> DeferredRegion;
-
CounterExpressionBuilder Builder;
/// A location in the most recently visited file or macro.
@@ -556,8 +543,11 @@ struct CounterCoverageMappingBuilder
/// expressions cross file or macro boundaries.
SourceLocation MostRecentLocation;
- /// Location of the last terminated region.
- Optional<std::pair<SourceLocation, size_t>> LastTerminatedRegion;
+ /// Whether the visitor at a terminate statement.
+ bool HasTerminateStmt = false;
+
+ /// Gap region counter after terminate statement.
+ Counter GapRegionCounter;
/// Return a counter for the subtraction of \c RHS from \c LHS
Counter subtractCounters(Counter LHS, Counter RHS) {
@@ -590,77 +580,13 @@ struct CounterCoverageMappingBuilder
if (StartLoc && !FalseCount.hasValue()) {
MostRecentLocation = *StartLoc;
- completeDeferred(Count, MostRecentLocation);
}
- RegionStack.emplace_back(Count, FalseCount, StartLoc, EndLoc,
- FalseCount.hasValue());
+ RegionStack.emplace_back(Count, FalseCount, StartLoc, EndLoc);
return RegionStack.size() - 1;
}
- /// Complete any pending deferred region by setting its end location and
- /// count, and then pushing it onto the region stack.
- size_t completeDeferred(Counter Count, SourceLocation DeferredEndLoc) {
- size_t Index = RegionStack.size();
- if (!DeferredRegion)
- return Index;
-
- // Consume the pending region.
- SourceMappingRegion DR = DeferredRegion.getValue();
- DeferredRegion = None;
-
- // If the region ends in an expansion, find the expansion site.
- FileID StartFile = SM.getFileID(DR.getBeginLoc());
- if (SM.getFileID(DeferredEndLoc) != StartFile) {
- if (isNestedIn(DeferredEndLoc, StartFile)) {
- do {
- DeferredEndLoc = getIncludeOrExpansionLoc(DeferredEndLoc);
- } while (StartFile != SM.getFileID(DeferredEndLoc));
- } else {
- return Index;
- }
- }
-
- // The parent of this deferred region ends where the containing decl ends,
- // so the region isn't useful.
- if (DR.getBeginLoc() == DeferredEndLoc)
- return Index;
-
- // If we're visiting statements in non-source order (e.g switch cases or
- // a loop condition) we can't construct a sensible deferred region.
- if (!SpellingRegion(SM, DR.getBeginLoc(), DeferredEndLoc).isInSourceOrder())
- return Index;
-
- DR.setGap(true);
- DR.setCounter(Count);
- DR.setEndLoc(DeferredEndLoc);
- handleFileExit(DeferredEndLoc);
- RegionStack.push_back(DR);
- return Index;
- }
-
- /// Complete a deferred region created after a terminated region at the
- /// top-level.
- void completeTopLevelDeferredRegion(Counter Count,
- SourceLocation DeferredEndLoc) {
- if (DeferredRegion || !LastTerminatedRegion)
- return;
-
- if (LastTerminatedRegion->second != RegionStack.size())
- return;
-
- SourceLocation Start = LastTerminatedRegion->first;
- if (SM.getFileID(Start) != SM.getMainFileID())
- return;
-
- SourceMappingRegion DR = RegionStack.back();
- DR.setStartLoc(Start);
- DR.setDeferred(false);
- DeferredRegion = DR;
- completeDeferred(Count, DeferredEndLoc);
- }
-
size_t locationDepth(SourceLocation Loc) {
size_t Depth = 0;
while (Loc.isValid()) {
@@ -676,7 +602,6 @@ struct CounterCoverageMappingBuilder
/// function's \c SourceRegions.
void popRegions(size_t ParentIndex) {
assert(RegionStack.size() >= ParentIndex && "parent not in stack");
- bool ParentOfDeferredRegion = false;
while (RegionStack.size() > ParentIndex) {
SourceMappingRegion &Region = RegionStack.back();
if (Region.hasStartLoc()) {
@@ -746,32 +671,9 @@ struct CounterCoverageMappingBuilder
assert(SM.isWrittenInSameFile(Region.getBeginLoc(), EndLoc));
assert(SpellingRegion(SM, Region).isInSourceOrder());
SourceRegions.push_back(Region);
-
- if (ParentOfDeferredRegion) {
- ParentOfDeferredRegion = false;
-
- // If there's an existing deferred region, keep the old one, because
- // it means there are two consecutive returns (or a similar pattern).
- if (!DeferredRegion.hasValue() &&
- // File IDs aren't gathered within macro expansions, so it isn't
- // useful to try and create a deferred region inside of one.
- !EndLoc.isMacroID())
- DeferredRegion =
- SourceMappingRegion(Counter::getZero(), EndLoc, None);
}
- } else if (Region.isDeferred()) {
- assert(!ParentOfDeferredRegion && "Consecutive deferred regions");
- ParentOfDeferredRegion = true;
- }
RegionStack.pop_back();
-
- // If the zero region pushed after the last terminated region no longer
- // exists, clear its cached information.
- if (LastTerminatedRegion &&
- RegionStack.size() < LastTerminatedRegion->second)
- LastTerminatedRegion = None;
}
- assert(!ParentOfDeferredRegion && "Deferred region with no parent");
}
/// Return the currently active region.
@@ -955,8 +857,6 @@ struct CounterCoverageMappingBuilder
handleFileExit(StartLoc);
if (!Region.hasStartLoc())
Region.setStartLoc(StartLoc);
-
- completeDeferred(Region.getCounter(), StartLoc);
}
/// Mark \c S as a terminator, starting a zero region.
@@ -967,30 +867,56 @@ struct CounterCoverageMappingBuilder
if (!Region.hasEndLoc())
Region.setEndLoc(EndLoc);
pushRegion(Counter::getZero());
- auto &ZeroRegion = getRegion();
- ZeroRegion.setDeferred(true);
- LastTerminatedRegion = {EndLoc, RegionStack.size()};
+ HasTerminateStmt = true;
}
/// Find a valid gap range between \p AfterLoc and \p BeforeLoc.
Optional<SourceRange> findGapAreaBetween(SourceLocation AfterLoc,
SourceLocation BeforeLoc) {
+ // If AfterLoc is in function-like macro, use the right parenthesis
+ // location.
+ if (AfterLoc.isMacroID()) {
+ FileID FID = SM.getFileID(AfterLoc);
+ const SrcMgr::ExpansionInfo *EI = &SM.getSLocEntry(FID).getExpansion();
+ if (EI->isFunctionMacroExpansion())
+ AfterLoc = EI->getExpansionLocEnd();
+ }
+
+ size_t StartDepth = locationDepth(AfterLoc);
+ size_t EndDepth = locationDepth(BeforeLoc);
+ while (!SM.isWrittenInSameFile(AfterLoc, BeforeLoc)) {
+ bool UnnestStart = StartDepth >= EndDepth;
+ bool UnnestEnd = EndDepth >= StartDepth;
+ if (UnnestEnd) {
+ assert(SM.isWrittenInSameFile(getStartOfFileOrMacro(BeforeLoc),
+ BeforeLoc));
+
+ BeforeLoc = getIncludeOrExpansionLoc(BeforeLoc);
+ assert(BeforeLoc.isValid());
+ EndDepth--;
+ }
+ if (UnnestStart) {
+ assert(SM.isWrittenInSameFile(AfterLoc,
+ getEndOfFileOrMacro(AfterLoc)));
+
+ AfterLoc = getIncludeOrExpansionLoc(AfterLoc);
+ assert(AfterLoc.isValid());
+ AfterLoc = getPreciseTokenLocEnd(AfterLoc);
+ assert(AfterLoc.isValid());
+ StartDepth--;
+ }
+ }
+ AfterLoc = getPreciseTokenLocEnd(AfterLoc);
// If the start and end locations of the gap are both within the same macro
// file, the range may not be in source order.
if (AfterLoc.isMacroID() || BeforeLoc.isMacroID())
return None;
- if (!SM.isWrittenInSameFile(AfterLoc, BeforeLoc))
+ if (!SM.isWrittenInSameFile(AfterLoc, BeforeLoc) ||
+ !SpellingRegion(SM, AfterLoc, BeforeLoc).isInSourceOrder())
return None;
return {{AfterLoc, BeforeLoc}};
}
- /// Find the source range after \p AfterStmt and before \p BeforeStmt.
- Optional<SourceRange> findGapAreaBetween(const Stmt *AfterStmt,
- const Stmt *BeforeStmt) {
- return findGapAreaBetween(getPreciseTokenLocEnd(getEnd(AfterStmt)),
- getStart(BeforeStmt));
- }
-
/// Emit a gap region between \p StartLoc and \p EndLoc with the given count.
void fillGapAreaWithCount(SourceLocation StartLoc, SourceLocation EndLoc,
Counter Count) {
@@ -1015,15 +941,13 @@ struct CounterCoverageMappingBuilder
CoverageMappingModuleGen &CVM,
llvm::DenseMap<const Stmt *, unsigned> &CounterMap, SourceManager &SM,
const LangOptions &LangOpts)
- : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap),
- DeferredRegion(None) {}
+ : CoverageMappingBuilder(CVM, SM, LangOpts), CounterMap(CounterMap) {}
/// Write the mapping data to the output stream
void write(llvm::raw_ostream &OS) {
llvm::SmallVector<unsigned, 8> VirtualFileMapping;
gatherFileIDs(VirtualFileMapping);
SourceRegionFilter Filter = emitExpansionRegions();
- assert(!DeferredRegion && "Deferred region never completed");
emitSourceRegions(Filter);
gatherSkippedRegions();
@@ -1038,15 +962,32 @@ struct CounterCoverageMappingBuilder
void VisitStmt(const Stmt *S) {
if (S->getBeginLoc().isValid())
extendRegion(S);
+ const Stmt *LastStmt = nullptr;
+ bool SaveTerminateStmt = HasTerminateStmt;
+ HasTerminateStmt = false;
+ GapRegionCounter = Counter::getZero();
for (const Stmt *Child : S->children())
- if (Child)
+ if (Child) {
+ // If last statement contains terminate statements, add a gap area
+ // between the two statements. Skipping attributed statements, because
+ // they don't have valid start location.
+ if (LastStmt && HasTerminateStmt && !dyn_cast<AttributedStmt>(Child)) {
+ auto Gap = findGapAreaBetween(getEnd(LastStmt), getStart(Child));
+ if (Gap)
+ fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(),
+ GapRegionCounter);
+ SaveTerminateStmt = true;
+ HasTerminateStmt = false;
+ }
this->Visit(Child);
+ LastStmt = Child;
+ }
+ if (SaveTerminateStmt)
+ HasTerminateStmt = true;
handleFileExit(getEnd(S));
}
void VisitDecl(const Decl *D) {
- assert(!DeferredRegion && "Deferred region never completed");
-
Stmt *Body = D->getBody();
// Do not propagate region counts into system headers.
@@ -1064,11 +1005,6 @@ struct CounterCoverageMappingBuilder
propagateCounts(getRegionCounter(Body), Body,
/*VisitChildren=*/!Defaulted);
assert(RegionStack.empty() && "Regions entered but never exited");
-
- // Discard the last uncompleted deferred region in a decl, if one exists.
- // This prevents lines at the end of a function containing only whitespace
- // or closing braces from being marked as uncovered.
- DeferredRegion = None;
}
void VisitReturnStmt(const ReturnStmt *S) {
@@ -1102,8 +1038,6 @@ struct CounterCoverageMappingBuilder
void VisitLabelStmt(const LabelStmt *S) {
Counter LabelCount = getRegionCounter(S);
SourceLocation Start = getStart(S);
- completeTopLevelDeferredRegion(LabelCount, Start);
- completeDeferred(LabelCount, Start);
// We can't extendRegion here or we risk overlapping with our new region.
handleFileExit(Start);
pushRegion(LabelCount, Start);
@@ -1148,6 +1082,9 @@ struct CounterCoverageMappingBuilder
Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
BreakContinue BC = BreakContinueStack.pop_back_val();
+ bool BodyHasTerminateStmt = HasTerminateStmt;
+ HasTerminateStmt = false;
+
// Go back to handle the condition.
Counter CondCount =
addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
@@ -1155,14 +1092,18 @@ struct CounterCoverageMappingBuilder
adjustForOutOfOrderTraversal(getEnd(S));
// The body count applies to the area immediately after the increment.
- auto Gap = findGapAreaBetween(S->getCond(), S->getBody());
+ auto Gap = findGapAreaBetween(S->getRParenLoc(), getStart(S->getBody()));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
Counter OutCount =
addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
- if (OutCount != ParentCount)
+ if (OutCount != ParentCount) {
pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ if (BodyHasTerminateStmt)
+ HasTerminateStmt = true;
+ }
// Create Branch Region around condition.
createBranchRegion(S->getCond(), BodyCount,
@@ -1181,17 +1122,25 @@ struct CounterCoverageMappingBuilder
propagateCounts(addCounters(ParentCount, BodyCount), S->getBody());
BreakContinue BC = BreakContinueStack.pop_back_val();
+ bool BodyHasTerminateStmt = HasTerminateStmt;
+ HasTerminateStmt = false;
+
Counter CondCount = addCounters(BackedgeCount, BC.ContinueCount);
propagateCounts(CondCount, S->getCond());
Counter OutCount =
addCounters(BC.BreakCount, subtractCounters(CondCount, BodyCount));
- if (OutCount != ParentCount)
+ if (OutCount != ParentCount) {
pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ }
// Create Branch Region around condition.
createBranchRegion(S->getCond(), BodyCount,
subtractCounters(CondCount, BodyCount));
+
+ if (BodyHasTerminateStmt)
+ HasTerminateStmt = true;
}
void VisitForStmt(const ForStmt *S) {
@@ -1212,6 +1161,9 @@ struct CounterCoverageMappingBuilder
Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
BreakContinue BodyBC = BreakContinueStack.pop_back_val();
+ bool BodyHasTerminateStmt = HasTerminateStmt;
+ HasTerminateStmt = false;
+
// The increment is essentially part of the body but it needs to include
// the count for all the continue statements.
BreakContinue IncrementBC;
@@ -1230,15 +1182,18 @@ struct CounterCoverageMappingBuilder
}
// The body count applies to the area immediately after the increment.
- auto Gap = findGapAreaBetween(getPreciseTokenLocEnd(S->getRParenLoc()),
- getStart(S->getBody()));
+ auto Gap = findGapAreaBetween(S->getRParenLoc(), getStart(S->getBody()));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
Counter OutCount = addCounters(BodyBC.BreakCount, IncrementBC.BreakCount,
subtractCounters(CondCount, BodyCount));
- if (OutCount != ParentCount)
+ if (OutCount != ParentCount) {
pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ if (BodyHasTerminateStmt)
+ HasTerminateStmt = true;
+ }
// Create Branch Region around condition.
createBranchRegion(S->getCond(), BodyCount,
@@ -1260,9 +1215,11 @@ struct CounterCoverageMappingBuilder
Counter BackedgeCount = propagateCounts(BodyCount, S->getBody());
BreakContinue BC = BreakContinueStack.pop_back_val();
+ bool BodyHasTerminateStmt = HasTerminateStmt;
+ HasTerminateStmt = false;
+
// The body count applies to the area immediately after the range.
- auto Gap = findGapAreaBetween(getPreciseTokenLocEnd(S->getRParenLoc()),
- getStart(S->getBody()));
+ auto Gap = findGapAreaBetween(S->getRParenLoc(), getStart(S->getBody()));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
@@ -1270,8 +1227,12 @@ struct CounterCoverageMappingBuilder
addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
Counter OutCount =
addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
- if (OutCount != ParentCount)
+ if (OutCount != ParentCount) {
pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ if (BodyHasTerminateStmt)
+ HasTerminateStmt = true;
+ }
// Create Branch Region around condition.
createBranchRegion(S->getCond(), BodyCount,
@@ -1291,8 +1252,7 @@ struct CounterCoverageMappingBuilder
BreakContinue BC = BreakContinueStack.pop_back_val();
// The body count applies to the area immediately after the collection.
- auto Gap = findGapAreaBetween(getPreciseTokenLocEnd(S->getRParenLoc()),
- getStart(S->getBody()));
+ auto Gap = findGapAreaBetween(S->getRParenLoc(), getStart(S->getBody()));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), BodyCount);
@@ -1300,8 +1260,10 @@ struct CounterCoverageMappingBuilder
addCounters(ParentCount, BackedgeCount, BC.ContinueCount);
Counter OutCount =
addCounters(BC.BreakCount, subtractCounters(LoopCount, BodyCount));
- if (OutCount != ParentCount)
+ if (OutCount != ParentCount) {
pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ }
}
void VisitSwitchStmt(const SwitchStmt *S) {
@@ -1321,8 +1283,7 @@ struct CounterCoverageMappingBuilder
// the unreachable code at the beginning of the switch body.
size_t Index = pushRegion(Counter::getZero(), getStart(CS));
getRegion().setGap(true);
- for (const auto *Child : CS->children())
- Visit(Child);
+ Visit(Body);
// Set the end for the body of the switch, if it isn't already set.
for (size_t i = RegionStack.size(); i != Index; --i) {
@@ -1344,6 +1305,7 @@ struct CounterCoverageMappingBuilder
Counter ExitCount = getRegionCounter(S);
SourceLocation ExitLoc = getEnd(S);
pushRegion(ExitCount);
+ GapRegionCounter = ExitCount;
// Ensure that handleFileExit recognizes when the end location is located
// in a different file.
@@ -1386,6 +1348,8 @@ struct CounterCoverageMappingBuilder
else
pushRegion(Count, getStart(S));
+ GapRegionCounter = Count;
+
if (const auto *CS = dyn_cast<CaseStmt>(S)) {
Visit(CS->getLHS());
if (const Expr *RHS = CS->getRHS())
@@ -1411,7 +1375,7 @@ struct CounterCoverageMappingBuilder
propagateCounts(ParentCount, S->getCond());
// The 'then' count applies to the area immediately after the condition.
- auto Gap = findGapAreaBetween(S->getCond(), S->getThen());
+ auto Gap = findGapAreaBetween(S->getRParenLoc(), getStart(S->getThen()));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), ThenCount);
@@ -1420,17 +1384,25 @@ struct CounterCoverageMappingBuilder
Counter ElseCount = subtractCounters(ParentCount, ThenCount);
if (const Stmt *Else = S->getElse()) {
+ bool ThenHasTerminateStmt = HasTerminateStmt;
+ HasTerminateStmt = false;
+
// The 'else' count applies to the area immediately after the 'then'.
- Gap = findGapAreaBetween(S->getThen(), Else);
+ Gap = findGapAreaBetween(getEnd(S->getThen()), getStart(Else));
if (Gap)
fillGapAreaWithCount(Gap->getBegin(), Gap->getEnd(), ElseCount);
extendRegion(Else);
OutCount = addCounters(OutCount, propagateCounts(ElseCount, Else));
+
+ if (ThenHasTerminateStmt)
+ HasTerminateStmt = true;
} else
OutCount = addCounters(OutCount, ElseCount);
- if (OutCount != ParentCount)
+ if (OutCount != ParentCount) {
pushRegion(OutCount);
+ GapRegionCounter = OutCount;
+ }
// Create Branch Region around condition.
createBranchRegion(S->getCond(), ThenCount,
@@ -1462,7 +1434,7 @@ struct CounterCoverageMappingBuilder
Counter ParentCount = getRegion().getCounter();
Counter TrueCount = getRegionCounter(E);
- Visit(E->getCond());
+ propagateCounts(ParentCount, E->getCond());
if (!isa<BinaryConditionalOperator>(E)) {
// The 'then' count applies to the area immediately after the condition.
@@ -1588,14 +1560,22 @@ static void dump(llvm::raw_ostream &OS, StringRef FunctionName,
CoverageMappingModuleGen::CoverageMappingModuleGen(
CodeGenModule &CGM, CoverageSourceInfo &SourceInfo)
: CGM(CGM), SourceInfo(SourceInfo) {
- ProfilePrefixMap = CGM.getCodeGenOpts().ProfilePrefixMap;
+ CoveragePrefixMap = CGM.getCodeGenOpts().CoveragePrefixMap;
+}
+
+std::string CoverageMappingModuleGen::getCurrentDirname() {
+ if (!CGM.getCodeGenOpts().CoverageCompilationDir.empty())
+ return CGM.getCodeGenOpts().CoverageCompilationDir;
+
+ SmallString<256> CWD;
+ llvm::sys::fs::current_path(CWD);
+ return CWD.str().str();
}
std::string CoverageMappingModuleGen::normalizeFilename(StringRef Filename) {
llvm::SmallString<256> Path(Filename);
- llvm::sys::fs::make_absolute(Path);
llvm::sys::path::remove_dots(Path, /*remove_dot_dot=*/true);
- for (const auto &Entry : ProfilePrefixMap) {
+ for (const auto &Entry : CoveragePrefixMap) {
if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
break;
}
@@ -1674,18 +1654,17 @@ void CoverageMappingModuleGen::addFunctionMappingRecord(
// also processed by the CoverageMappingWriter which performs
// additional minimization operations such as reducing the number of
// expressions.
+ llvm::SmallVector<std::string, 16> FilenameStrs;
std::vector<StringRef> Filenames;
std::vector<CounterExpression> Expressions;
std::vector<CounterMappingRegion> Regions;
- llvm::SmallVector<std::string, 16> FilenameStrs;
- llvm::SmallVector<StringRef, 16> FilenameRefs;
- FilenameStrs.resize(FileEntries.size());
- FilenameRefs.resize(FileEntries.size());
+ FilenameStrs.resize(FileEntries.size() + 1);
+ FilenameStrs[0] = normalizeFilename(getCurrentDirname());
for (const auto &Entry : FileEntries) {
auto I = Entry.second;
FilenameStrs[I] = normalizeFilename(Entry.first->getName());
- FilenameRefs[I] = FilenameStrs[I];
}
+ ArrayRef<std::string> FilenameRefs = llvm::makeArrayRef(FilenameStrs);
RawCoverageMappingReader Reader(CoverageMapping, FilenameRefs, Filenames,
Expressions, Regions);
if (Reader.read())
@@ -1702,19 +1681,18 @@ void CoverageMappingModuleGen::emit() {
// Create the filenames and merge them with coverage mappings
llvm::SmallVector<std::string, 16> FilenameStrs;
- llvm::SmallVector<StringRef, 16> FilenameRefs;
- FilenameStrs.resize(FileEntries.size());
- FilenameRefs.resize(FileEntries.size());
+ FilenameStrs.resize(FileEntries.size() + 1);
+ // The first filename is the current working directory.
+ FilenameStrs[0] = normalizeFilename(getCurrentDirname());
for (const auto &Entry : FileEntries) {
auto I = Entry.second;
FilenameStrs[I] = normalizeFilename(Entry.first->getName());
- FilenameRefs[I] = FilenameStrs[I];
}
std::string Filenames;
{
llvm::raw_string_ostream OS(Filenames);
- CoverageFilenamesSectionWriter(FilenameRefs).write(OS);
+ CoverageFilenamesSectionWriter(FilenameStrs).write(OS);
}
auto *FilenamesVal =
llvm::ConstantDataArray::getString(Ctx, Filenames, false);
@@ -1772,7 +1750,7 @@ unsigned CoverageMappingModuleGen::getFileID(const FileEntry *File) {
auto It = FileEntries.find(File);
if (It != FileEntries.end())
return It->second;
- unsigned FileID = FileEntries.size();
+ unsigned FileID = FileEntries.size() + 1;
FileEntries.insert(std::make_pair(File, FileID));
return FileID;
}
diff --git a/clang/lib/CodeGen/CoverageMappingGen.h b/clang/lib/CodeGen/CoverageMappingGen.h
index b26f79be5316..ae4f435d4ff3 100644
--- a/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/clang/lib/CodeGen/CoverageMappingGen.h
@@ -93,8 +93,9 @@ class CoverageMappingModuleGen {
llvm::SmallDenseMap<const FileEntry *, unsigned, 8> FileEntries;
std::vector<llvm::Constant *> FunctionNames;
std::vector<FunctionInfo> FunctionRecords;
- std::map<std::string, std::string> ProfilePrefixMap;
+ std::map<std::string, std::string> CoveragePrefixMap;
+ std::string getCurrentDirname();
std::string normalizeFilename(StringRef Filename);
/// Emit a function record.
diff --git a/clang/lib/CodeGen/EHScopeStack.h b/clang/lib/CodeGen/EHScopeStack.h
index 3a640d6117d6..cd649cb11f9b 100644
--- a/clang/lib/CodeGen/EHScopeStack.h
+++ b/clang/lib/CodeGen/EHScopeStack.h
@@ -150,6 +150,8 @@ public:
Cleanup(Cleanup &&) {}
Cleanup() = default;
+ virtual bool isRedundantBeforeReturn() { return false; }
+
/// Generation flags.
class Flags {
enum {
@@ -236,6 +238,9 @@ private:
/// The innermost EH scope on the stack.
stable_iterator InnermostEHScope;
+ /// The CGF this Stack belong to
+ CodeGenFunction* CGF;
+
/// The current set of branch fixups. A branch fixup is a jump to
/// an as-yet unemitted label, i.e. a label for which we don't yet
/// know the EH stack depth. Whenever we pop a cleanup, we have
@@ -261,9 +266,10 @@ private:
void *pushCleanup(CleanupKind K, size_t DataSize);
public:
- EHScopeStack() : StartOfBuffer(nullptr), EndOfBuffer(nullptr),
- StartOfData(nullptr), InnermostNormalCleanup(stable_end()),
- InnermostEHScope(stable_end()) {}
+ EHScopeStack()
+ : StartOfBuffer(nullptr), EndOfBuffer(nullptr), StartOfData(nullptr),
+ InnermostNormalCleanup(stable_end()), InnermostEHScope(stable_end()),
+ CGF(nullptr) {}
~EHScopeStack() { delete[] StartOfBuffer; }
/// Push a lazily-created cleanup on the stack.
@@ -311,6 +317,8 @@ public:
std::memcpy(Buffer, Cleanup, Size);
}
+ void setCGF(CodeGenFunction *inCGF) { CGF = inCGF; }
+
/// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
void popCleanup();
diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 50fb30a95cbb..d3dc0e6212b8 100644
--- a/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -337,14 +337,20 @@ public:
/// Determine whether we will definitely emit this variable with a constant
/// initializer, either because the language semantics demand it or because
/// we know that the initializer is a constant.
- bool isEmittedWithConstantInitializer(const VarDecl *VD) const {
+ // For weak definitions, any initializer available in the current translation
+ // is not necessarily reflective of the initializer used; such initializers
+ // are ignored unless if InspectInitForWeakDef is true.
+ bool
+ isEmittedWithConstantInitializer(const VarDecl *VD,
+ bool InspectInitForWeakDef = false) const {
VD = VD->getMostRecentDecl();
if (VD->hasAttr<ConstInitAttr>())
return true;
// All later checks examine the initializer specified on the variable. If
// the variable is weak, such examination would not be correct.
- if (VD->isWeak() || VD->hasAttr<SelectAnyAttr>())
+ if (!InspectInitForWeakDef &&
+ (VD->isWeak() || VD->hasAttr<SelectAnyAttr>()))
return false;
const VarDecl *InitDecl = VD->getInitializingDeclaration();
@@ -515,6 +521,9 @@ public:
: ItaniumCXXABI(CGM, /*UseARMMethodPtrABI=*/true,
/*UseARMGuardVarABI=*/true) {}
void emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) override;
+ llvm::CallInst *
+ emitTerminateForUnexpectedException(CodeGenFunction &CGF,
+ llvm::Value *Exn) override;
private:
bool HasThisReturn(GlobalDecl GD) const override {
@@ -543,7 +552,7 @@ private:
}
CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
- switch (CGM.getTarget().getCXXABI().getKind()) {
+ switch (CGM.getContext().getCXXABIKind()) {
// For IR-generation purposes, there's no significant difference
// between the ARM and iOS ABIs.
case TargetCXXABI::GenericARM:
@@ -648,7 +657,7 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// for consistency.
llvm::Value *This = ThisAddr.getPointer();
llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
- Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
+ Ptr = Builder.CreateInBoundsGEP(Builder.getInt8Ty(), Ptr, Adj);
This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
ThisPtrForCall = This;
@@ -717,7 +726,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
}
if (ShouldEmitVFEInfo) {
- llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
+ llvm::Value *VFPAddr =
+ Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
// If doing VFE, load from the vtable with a type.checked.load intrinsic
// call. Note that we use the GEP to calculate the address to load from
@@ -735,7 +745,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
// When not doing VFE, emit a normal load, as it allows more
// optimisations than type.checked.load.
if (ShouldEmitCFICheck || ShouldEmitWPDInfo) {
- llvm::Value *VFPAddr = Builder.CreateGEP(VTable, VTableOffset);
+ llvm::Value *VFPAddr =
+ Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
CheckResult = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test),
{Builder.CreateBitCast(VFPAddr, CGF.Int8PtrTy), TypeId});
@@ -748,11 +759,13 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
{VTable, VTableOffset});
VirtualFn = CGF.Builder.CreateBitCast(VirtualFn, FTy->getPointerTo());
} else {
- llvm::Value *VFPAddr = CGF.Builder.CreateGEP(VTable, VTableOffset);
+ llvm::Value *VFPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, VTable, VTableOffset);
VFPAddr = CGF.Builder.CreateBitCast(
VFPAddr, FTy->getPointerTo()->getPointerTo());
VirtualFn = CGF.Builder.CreateAlignedLoad(
- VFPAddr, CGF.getPointerAlign(), "memptr.virtualfn");
+ FTy->getPointerTo(), VFPAddr, CGF.getPointerAlign(),
+ "memptr.virtualfn");
}
}
assert(VirtualFn && "Virtual fuction pointer not created!");
@@ -855,8 +868,8 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(
Base = Builder.CreateElementBitCast(Base, CGF.Int8Ty);
// Apply the offset, which we assume is non-null.
- llvm::Value *Addr =
- Builder.CreateInBoundsGEP(Base.getPointer(), MemPtr, "memptr.offset");
+ llvm::Value *Addr = Builder.CreateInBoundsGEP(
+ Base.getElementType(), Base.getPointer(), MemPtr, "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the
// address space of the base pointer.
@@ -1250,14 +1263,14 @@ void ItaniumCXXABI::emitVirtualObjectDelete(CodeGenFunction &CGF,
// Track back to entry -2 and pull out the offset there.
llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
- VTable, -2, "complete-offset.ptr");
- llvm::Value *Offset =
- CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+ CGF.IntPtrTy, VTable, -2, "complete-offset.ptr");
+ llvm::Value *Offset = CGF.Builder.CreateAlignedLoad(CGF.IntPtrTy, OffsetPtr, CGF.getPointerAlign());
// Apply the offset.
llvm::Value *CompletePtr =
CGF.Builder.CreateBitCast(Ptr.getPointer(), CGF.Int8PtrTy);
- CompletePtr = CGF.Builder.CreateInBoundsGEP(CompletePtr, Offset);
+ CompletePtr =
+ CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, CompletePtr, Offset);
// If we're supposed to call the global delete, make sure we do so
// even if the destructor throws.
@@ -1462,9 +1475,11 @@ llvm::Value *ItaniumCXXABI::EmitTypeid(CodeGenFunction &CGF,
Value = CGF.Builder.CreateBitCast(Value, StdTypeInfoPtrTy->getPointerTo());
} else {
// Load the type info.
- Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ Value =
+ CGF.Builder.CreateConstInBoundsGEP1_64(StdTypeInfoPtrTy, Value, -1ULL);
}
- return CGF.Builder.CreateAlignedLoad(Value, CGF.getPointerAlign());
+ return CGF.Builder.CreateAlignedLoad(StdTypeInfoPtrTy, Value,
+ CGF.getPointerAlign());
}
bool ItaniumCXXABI::shouldDynamicCastCallBeNullChecked(bool SrcIsPtr,
@@ -1530,9 +1545,9 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
// Get the offset-to-top from the vtable.
OffsetToTop =
- CGF.Builder.CreateConstInBoundsGEP1_32(/*Type=*/nullptr, VTable, -2U);
+ CGF.Builder.CreateConstInBoundsGEP1_32(CGM.Int32Ty, VTable, -2U);
OffsetToTop = CGF.Builder.CreateAlignedLoad(
- OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
+ CGM.Int32Ty, OffsetToTop, CharUnits::fromQuantity(4), "offset.to.top");
} else {
llvm::Type *PtrDiffLTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
@@ -1542,14 +1557,15 @@ llvm::Value *ItaniumCXXABI::EmitDynamicCastToVoid(CodeGenFunction &CGF,
CGF.GetVTablePtr(ThisAddr, PtrDiffLTy->getPointerTo(), ClassDecl);
// Get the offset-to-top from the vtable.
- OffsetToTop = CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_64(PtrDiffLTy, VTable, -2ULL);
OffsetToTop = CGF.Builder.CreateAlignedLoad(
- OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
+ PtrDiffLTy, OffsetToTop, CGF.getPointerAlign(), "offset.to.top");
}
// Finally, add the offset to the pointer.
llvm::Value *Value = ThisAddr.getPointer();
Value = CGF.EmitCastToVoidPtr(Value);
- Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
+ Value = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, Value, OffsetToTop);
return CGF.Builder.CreateBitCast(Value, DestLTy);
}
@@ -1571,20 +1587,22 @@ ItaniumCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(ClassDecl,
BaseClassDecl);
llvm::Value *VBaseOffsetPtr =
- CGF.Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
- "vbase.offset.ptr");
+ CGF.Builder.CreateConstGEP1_64(
+ CGF.Int8Ty, VTablePtr, VBaseOffsetOffset.getQuantity(),
+ "vbase.offset.ptr");
llvm::Value *VBaseOffset;
if (CGM.getItaniumVTableContext().isRelativeLayout()) {
VBaseOffsetPtr =
CGF.Builder.CreateBitCast(VBaseOffsetPtr, CGF.Int32Ty->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
- VBaseOffsetPtr, CharUnits::fromQuantity(4), "vbase.offset");
+ CGF.Int32Ty, VBaseOffsetPtr, CharUnits::fromQuantity(4),
+ "vbase.offset");
} else {
VBaseOffsetPtr = CGF.Builder.CreateBitCast(VBaseOffsetPtr,
CGM.PtrDiffTy->getPointerTo());
VBaseOffset = CGF.Builder.CreateAlignedLoad(
- VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
+ CGM.PtrDiffTy, VBaseOffsetPtr, CGF.getPointerAlign(), "vbase.offset");
}
return VBaseOffset;
}
@@ -1767,8 +1785,22 @@ void ItaniumCXXABI::emitVTableDefinitions(CodeGenVTables &CGVT,
DC->getParent()->isTranslationUnit())
EmitFundamentalRTTIDescriptors(RD);
- if (!VTable->isDeclarationForLinker())
+ // Always emit type metadata on non-available_externally definitions, and on
+ // available_externally definitions if we are performing whole program
+ // devirtualization. For WPD we need the type metadata on all vtable
+ // definitions to ensure we associate derived classes with base classes
+ // defined in headers but with a strong definition only in a shared library.
+ if (!VTable->isDeclarationForLinker() ||
+ CGM.getCodeGenOpts().WholeProgramVTables) {
CGM.EmitVTableTypeMetadata(RD, VTable, VTLayout);
+ // For available_externally definitions, add the vtable to
+ // @llvm.compiler.used so that it isn't deleted before whole program
+ // analysis.
+ if (VTable->isDeclarationForLinker()) {
+ assert(CGM.getCodeGenOpts().WholeProgramVTables);
+ CGM.addCompilerUsedGlobal(VTable);
+ }
+ }
if (VTContext.isRelativeLayout() && !VTable->isDSOLocal())
CGVT.GenerateRelativeVTableAlias(VTable, VTable->getName());
@@ -1815,6 +1847,29 @@ ItaniumCXXABI::getVTableAddressPoint(BaseSubobject Base,
/*InRangeIndex=*/1);
}
+// Check whether all the non-inline virtual methods for the class have the
+// specified attribute.
+template <typename T>
+static bool CXXRecordAllNonInlineVirtualsHaveAttr(const CXXRecordDecl *RD) {
+ bool FoundNonInlineVirtualMethodWithAttr = false;
+ for (const auto *D : RD->noload_decls()) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ if (!FD->isVirtualAsWritten() || FD->isInlineSpecified() ||
+ FD->doesThisDeclarationHaveABody())
+ continue;
+ if (!D->hasAttr<T>())
+ return false;
+ FoundNonInlineVirtualMethodWithAttr = true;
+ }
+ }
+
+ // We didn't find any non-inline virtual methods missing the attribute. We
+ // will return true when we found at least one non-inline virtual with the
+ // attribute. (This lets our caller know that the attribute needs to be
+ // propagated up to the vtable.)
+ return FoundNonInlineVirtualMethodWithAttr;
+}
+
llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
CodeGenFunction &CGF, const CXXRecordDecl *VTableClass, BaseSubobject Base,
const CXXRecordDecl *NearestVBase) {
@@ -1828,10 +1883,12 @@ llvm::Value *ItaniumCXXABI::getVTableAddressPointInStructorWithVTT(
/// Load the VTT.
llvm::Value *VTT = CGF.LoadCXXVTT();
if (VirtualPointerIndex)
- VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(
+ CGF.VoidPtrTy, VTT, VirtualPointerIndex);
// And load the address point from the VTT.
- return CGF.Builder.CreateAlignedLoad(VTT, CGF.getPointerAlign());
+ return CGF.Builder.CreateAlignedLoad(CGF.VoidPtrTy, VTT,
+ CGF.getPointerAlign());
}
llvm::Constant *ItaniumCXXABI::getVTableAddressPointForConstExpr(
@@ -1870,6 +1927,24 @@ llvm::GlobalVariable *ItaniumCXXABI::getAddrOfVTable(const CXXRecordDecl *RD,
getContext().toCharUnitsFromBits(PAlign).getQuantity());
VTable->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ // In MS C++ if you have a class with virtual functions in which you are using
+ // selective member import/export, then all virtual functions must be exported
+ // unless they are inline, otherwise a link error will result. To match this
+ // behavior, for such classes, we dllimport the vtable if it is defined
+ // externally and all the non-inline virtual methods are marked dllimport, and
+ // we dllexport the vtable if it is defined in this TU and all the non-inline
+ // virtual methods are marked dllexport.
+ if (CGM.getTarget().hasPS4DLLImportExport()) {
+ if ((!RD->hasAttr<DLLImportAttr>()) && (!RD->hasAttr<DLLExportAttr>())) {
+ if (CGM.getVTables().isVTableExternal(RD)) {
+ if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD))
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ } else {
+ if (CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD))
+ VTable->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
+ }
+ }
+ }
CGM.setGVProperties(VTable, RD);
return VTable;
@@ -1880,9 +1955,10 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
Address This,
llvm::Type *Ty,
SourceLocation Loc) {
+ llvm::Type *TyPtr = Ty->getPointerTo();
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
llvm::Value *VTable = CGF.GetVTablePtr(
- This, Ty->getPointerTo()->getPointerTo(), MethodDecl->getParent());
+ This, TyPtr->getPointerTo(), MethodDecl->getParent());
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
llvm::Value *VFunc;
@@ -1899,14 +1975,15 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *Load = CGF.Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::load_relative, {CGM.Int32Ty}),
{VTable, llvm::ConstantInt::get(CGM.Int32Ty, 4 * VTableIndex)});
- VFuncLoad = CGF.Builder.CreateBitCast(Load, Ty->getPointerTo());
+ VFuncLoad = CGF.Builder.CreateBitCast(Load, TyPtr);
} else {
VTable =
- CGF.Builder.CreateBitCast(VTable, Ty->getPointerTo()->getPointerTo());
- llvm::Value *VTableSlotPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ CGF.Builder.CreateBitCast(VTable, TyPtr->getPointerTo());
+ llvm::Value *VTableSlotPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
+ TyPtr, VTable, VTableIndex, "vfn");
VFuncLoad =
- CGF.Builder.CreateAlignedLoad(VTableSlotPtr, CGF.getPointerAlign());
+ CGF.Builder.CreateAlignedLoad(TyPtr, VTableSlotPtr,
+ CGF.getPointerAlign());
}
// Add !invariant.load md to virtual function load to indicate that
@@ -2044,14 +2121,15 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
llvm::Value *Offset;
- llvm::Value *OffsetPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+ llvm::Value *OffsetPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
+ CGF.Int8Ty, VTablePtr, VirtualAdjustment);
if (CGF.CGM.getItaniumVTableContext().isRelativeLayout()) {
// Load the adjustment offset from the vtable as a 32-bit int.
OffsetPtr =
CGF.Builder.CreateBitCast(OffsetPtr, CGF.Int32Ty->getPointerTo());
Offset =
- CGF.Builder.CreateAlignedLoad(OffsetPtr, CharUnits::fromQuantity(4));
+ CGF.Builder.CreateAlignedLoad(CGF.Int32Ty, OffsetPtr,
+ CharUnits::fromQuantity(4));
} else {
llvm::Type *PtrDiffTy =
CGF.ConvertType(CGF.getContext().getPointerDiffType());
@@ -2060,10 +2138,12 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
// Load the adjustment offset from the vtable.
- Offset = CGF.Builder.CreateAlignedLoad(OffsetPtr, CGF.getPointerAlign());
+ Offset = CGF.Builder.CreateAlignedLoad(PtrDiffTy, OffsetPtr,
+ CGF.getPointerAlign());
}
// Adjust our pointer.
- ResultPtr = CGF.Builder.CreateInBoundsGEP(V.getPointer(), Offset);
+ ResultPtr = CGF.Builder.CreateInBoundsGEP(
+ V.getElementType(), V.getPointer(), Offset);
} else {
ResultPtr = V.getPointer();
}
@@ -2071,7 +2151,7 @@ static llvm::Value *performTypeAdjustment(CodeGenFunction &CGF,
// In a derived-to-base conversion, the non-virtual adjustment is
// applied second.
if (NonVirtualAdjustment && IsReturnAdjustment) {
- ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(ResultPtr,
+ ResultPtr = CGF.Builder.CreateConstInBoundsGEP1_64(CGF.Int8Ty, ResultPtr,
NonVirtualAdjustment);
}
@@ -2472,7 +2552,10 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
CGF.EmitNounwindRuntimeCall(getGuardReleaseFn(CGM, guardPtrTy),
guardAddr.getPointer());
} else {
- Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guardAddr);
+ // Store 1 into the first byte of the guard variable after initialization is
+ // complete.
+ Builder.CreateStore(llvm::ConstantInt::get(CGM.Int8Ty, 1),
+ Builder.CreateElementBitCast(guardAddr, CGM.Int8Ty));
}
CGF.EmitBlock(EndBlock);
@@ -2482,6 +2565,8 @@ void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
llvm::FunctionCallee dtor,
llvm::Constant *addr, bool TLS) {
+ assert(!CGF.getTarget().getTriple().isOSAIX() &&
+ "unexpected call to emitGlobalDtorWithCXAAtExit");
assert((TLS || CGF.getTypes().getCodeGenOpts().CXAAtExit) &&
"__cxa_atexit is disabled");
const char *Name = "__cxa_atexit";
@@ -2542,15 +2627,6 @@ static llvm::Function *createGlobalInitOrCleanupFn(CodeGen::CodeGenModule &CGM,
return GlobalInitOrCleanupFn;
}
-static FunctionDecl *
-createGlobalInitOrCleanupFnDecl(CodeGen::CodeGenModule &CGM, StringRef FnName) {
- ASTContext &Ctx = CGM.getContext();
- QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, llvm::None, {});
- return FunctionDecl::Create(
- Ctx, Ctx.getTranslationUnitDecl(), SourceLocation(), SourceLocation(),
- &Ctx.Idents.get(FnName), FunctionTy, nullptr, SC_Static, false, false);
-}
-
void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
for (const auto &I : DtorsUsingAtExit) {
int Priority = I.first;
@@ -2560,13 +2636,11 @@ void CodeGenModule::unregisterGlobalDtorsWithUnAtExit() {
llvm::Function *GlobalCleanupFn =
createGlobalInitOrCleanupFn(*this, GlobalCleanupFnName);
- FunctionDecl *GlobalCleanupFD =
- createGlobalInitOrCleanupFnDecl(*this, GlobalCleanupFnName);
-
CodeGenFunction CGF(*this);
- CGF.StartFunction(GlobalDecl(GlobalCleanupFD), getContext().VoidTy,
- GlobalCleanupFn, getTypes().arrangeNullaryFunction(),
- FunctionArgList(), SourceLocation(), SourceLocation());
+ CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalCleanupFn,
+ getTypes().arrangeNullaryFunction(), FunctionArgList(),
+ SourceLocation(), SourceLocation());
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
// Get the destructor function type, void(*)(void).
llvm::FunctionType *dtorFuncTy = llvm::FunctionType::get(CGF.VoidTy, false);
@@ -2619,13 +2693,12 @@ void CodeGenModule::registerGlobalDtorsWithAtExit() {
std::string("__GLOBAL_init_") + llvm::to_string(Priority);
llvm::Function *GlobalInitFn =
createGlobalInitOrCleanupFn(*this, GlobalInitFnName);
- FunctionDecl *GlobalInitFD =
- createGlobalInitOrCleanupFnDecl(*this, GlobalInitFnName);
CodeGenFunction CGF(*this);
- CGF.StartFunction(GlobalDecl(GlobalInitFD), getContext().VoidTy,
- GlobalInitFn, getTypes().arrangeNullaryFunction(),
- FunctionArgList(), SourceLocation(), SourceLocation());
+ CGF.StartFunction(GlobalDecl(), getContext().VoidTy, GlobalInitFn,
+ getTypes().arrangeNullaryFunction(), FunctionArgList(),
+ SourceLocation(), SourceLocation());
+ auto AL = ApplyDebugLocation::CreateArtificial(CGF);
// Since constructor functions are run in non-descending order of their
// priorities, destructors are registered in non-descending order of their
@@ -2742,7 +2815,7 @@ ItaniumCXXABI::getOrCreateThreadLocalWrapper(const VarDecl *VD,
if (CGM.supportsCOMDAT() && Wrapper->isWeakForLinker())
Wrapper->setComdat(CGM.getModule().getOrInsertComdat(Wrapper->getName()));
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper);
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Wrapper, /*IsThunk=*/false);
// Always resolve references to the wrapper at link time.
if (!Wrapper->hasLocalLinkage())
@@ -2875,8 +2948,8 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::GlobalVariable::ExternalWeakLinkage,
InitFnName.str(), &CGM.getModule());
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
- CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
- cast<llvm::Function>(Init));
+ CGM.SetLLVMFunctionAttributes(
+ GlobalDecl(), FI, cast<llvm::Function>(Init), /*IsThunk=*/false);
}
if (Init) {
@@ -2887,6 +2960,33 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
}
llvm::LLVMContext &Context = CGM.getModule().getContext();
+
+ // The linker on AIX is not happy with missing weak symbols. However,
+ // other TUs will not know whether the initialization routine exists
+ // so create an empty, init function to satisfy the linker.
+ // This is needed whenever a thread wrapper function is not used, and
+ // also when the symbol is weak.
+ if (CGM.getTriple().isOSAIX() && VD->hasDefinition() &&
+ isEmittedWithConstantInitializer(VD, true) &&
+ !VD->needsDestruction(getContext())) {
+ // Init should be null. If it were non-null, then the logic above would
+ // either be defining the function to be an alias or declaring the
+ // function with the expectation that the definition of the variable
+ // is elsewhere.
+ assert(Init == nullptr && "Expected Init to be null.");
+
+ llvm::Function *Func = llvm::Function::Create(
+ InitFnTy, Var->getLinkage(), InitFnName.str(), &CGM.getModule());
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
+ CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI,
+ cast<llvm::Function>(Func),
+ /*IsThunk=*/false);
+ // Create a function body that just returns
+ llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Func);
+ CGBuilderTy Builder(CGM, Entry);
+ Builder.CreateRetVoid();
+ }
+
llvm::BasicBlock *Entry = llvm::BasicBlock::Create(Context, "", Wrapper);
CGBuilderTy Builder(CGM, Entry);
if (HasConstantInitialization) {
@@ -2901,6 +3001,15 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
Fn->setCallingConv(llvm::CallingConv::CXX_FAST_TLS);
}
}
+ } else if (CGM.getTriple().isOSAIX()) {
+ // On AIX, except if constinit and also neither of class type or of
+ // (possibly multi-dimensional) array of class type, thread_local vars
+ // will have init routines regardless of whether they are
+ // const-initialized. Since the routine is guaranteed to exist, we can
+ // unconditionally call it without testing for its existance. This
+ // avoids potentially unresolved weak symbols which the AIX linker
+ // isn't happy with.
+ Builder.CreateCall(InitFnTy, Init);
} else {
// Don't know whether we have an init function. Call it if it exists.
llvm::Value *Have = Builder.CreateIsNotNull(Init);
@@ -2920,7 +3029,7 @@ void ItaniumCXXABI::EmitThreadLocalInitFuncs(
llvm::Value *Val = Var;
if (VD->getType()->isReferenceType()) {
CharUnits Align = CGM.getContext().getDeclAlign(VD);
- Val = Builder.CreateAlignedLoad(Val, Align);
+ Val = Builder.CreateAlignedLoad(Var->getValueType(), Var, Align);
}
if (Val->getType() != Wrapper->getReturnType())
Val = Builder.CreatePointerBitCastOrAddrSpaceCast(
@@ -3111,6 +3220,14 @@ ItaniumRTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
Name);
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
CGM.setGVProperties(GV, RD);
+ // Import the typeinfo symbol when all non-inline virtual methods are
+ // imported.
+ if (CGM.getTarget().hasPS4DLLImportExport()) {
+ if (RD && CXXRecordAllNonInlineVirtualsHaveAttr<DLLImportAttr>(RD)) {
+ GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
+ CGM.setDSOLocal(GV);
+ }
+ }
}
return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
@@ -3181,6 +3298,8 @@ static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -3285,11 +3404,14 @@ static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM,
if (CGM.getTriple().isWindowsGNUEnvironment())
return false;
- if (CGM.getVTables().isVTableExternal(RD))
+ if (CGM.getVTables().isVTableExternal(RD)) {
+ if (CGM.getTarget().hasPS4DLLImportExport())
+ return true;
+
return IsDLLImport && !CGM.getTriple().isWindowsItaniumEnvironment()
? false
: true;
-
+ }
if (IsDLLImport)
return true;
}
@@ -3741,6 +3863,18 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
new llvm::GlobalVariable(M, Init->getType(),
/*isConstant=*/true, Linkage, Init, Name);
+ // Export the typeinfo in the same circumstances as the vtable is exported.
+ auto GVDLLStorageClass = DLLStorageClass;
+ if (CGM.getTarget().hasPS4DLLImportExport()) {
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (RD->hasAttr<DLLExportAttr>() ||
+ CXXRecordAllNonInlineVirtualsHaveAttr<DLLExportAttr>(RD)) {
+ GVDLLStorageClass = llvm::GlobalVariable::DLLExportStorageClass;
+ }
+ }
+ }
+
// If there's already an old global variable, replace it with the new one.
if (OldGV) {
GV->takeName(OldGV);
@@ -3779,7 +3913,9 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(
CGM.setDSOLocal(GV);
TypeName->setDLLStorageClass(DLLStorageClass);
- GV->setDLLStorageClass(DLLStorageClass);
+ GV->setDLLStorageClass(CGM.getTarget().hasPS4DLLImportExport()
+ ? GVDLLStorageClass
+ : DLLStorageClass);
TypeName->setPartition(CGM.getCodeGenOpts().SymbolPartition);
GV->setPartition(CGM.getCodeGenOpts().SymbolPartition);
@@ -4360,7 +4496,8 @@ static void InitCatchParam(CodeGenFunction &CGF,
// we have to skip past in order to reach the exception data.
unsigned HeaderSize =
CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
- AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
+ AdjustedExn =
+ CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, Exn, HeaderSize);
// However, if we're catching a pointer-to-record type that won't
// work, because the personality function might have adjusted
@@ -4621,22 +4758,57 @@ void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF,
ItaniumCXXABI::emitBeginCatch(CGF, C);
}
+llvm::CallInst *
+WebAssemblyCXXABI::emitTerminateForUnexpectedException(CodeGenFunction &CGF,
+ llvm::Value *Exn) {
+ // Itanium ABI calls __clang_call_terminate(), which __cxa_begin_catch() on
+ // the violating exception to mark it handled, but it is currently hard to do
+ // with wasm EH instruction structure with catch/catch_all, we just call
+ // std::terminate and ignore the violating exception as in CGCXXABI.
+ // TODO Consider code transformation that makes calling __clang_call_terminate
+ // possible.
+ return CGCXXABI::emitTerminateForUnexpectedException(CGF, Exn);
+}
+
/// Register a global destructor as best as we know how.
void XLCXXABI::registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::FunctionCallee dtor,
- llvm::Constant *addr) {
- if (D.getTLSKind() != VarDecl::TLS_None)
- llvm::report_fatal_error("thread local storage not yet implemented on AIX");
+ llvm::FunctionCallee Dtor,
+ llvm::Constant *Addr) {
+ if (D.getTLSKind() != VarDecl::TLS_None) {
+ // atexit routine expects "int(*)(int,...)"
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.IntTy, CGM.IntTy, true);
+ llvm::PointerType *FpTy = FTy->getPointerTo();
+
+ // extern "C" int __pt_atexit_np(int flags, int(*)(int,...), ...);
+ llvm::FunctionType *AtExitTy =
+ llvm::FunctionType::get(CGM.IntTy, {CGM.IntTy, FpTy}, true);
+
+ // Fetch the actual function.
+ llvm::FunctionCallee AtExit =
+ CGM.CreateRuntimeFunction(AtExitTy, "__pt_atexit_np");
+
+ // Create __dtor function for the var decl.
+ llvm::Function *DtorStub = CGF.createTLSAtExitStub(D, Dtor, Addr, AtExit);
+
+ // Register above __dtor with atexit().
+ // First param is flags and must be 0, second param is function ptr
+ llvm::Value *NV = llvm::Constant::getNullValue(CGM.IntTy);
+ CGF.EmitNounwindRuntimeCall(AtExit, {NV, DtorStub});
+
+ // Cannot unregister TLS __dtor so done
+ return;
+ }
// Create __dtor function for the var decl.
- llvm::Function *dtorStub = CGF.createAtExitStub(D, dtor, addr);
+ llvm::Function *DtorStub = CGF.createAtExitStub(D, Dtor, Addr);
// Register above __dtor with atexit().
- CGF.registerGlobalDtorWithAtExit(dtorStub);
+ CGF.registerGlobalDtorWithAtExit(DtorStub);
// Emit __finalize function to unregister __dtor and (as appropriate) call
// __dtor.
- emitCXXStermFinalizer(D, dtorStub, addr);
+ emitCXXStermFinalizer(D, DtorStub, Addr);
}
void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
@@ -4686,16 +4858,17 @@ void XLCXXABI::emitCXXStermFinalizer(const VarDecl &D, llvm::Function *dtorStub,
CGF.FinishFunction();
- assert(!D.getAttr<InitPriorityAttr>() &&
- "Prioritized sinit and sterm functions are not yet supported.");
-
- if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
- getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR)
+ if (auto *IPA = D.getAttr<InitPriorityAttr>()) {
+ CGM.AddCXXPrioritizedStermFinalizerEntry(StermFinalizer,
+ IPA->getPriority());
+ } else if (isTemplateInstantiation(D.getTemplateSpecializationKind()) ||
+ getContext().GetGVALinkageForVariable(&D) == GVA_DiscardableODR) {
// According to C++ [basic.start.init]p2, class template static data
// members (i.e., implicitly or explicitly instantiated specializations)
// have unordered initialization. As a consequence, we can put them into
// their own llvm.global_dtors entry.
CGM.AddCXXStermFinalizerToGlobalDtor(StermFinalizer, 65535);
- else
+ } else {
CGM.AddCXXStermFinalizerEntry(StermFinalizer);
+ }
}
diff --git a/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index cb0dc1d5d717..990648b131fe 100644
--- a/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -131,7 +131,12 @@ public:
/// MSVC needs an extra flag to indicate a catchall.
CatchTypeInfo getCatchAllTypeInfo() override {
- return CatchTypeInfo{nullptr, 0x40};
+ // For -EHa catch(...) must handle HW exception
+ // Adjective = HT_IsStdDotDot (0x40), only catch C++ exceptions
+ if (getContext().getLangOpts().EHAsynch)
+ return CatchTypeInfo{nullptr, 0};
+ else
+ return CatchTypeInfo{nullptr, 0x40};
}
bool shouldTypeidBeNullChecked(bool IsDeref, QualType SrcRecordTy) override;
@@ -937,7 +942,8 @@ MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
llvm::Value *Offset =
GetVirtualBaseClassOffset(CGF, Value, SrcDecl, PolymorphicBase);
- llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(Value.getPointer(), Offset);
+ llvm::Value *Ptr = CGF.Builder.CreateInBoundsGEP(
+ Value.getElementType(), Value.getPointer(), Offset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
return std::make_tuple(Address(Ptr, VBaseAlign), Offset, PolymorphicBase);
@@ -1219,9 +1225,10 @@ void MicrosoftCXXABI::initializeHiddenVirtualInheritanceMembers(
if (!Int8This)
Int8This = Builder.CreateBitCast(getThisValue(CGF),
CGF.Int8Ty->getPointerTo(AS));
- llvm::Value *VtorDispPtr = Builder.CreateInBoundsGEP(Int8This, VBaseOffset);
+ llvm::Value *VtorDispPtr =
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, Int8This, VBaseOffset);
// vtorDisp is always the 32-bits before the vbase in the class layout.
- VtorDispPtr = Builder.CreateConstGEP1_32(VtorDispPtr, -4);
+ VtorDispPtr = Builder.CreateConstGEP1_32(CGF.Int8Ty, VtorDispPtr, -4);
VtorDispPtr = Builder.CreateBitCast(
VtorDispPtr, CGF.Int32Ty->getPointerTo(AS), "vtordisp.ptr");
@@ -1457,8 +1464,8 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
const CXXRecordDecl *VBase = ML.VBase;
llvm::Value *VBaseOffset =
GetVirtualBaseClassOffset(CGF, Result, Derived, VBase);
- llvm::Value *VBasePtr =
- CGF.Builder.CreateInBoundsGEP(Result.getPointer(), VBaseOffset);
+ llvm::Value *VBasePtr = CGF.Builder.CreateInBoundsGEP(
+ Result.getElementType(), Result.getPointer(), VBaseOffset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
Result = Address(VBasePtr, VBaseAlign);
@@ -1911,12 +1918,13 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
SourceLocation Loc) {
CGBuilderTy &Builder = CGF.Builder;
- Ty = Ty->getPointerTo()->getPointerTo();
+ Ty = Ty->getPointerTo();
Address VPtr =
adjustThisArgumentForVirtualFunctionCall(CGF, GD, This, true);
auto *MethodDecl = cast<CXXMethodDecl>(GD.getDecl());
- llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty, MethodDecl->getParent());
+ llvm::Value *VTable = CGF.GetVTablePtr(VPtr, Ty->getPointerTo(),
+ MethodDecl->getParent());
MicrosoftVTableContext &VFTContext = CGM.getMicrosoftVTableContext();
MethodVFTableLocation ML = VFTContext.getMethodVFTableLocation(GD);
@@ -1943,8 +1951,8 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
CGF.EmitTypeMetadataCodeForVCall(getObjectWithVPtr(), VTable, Loc);
llvm::Value *VFuncPtr =
- Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
- VFunc = Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+ Builder.CreateConstInBoundsGEP1_64(Ty, VTable, ML.Index, "vfn");
+ VFunc = Builder.CreateAlignedLoad(Ty, VFuncPtr, CGF.getPointerAlign());
}
CGCallee Callee(GD, VFunc);
@@ -2043,7 +2051,7 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
if (MD->isExternallyVisible())
ThunkFn->setComdat(CGM.getModule().getOrInsertComdat(ThunkFn->getName()));
- CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn);
+ CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/false);
CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn);
// Add the "thunk" attribute so that LLVM knows that the return type is
@@ -2072,13 +2080,14 @@ MicrosoftCXXABI::EmitVirtualMemPtrThunk(const CXXMethodDecl *MD,
// Load the vfptr and then callee from the vftable. The callee should have
// adjusted 'this' so that the vfptr is at offset zero.
+ llvm::Type *ThunkPtrTy = ThunkTy->getPointerTo();
llvm::Value *VTable = CGF.GetVTablePtr(
- getThisAddress(CGF), ThunkTy->getPointerTo()->getPointerTo(), MD->getParent());
+ getThisAddress(CGF), ThunkPtrTy->getPointerTo(), MD->getParent());
- llvm::Value *VFuncPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, ML.Index, "vfn");
+ llvm::Value *VFuncPtr = CGF.Builder.CreateConstInBoundsGEP1_64(
+ ThunkPtrTy, VTable, ML.Index, "vfn");
llvm::Value *Callee =
- CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.getPointerAlign());
+ CGF.Builder.CreateAlignedLoad(ThunkPtrTy, VFuncPtr, CGF.getPointerAlign());
CGF.EmitMustTailThunk(MD, getThisValue(CGF), {ThunkTy, Callee});
@@ -2163,8 +2172,7 @@ void MicrosoftCXXABI::emitVBTableDefinition(const VPtrInfo &VBT,
}
assert(Offsets.size() ==
- cast<llvm::ArrayType>(cast<llvm::PointerType>(GV->getType())
- ->getElementType())->getNumElements());
+ cast<llvm::ArrayType>(GV->getValueType())->getNumElements());
llvm::ArrayType *VBTableType =
llvm::ArrayType::get(CGM.IntTy, Offsets.size());
llvm::Constant *Init = llvm::ConstantArray::get(VBTableType, Offsets);
@@ -2193,7 +2201,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
CharUnits::fromQuantity(TA.Virtual.Microsoft.VtordispOffset));
VtorDispPtr = CGF.Builder.CreateElementBitCast(VtorDispPtr, CGF.Int32Ty);
llvm::Value *VtorDisp = CGF.Builder.CreateLoad(VtorDispPtr, "vtordisp");
- V = CGF.Builder.CreateGEP(This.getPointer(),
+ V = CGF.Builder.CreateGEP(This.getElementType(), This.getPointer(),
CGF.Builder.CreateNeg(VtorDisp));
// Unfortunately, having applied the vtordisp means that we no
@@ -2211,7 +2219,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
-TA.Virtual.Microsoft.VBPtrOffset,
TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
- V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
+ V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, VBPtr, VBaseOffset);
}
}
@@ -2219,7 +2227,7 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
// Non-virtual adjustment might result in a pointer outside the allocated
// object, e.g. if the final overrider class is laid out after the virtual
// base that declares a method in the most derived class.
- V = CGF.Builder.CreateConstGEP1_32(V, TA.NonVirtual);
+ V = CGF.Builder.CreateConstGEP1_32(CGF.Int8Ty, V, TA.NonVirtual);
}
// Don't need to bitcast back, the call CodeGen will handle this.
@@ -2243,7 +2251,7 @@ MicrosoftCXXABI::performReturnAdjustment(CodeGenFunction &CGF, Address Ret,
llvm::Value *VBaseOffset =
GetVBaseOffsetFromVBPtr(CGF, Ret, RA.Virtual.Microsoft.VBPtrOffset,
IntSize * RA.Virtual.Microsoft.VBIndex, &VBPtr);
- V = CGF.Builder.CreateInBoundsGEP(VBPtr, VBaseOffset);
+ V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, VBPtr, VBaseOffset);
}
if (RA.NonVirtual)
@@ -3008,8 +3016,8 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
CGBuilderTy &Builder = CGF.Builder;
// Load the vbtable pointer from the vbptr in the instance.
This = Builder.CreateElementBitCast(This, CGM.Int8Ty);
- llvm::Value *VBPtr =
- Builder.CreateInBoundsGEP(This.getPointer(), VBPtrOffset, "vbptr");
+ llvm::Value *VBPtr = Builder.CreateInBoundsGEP(
+ This.getElementType(), This.getPointer(), VBPtrOffset, "vbptr");
if (VBPtrOut) *VBPtrOut = VBPtr;
VBPtr = Builder.CreateBitCast(VBPtr,
CGM.Int32Ty->getPointerTo(0)->getPointerTo(This.getAddressSpace()));
@@ -3022,7 +3030,8 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
VBPtrAlign = CGF.getPointerAlign();
}
- llvm::Value *VBTable = Builder.CreateAlignedLoad(VBPtr, VBPtrAlign, "vbtable");
+ llvm::Value *VBTable = Builder.CreateAlignedLoad(
+ CGM.Int32Ty->getPointerTo(0), VBPtr, VBPtrAlign, "vbtable");
// Translate from byte offset to table index. It improves analyzability.
llvm::Value *VBTableIndex = Builder.CreateAShr(
@@ -3030,10 +3039,11 @@ MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
"vbtindex", /*isExact=*/true);
// Load an i32 offset from the vb-table.
- llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableIndex);
+ llvm::Value *VBaseOffs =
+ Builder.CreateInBoundsGEP(CGM.Int32Ty, VBTable, VBTableIndex);
VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
- return Builder.CreateAlignedLoad(VBaseOffs, CharUnits::fromQuantity(4),
- "vbase_offs");
+ return Builder.CreateAlignedLoad(CGM.Int32Ty, VBaseOffs,
+ CharUnits::fromQuantity(4), "vbase_offs");
}
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
@@ -3080,7 +3090,8 @@ llvm::Value *MicrosoftCXXABI::AdjustVirtualBase(
llvm::Value *VBPtr = nullptr;
llvm::Value *VBaseOffs =
GetVBaseOffsetFromVBPtr(CGF, Base, VBPtrOffset, VBTableOffset, &VBPtr);
- llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);
+ llvm::Value *AdjustedBase =
+ Builder.CreateInBoundsGEP(CGM.Int8Ty, VBPtr, VBaseOffs);
// Merge control flow with the case where we didn't have to adjust.
if (VBaseAdjustBB) {
@@ -3132,7 +3143,8 @@ llvm::Value *MicrosoftCXXABI::EmitMemberDataPointerAddress(
Addr = Builder.CreateBitCast(Addr, CGF.Int8Ty->getPointerTo(AS));
// Apply the offset, which we assume is non-null.
- Addr = Builder.CreateInBoundsGEP(Addr, FieldOffset, "memptr.offset");
+ Addr = Builder.CreateInBoundsGEP(CGF.Int8Ty, Addr, FieldOffset,
+ "memptr.offset");
// Cast the address to the appropriate pointer type, adopting the address
// space of the base pointer.
@@ -3294,9 +3306,10 @@ llvm::Value *MicrosoftCXXABI::EmitNonNullMemberPointerConversion(
Mapping->getAggregateElement(cast<llvm::Constant>(VBIndex));
} else {
llvm::Value *Idxs[] = {getZeroInt(), VBIndex};
- VirtualBaseAdjustmentOffset =
- Builder.CreateAlignedLoad(Builder.CreateInBoundsGEP(VDispMap, Idxs),
- CharUnits::fromQuantity(4));
+ VirtualBaseAdjustmentOffset = Builder.CreateAlignedLoad(
+ CGM.IntTy, Builder.CreateInBoundsGEP(VDispMap->getValueType(),
+ VDispMap, Idxs),
+ CharUnits::fromQuantity(4));
}
DstVBIndexEqZero =
@@ -3426,7 +3439,7 @@ CGCallee MicrosoftCXXABI::EmitLoadOfMemberFunctionPointer(
if (NonVirtualBaseAdjustment) {
// Apply the adjustment and cast back to the original struct type.
llvm::Value *Ptr = Builder.CreateBitCast(ThisPtrForCall, CGF.Int8PtrTy);
- Ptr = Builder.CreateInBoundsGEP(Ptr, NonVirtualBaseAdjustment);
+ Ptr = Builder.CreateInBoundsGEP(CGF.Int8Ty, Ptr, NonVirtualBaseAdjustment);
ThisPtrForCall = Builder.CreateBitCast(Ptr, ThisPtrForCall->getType(),
"this.adjusted");
}
@@ -4325,7 +4338,7 @@ llvm::GlobalVariable *MicrosoftCXXABI::getThrowInfo(QualType T) {
};
auto *GV = new llvm::GlobalVariable(
CGM.getModule(), TIType, /*isConstant=*/true, getLinkageForRTTI(T),
- llvm::ConstantStruct::get(TIType, Fields), StringRef(MangledName));
+ llvm::ConstantStruct::get(TIType, Fields), MangledName.str());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
GV->setSection(".xdata");
if (GV->isWeakForLinker())
diff --git a/clang/lib/CodeGen/ModuleBuilder.cpp b/clang/lib/CodeGen/ModuleBuilder.cpp
index 01093cf20c18..b63f756ca288 100644
--- a/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -138,7 +138,7 @@ namespace {
Ctx = &Context;
M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
- M->setDataLayout(Ctx->getTargetInfo().getDataLayout());
+ M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
const auto &SDKVersion = Ctx->getTargetInfo().getSDKVersion();
if (!SDKVersion.empty())
M->setSDKVersion(SDKVersion);
diff --git a/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index de5c1a4c8f02..1adf0ad9c0e5 100644
--- a/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -166,7 +166,7 @@ public:
Ctx = &Context;
VMContext.reset(new llvm::LLVMContext());
M.reset(new llvm::Module(MainFileName, *VMContext));
- M->setDataLayout(Ctx->getTargetInfo().getDataLayout());
+ M->setDataLayout(Ctx->getTargetInfo().getDataLayoutString());
Builder.reset(new CodeGen::CodeGenModule(
*Ctx, HeaderSearchOpts, PreprocessorOpts, CodeGenOpts, *M, Diags));
@@ -245,7 +245,7 @@ public:
return;
M->setTargetTriple(Ctx.getTargetInfo().getTriple().getTriple());
- M->setDataLayout(Ctx.getTargetInfo().getDataLayout());
+ M->setDataLayout(Ctx.getTargetInfo().getDataLayoutString());
// PCH files don't have a signature field in the control block,
// but LLVM detects DWO CUs by looking for a non-zero DWO id.
@@ -295,7 +295,7 @@ public:
llvm::SmallString<0> Buffer;
clang::EmitBackendOutput(
Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts, LangOpts,
- Ctx.getTargetInfo().getDataLayout(), M.get(),
+ Ctx.getTargetInfo().getDataLayoutString(), M.get(),
BackendAction::Backend_EmitLL,
std::make_unique<llvm::raw_svector_ostream>(Buffer));
llvm::dbgs() << Buffer;
@@ -303,9 +303,9 @@ public:
// Use the LLVM backend to emit the pch container.
clang::EmitBackendOutput(Diags, HeaderSearchOpts, CodeGenOpts, TargetOpts,
- LangOpts, Ctx.getTargetInfo().getDataLayout(),
- M.get(), BackendAction::Backend_EmitObj,
- std::move(OS));
+ LangOpts,
+ Ctx.getTargetInfo().getDataLayoutString(), M.get(),
+ BackendAction::Backend_EmitObj, std::move(OS));
// Free the memory for the temporary buffer.
llvm::SmallVector<char, 0> Empty;
diff --git a/clang/lib/CodeGen/SanitizerMetadata.cpp b/clang/lib/CodeGen/SanitizerMetadata.cpp
index cdf83370c41f..009965a36c39 100644
--- a/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -1,4 +1,4 @@
-//===--- SanitizerMetadata.cpp - Blacklist for sanitizers -----------------===//
+//===--- SanitizerMetadata.cpp - Ignored entities for sanitizers ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -34,15 +34,15 @@ void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
bool IsExcluded) {
if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
- IsDynInit &= !CGM.isInSanitizerBlacklist(GV, Loc, Ty, "init");
- IsExcluded |= CGM.isInSanitizerBlacklist(GV, Loc, Ty);
+ IsDynInit &= !CGM.isInNoSanitizeList(GV, Loc, Ty, "init");
+ IsExcluded |= CGM.isInNoSanitizeList(GV, Loc, Ty);
llvm::Metadata *LocDescr = nullptr;
llvm::Metadata *GlobalName = nullptr;
llvm::LLVMContext &VMContext = CGM.getLLVMContext();
if (!IsExcluded) {
- // Don't generate source location and global name if it is blacklisted -
- // it won't be instrumented anyway.
+ // Don't generate source location and global name if it is on
+ // the NoSanitizeList - it won't be instrumented anyway.
LocDescr = getLocationMetadata(Loc);
if (!Name.empty())
GlobalName = llvm::MDString::get(VMContext, Name);
diff --git a/clang/lib/CodeGen/TargetInfo.cpp b/clang/lib/CodeGen/TargetInfo.cpp
index bcd24292ff41..a2b68a04d351 100644
--- a/clang/lib/CodeGen/TargetInfo.cpp
+++ b/clang/lib/CodeGen/TargetInfo.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/DiagnosticFrontend.h"
+#include "clang/Basic/Builtins.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
@@ -30,6 +31,7 @@
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
+#include "llvm/IR/IntrinsicsS390.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
@@ -1103,6 +1105,7 @@ class X86_32ABIInfo : public SwiftABIInfo {
bool IsWin32StructABI;
bool IsSoftFloatABI;
bool IsMCUABI;
+ bool IsLinuxABI;
unsigned DefaultNumRegisterParameters;
static bool isRegisterSize(unsigned Size) {
@@ -1165,9 +1168,9 @@ public:
unsigned NumRegisterParameters, bool SoftFloatABI)
: SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
IsRetSmallStructInRegABI(RetSmallStructInRegABI),
- IsWin32StructABI(Win32StructABI),
- IsSoftFloatABI(SoftFloatABI),
+ IsWin32StructABI(Win32StructABI), IsSoftFloatABI(SoftFloatABI),
IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
+ IsLinuxABI(CGT.getTarget().getTriple().isOSLinux()),
DefaultNumRegisterParameters(NumRegisterParameters) {}
bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
@@ -1592,6 +1595,14 @@ unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
if (Align <= MinABIStackAlignInBytes)
return 0; // Use default alignment.
+ if (IsLinuxABI) {
+ // Exclude other System V OS (e.g Darwin, PS4 and FreeBSD) since we don't
+ // want to spend any effort dealing with the ramifications of ABI breaks.
+ //
+ // If the vector type is __m128/__m256/__m512, return the default alignment.
+ if (Ty->isVectorType() && (Align == 16 || Align == 32 || Align == 64))
+ return Align;
+ }
// On non-Darwin, the stack type alignment is always 4.
if (!IsDarwinVectorABI) {
// Set explicit alignment, since we may need to realign the top.
@@ -2599,7 +2610,7 @@ static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
bool Quote = (Lib.find(' ') != StringRef::npos);
std::string ArgStr = Quote ? "\"" : "";
ArgStr += Lib;
- if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
+ if (!Lib.endswith_insensitive(".lib") && !Lib.endswith_insensitive(".a"))
ArgStr += ".lib";
ArgStr += Quote ? "\"" : "";
return ArgStr;
@@ -3955,8 +3966,8 @@ static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
llvm::Value *Offset =
llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
- overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
- "overflow_arg_area.next");
+ overflow_arg_area = CGF.Builder.CreateGEP(CGF.Int8Ty, overflow_arg_area,
+ Offset, "overflow_arg_area.next");
CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
// AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
@@ -4050,8 +4061,10 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
"Unexpected ABI info for mixed regs");
llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
- llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
- llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
+ llvm::Value *GPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset);
+ llvm::Value *FPAddr =
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset);
llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
@@ -4070,7 +4083,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
} else if (neededInt) {
- RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
+ RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, gp_offset),
CharUnits::fromQuantity(8));
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
@@ -4088,7 +4101,7 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
} else if (neededSSE == 1) {
- RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ RegAddr = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, fp_offset),
CharUnits::fromQuantity(16));
RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
} else {
@@ -4099,7 +4112,8 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// to assume that the slots are 16-byte aligned, since the stack is
// naturally 16-byte aligned and the prologue is expected to store
// all the SSE registers to the RSA.
- Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
+ Address RegAddrLo = Address(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea,
+ fp_offset),
CharUnits::fromQuantity(16));
Address RegAddrHi =
CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
@@ -4150,7 +4164,12 @@ Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(8),
/*allowHigherAlign*/ false);
@@ -4347,15 +4366,10 @@ void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
-
- bool IsIndirect = false;
-
// MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
// not 1, 2, 4, or 8 bytes, must be passed by reference."
- if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
- uint64_t Width = getContext().getTypeSize(Ty);
- IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
- }
+ uint64_t Width = getContext().getTypeSize(Ty);
+ bool IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
@@ -4561,10 +4575,6 @@ Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
if (Ty->isAnyComplexType())
llvm::report_fatal_error("complex type is not supported on AIX yet");
- if (Ty->isVectorType())
- llvm::report_fatal_error(
- "vector types are not yet supported for variadic functions on AIX");
-
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
TypeInfo.Align = getParamTypeAlignment(Ty);
@@ -5411,7 +5421,8 @@ private:
bool isDarwinPCS() const { return Kind == DarwinPCS; }
ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
- ABIArgInfo classifyArgumentType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, bool IsVariadic,
+ unsigned CallingConvention) const;
ABIArgInfo coerceIllegalVector(QualType Ty) const;
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
@@ -5425,7 +5436,8 @@ private:
classifyReturnType(FI.getReturnType(), FI.isVariadic());
for (auto &it : FI.arguments())
- it.info = classifyArgumentType(it.type);
+ it.info = classifyArgumentType(it.type, FI.isVariadic(),
+ FI.getCallingConvention());
}
Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
@@ -5628,7 +5640,9 @@ ABIArgInfo AArch64ABIInfo::coerceIllegalVector(QualType Ty) const {
return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
-ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
+ABIArgInfo
+AArch64ABIInfo::classifyArgumentType(QualType Ty, bool IsVariadic,
+ unsigned CallingConvention) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
// Handle illegal vector types here.
@@ -5674,9 +5688,24 @@ ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
// Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
const Type *Base = nullptr;
uint64_t Members = 0;
- if (isHomogeneousAggregate(Ty, Base, Members)) {
+ bool IsWin64 = Kind == Win64 || CallingConvention == llvm::CallingConv::Win64;
+ bool IsWinVariadic = IsWin64 && IsVariadic;
+ // In variadic functions on Windows, all composite types are treated alike,
+ // no special handling of HFAs/HVAs.
+ if (!IsWinVariadic && isHomogeneousAggregate(Ty, Base, Members)) {
+ if (Kind != AArch64ABIInfo::AAPCS)
+ return ABIArgInfo::getDirect(
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
+
+ // For alignment adjusted HFAs, cap the argument alignment to 16, leave it
+ // default otherwise.
+ unsigned Align =
+ getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
+ Align = (Align > BaseAlign && Align >= 16) ? 16 : 0;
return ABIArgInfo::getDirect(
- llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
+ llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members), 0,
+ nullptr, true, Align);
}
// Aggregates <= 16 bytes are passed directly in registers or on the stack.
@@ -5755,6 +5784,18 @@ ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
if (getTarget().isRenderScriptTarget()) {
return coerceToIntArray(RetTy, getContext(), getVMContext());
}
+
+ if (Size <= 64 && getDataLayout().isLittleEndian()) {
+ // Composite types are returned in lower bits of a 64-bit register for LE,
+ // and in higher bits for BE. However, integer types are always returned
+ // in lower bits for both LE and BE, and they are not rounded up to
+ // 64-bits. We can skip rounding up of composite types for LE, but not for
+ // BE, otherwise composite types will be indistinguishable from integer
+ // types.
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), Size));
+ }
+
unsigned Alignment = getContext().getTypeAlign(RetTy);
Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
@@ -5831,10 +5872,10 @@ bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
-Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
- QualType Ty,
- CodeGenFunction &CGF) const {
- ABIArgInfo AI = classifyArgumentType(Ty);
+Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
+ CGF.CurFnInfo->getCallingConvention());
bool IsIndirect = AI.isIndirect();
llvm::Type *BaseTy = CGF.ConvertType(Ty);
@@ -5948,7 +5989,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
Address reg_top_p =
CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
- Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
+ Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
CharUnits::fromQuantity(IsFPR ? 16 : 8));
Address RegAddr = Address::invalid();
llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
@@ -6046,8 +6087,8 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
StackSize = TySize.alignTo(StackSlotSize);
llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
- llvm::Value *NewStack =
- CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
+ llvm::Value *NewStack = CGF.Builder.CreateInBoundsGEP(
+ CGF.Int8Ty, OnStackPtr, StackSizeC, "new_stack");
// Write the new value of __stack for the next call to va_arg
CGF.Builder.CreateStore(NewStack, stack_p);
@@ -6114,7 +6155,13 @@ Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
QualType Ty) const {
- return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
+ bool IsIndirect = false;
+
+ // Composites larger than 16 bytes are passed by reference.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) > 128)
+ IsIndirect = true;
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
CGF.getContext().getTypeInfoInChars(Ty),
CharUnits::fromQuantity(8),
/*allowHigherAlign*/ false);
@@ -6396,7 +6443,16 @@ ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
}
}
- return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
+ unsigned Align = 0;
+ if (getABIKind() == ARMABIInfo::AAPCS ||
+ getABIKind() == ARMABIInfo::AAPCS_VFP) {
+ // For alignment adjusted HFAs, cap the argument alignment to 8, leave it
+ // default otherwise.
+ Align = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
+ unsigned BaseAlign = getContext().getTypeAlignInChars(Base).getQuantity();
+ Align = (Align > BaseAlign && Align >= 8) ? 8 : 0;
+ }
+ return ABIArgInfo::getDirect(nullptr, 0, nullptr, false, Align);
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
@@ -7200,8 +7256,49 @@ public:
SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
: TargetCodeGenInfo(
std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
-};
+ llvm::Value *testFPKind(llvm::Value *V, unsigned BuiltinID,
+ CGBuilderTy &Builder,
+ CodeGenModule &CGM) const override {
+ assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
+ // Only use TDC in constrained FP mode.
+ if (!Builder.getIsFPConstrained())
+ return nullptr;
+
+ llvm::Type *Ty = V->getType();
+ if (Ty->isFloatTy() || Ty->isDoubleTy() || Ty->isFP128Ty()) {
+ llvm::Module &M = CGM.getModule();
+ auto &Ctx = M.getContext();
+ llvm::Function *TDCFunc =
+ llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_tdc, Ty);
+ unsigned TDCBits = 0;
+ switch (BuiltinID) {
+ case Builtin::BI__builtin_isnan:
+ TDCBits = 0xf;
+ break;
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
+ case Builtin::BI__builtin_isfinite:
+ TDCBits = 0xfc0;
+ break;
+ case Builtin::BI__builtin_isinf:
+ TDCBits = 0x30;
+ break;
+ default:
+ break;
+ }
+ if (TDCBits)
+ return Builder.CreateCall(
+ TDCFunc,
+ {V, llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), TDCBits)});
+ }
+ return nullptr;
+ }
+};
}
bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
@@ -7366,7 +7463,8 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
+ OverflowArgArea.getPointer(), PaddedSizeV,
"overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
@@ -7416,7 +7514,7 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
llvm::Value *RegSaveArea =
CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
- Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
+ Address RawRegAddr(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset,
"raw_reg_addr"),
PaddedSize);
Address RegAddr =
@@ -7445,7 +7543,8 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Update overflow_arg_area_ptr pointer
llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
+ CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
+ OverflowArgArea.getPointer(), PaddedSizeV,
"overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
CGF.EmitBranch(ContBlock);
@@ -8023,14 +8122,89 @@ MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
}
//===----------------------------------------------------------------------===//
-// AVR ABI Implementation.
+// M68k ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
+
+class M68kTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ M68kTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+ void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const override;
+};
+
+} // namespace
+
+void M68kTargetCodeGenInfo::setTargetAttributes(
+ const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
+ if (const auto *attr = FD->getAttr<M68kInterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::M68k_INTR);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() / 2;
+ llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
+ "__isr_" + Twine(Num), F);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// AVR ABI Implementation. Documented at
+// https://gcc.gnu.org/wiki/avr-gcc#Calling_Convention
+// https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AVRABIInfo : public DefaultABIInfo {
+public:
+ AVRABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType Ty) const {
+ // A return struct with size less than or equal to 8 bytes is returned
+ // directly via registers R18-R25.
+ if (isAggregateTypeForABI(Ty) && getContext().getTypeSize(Ty) <= 64)
+ return ABIArgInfo::getDirect();
+ else
+ return DefaultABIInfo::classifyReturnType(Ty);
+ }
+
+ // Just copy the original implementation of DefaultABIInfo::computeInfo(),
+ // since DefaultABIInfo::classify{Return,Argument}Type() are not virtual.
+ void computeInfo(CGFunctionInfo &FI) const override {
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (auto &I : FI.arguments())
+ I.info = classifyArgumentType(I.type);
+ }
+};
+
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
AVRTargetCodeGenInfo(CodeGenTypes &CGT)
- : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
+ : TargetCodeGenInfo(std::make_unique<AVRABIInfo>(CGT)) {}
+
+ LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
+ const VarDecl *D) const override {
+ // Check if a global/static variable is defined within address space 1
+ // but not constant.
+ LangAS AS = D->getType().getAddressSpace();
+ if (isTargetAddressSpace(AS) && toTargetAddressSpace(AS) == 1 &&
+ !D->getType().isConstQualified())
+ CGM.getDiags().Report(D->getLocation(),
+ diag::err_verify_nonconst_addrspace)
+ << "__flash";
+ return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
+ }
void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &CGM) const override {
@@ -8301,7 +8475,7 @@ Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
// Add offset to the current pointer to access the argument.
__overflow_area_pointer =
- CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
+ CGF.Builder.CreateGEP(CGF.Int8Ty, __overflow_area_pointer, Offset);
llvm::Value *AsInt =
CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
@@ -8324,7 +8498,8 @@ Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
__overflow_area_pointer = CGF.Builder.CreateGEP(
- __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ CGF.Int8Ty, __overflow_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, Offset),
"__overflow_area_pointer.next");
CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
@@ -8355,7 +8530,7 @@ Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr = Builder.CreateGEP(
- Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
+ CGF.Int8Ty, Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
Builder.CreateStore(NextAddr, VAListAddrAsBPP);
return AddrTyped;
@@ -8427,7 +8602,7 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
}
llvm::Value *__new_saved_reg_area_pointer =
- CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
+ CGF.Builder.CreateGEP(CGF.Int8Ty, __current_saved_reg_area_pointer,
llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
"__new_saved_reg_area_pointer");
@@ -8483,7 +8658,8 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
// Get the pointer for next argument in overflow area and store it
// to overflow area pointer.
llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
- __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
+ CGF.Int8Ty, __overflow_area_pointer,
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
"__overflow_area_pointer.next");
CGF.Builder.CreateStore(__new_overflow_area_pointer,
@@ -8998,9 +9174,13 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
assert(Max == 0 && "Max must be zero");
} else if (IsOpenCLKernel || IsHIPKernel) {
// By default, restrict the maximum size to a value specified by
- // --gpu-max-threads-per-block=n or its default value.
+ // --gpu-max-threads-per-block=n or its default value for HIP.
+ const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
+ const unsigned DefaultMaxWorkGroupSize =
+ IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
+ : M.getLangOpts().GPUMaxThreadsPerBlock;
std::string AttrVal =
- std::string("1,") + llvm::utostr(M.getLangOpts().GPUMaxThreadsPerBlock);
+ std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
}
@@ -9039,6 +9219,9 @@ void AMDGPUTargetCodeGenInfo::setTargetAttributes(
if (M.getContext().getTargetInfo().allowAMDGPUUnsafeFPAtomics())
F->addFnAttr("amdgpu-unsafe-fp-atomics", "true");
+
+ if (!getABIInfo().getCodeGenOpts().EmitIEEENaNCompliantInsts)
+ F->addFnAttr("amdgpu-ieee", "false");
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
@@ -9941,6 +10124,12 @@ class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
: TargetCodeGenInfo(std::make_unique<SPIRABIInfo>(CGT)) {}
+
+ LangAS getASTAllocaAddressSpace() const override {
+ return getLangASFromTargetAS(
+ getABIInfo().getDataLayout().getAllocaAddrSpace());
+ }
+
unsigned getOpenCLKernelCallingConv() const override;
};
@@ -10620,8 +10809,8 @@ ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
llvm::Type *Field2Ty = nullptr;
CharUnits Field1Off = CharUnits::Zero();
CharUnits Field2Off = CharUnits::Zero();
- int NeededArgGPRs;
- int NeededArgFPRs;
+ int NeededArgGPRs = 0;
+ int NeededArgFPRs = 0;
bool IsCandidate =
detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
NeededArgGPRs, NeededArgFPRs);
@@ -10845,6 +11034,8 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
case llvm::Triple::le32:
return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
+ case llvm::Triple::m68k:
+ return SetCGInfo(new M68kTargetCodeGenInfo(Types));
case llvm::Triple::mips:
case llvm::Triple::mipsel:
if (Triple.getOS() == llvm::Triple::NaCl)
diff --git a/clang/lib/CodeGen/TargetInfo.h b/clang/lib/CodeGen/TargetInfo.h
index 0df9667e91e1..e6e474544fc4 100644
--- a/clang/lib/CodeGen/TargetInfo.h
+++ b/clang/lib/CodeGen/TargetInfo.h
@@ -14,6 +14,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
#define LLVM_CLANG_LIB_CODEGEN_TARGETINFO_H
+#include "CGBuilder.h"
#include "CodeGenModule.h"
#include "CGValue.h"
#include "clang/AST/Type.h"
@@ -126,6 +127,16 @@ public:
return Address;
}
+ /// Performs a target specific test of a floating point value for things
+ /// like IsNaN, Infinity, ... Nullptr is returned if no implementation
+ /// exists.
+ virtual llvm::Value *
+ testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder,
+ CodeGenModule &CGM) const {
+ assert(V->getType()->isFloatingPointTy() && "V should have an FP type.");
+ return nullptr;
+ }
+
/// Corrects the low-level LLVM type for a given constraint and "usual"
/// type.
///
diff --git a/clang/lib/CodeGen/VarBypassDetector.cpp b/clang/lib/CodeGen/VarBypassDetector.cpp
index f3a172e91c4f..e8717a61ce5e 100644
--- a/clang/lib/CodeGen/VarBypassDetector.cpp
+++ b/clang/lib/CodeGen/VarBypassDetector.cpp
@@ -1,4 +1,4 @@
-//===--- VarBypassDetector.h - Bypass jumps detector --------------*- C++ -*-=//
+//===--- VarBypassDetector.cpp - Bypass jumps detector ------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/lib/CodeGen/VarBypassDetector.h b/clang/lib/CodeGen/VarBypassDetector.h
index 8a2e388eae3f..b654eefd963d 100644
--- a/clang/lib/CodeGen/VarBypassDetector.h
+++ b/clang/lib/CodeGen/VarBypassDetector.h
@@ -1,4 +1,4 @@
-//===--- VarBypassDetector.cpp - Bypass jumps detector ------------*- C++ -*-=//
+//===--- VarBypassDetector.h - Bypass jumps detector --------------*- C++ -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/lib/CrossTU/CrossTranslationUnit.cpp b/clang/lib/CrossTU/CrossTranslationUnit.cpp
index e27779f91abc..0aecad491ecc 100644
--- a/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -92,6 +92,10 @@ public:
std::string message(int Condition) const override {
switch (static_cast<index_error_code>(Condition)) {
+ case index_error_code::success:
+ // 'success' is not an error; execution falls through to the
+ // unreachable below. This case only silences the -Wswitch warning.
+ break;
case index_error_code::unspecified:
return "An unknown error has occurred.";
case index_error_code::missing_index_file:
@@ -630,7 +634,7 @@ parseInvocationList(StringRef FileContent, llvm::sys::path::Style PathStyle) {
SmallString<32> NativeSourcePath(SourcePath);
llvm::sys::path::native(NativeSourcePath, PathStyle);
- StringRef InvocationKey(NativeSourcePath);
+ StringRef InvocationKey = NativeSourcePath;
if (InvocationList.find(InvocationKey) != InvocationList.end())
return llvm::make_error<IndexError>(
@@ -667,12 +671,15 @@ llvm::Error CrossTranslationUnitContext::ASTLoader::lazyInitInvocationList() {
/// Lazily initialize the invocation list member used for on-demand parsing.
if (InvocationList)
return llvm::Error::success();
+ if (index_error_code::success != PreviousParsingResult)
+ return llvm::make_error<IndexError>(PreviousParsingResult);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> FileContent =
llvm::MemoryBuffer::getFile(InvocationListFilePath);
- if (!FileContent)
- return llvm::make_error<IndexError>(
- index_error_code::invocation_list_file_not_found);
+ if (!FileContent) {
+ PreviousParsingResult = index_error_code::invocation_list_file_not_found;
+ return llvm::make_error<IndexError>(PreviousParsingResult);
+ }
std::unique_ptr<llvm::MemoryBuffer> ContentBuffer = std::move(*FileContent);
assert(ContentBuffer && "If no error was produced after loading, the pointer "
"should not be nullptr.");
@@ -680,8 +687,13 @@ llvm::Error CrossTranslationUnitContext::ASTLoader::lazyInitInvocationList() {
llvm::Expected<InvocationListTy> ExpectedInvocationList =
parseInvocationList(ContentBuffer->getBuffer(), PathStyle);
- if (!ExpectedInvocationList)
- return ExpectedInvocationList.takeError();
+ // Handle the error to store the code for next call to this function.
+ if (!ExpectedInvocationList) {
+ llvm::handleAllErrors(
+ ExpectedInvocationList.takeError(),
+ [&](const IndexError &E) { PreviousParsingResult = E.getCode(); });
+ return llvm::make_error<IndexError>(PreviousParsingResult);
+ }
InvocationList = *ExpectedInvocationList;
@@ -754,31 +766,15 @@ CrossTranslationUnitContext::getOrCreateASTImporter(ASTUnit *Unit) {
ASTImporter *NewImporter = new ASTImporter(
Context, Context.getSourceManager().getFileManager(), From,
From.getSourceManager().getFileManager(), false, ImporterSharedSt);
- NewImporter->setFileIDImportHandler([this, Unit](FileID ToID, FileID FromID) {
- assert(ImportedFileIDs.find(ToID) == ImportedFileIDs.end() &&
- "FileID already imported, should not happen.");
- ImportedFileIDs[ToID] = std::make_pair(FromID, Unit);
- });
ASTUnitImporterMap[From.getTranslationUnitDecl()].reset(NewImporter);
return *NewImporter;
}
-llvm::Optional<std::pair<SourceLocation, ASTUnit *>>
-CrossTranslationUnitContext::getImportedFromSourceLocation(
+llvm::Optional<clang::MacroExpansionContext>
+CrossTranslationUnitContext::getMacroExpansionContextForSourceLocation(
const clang::SourceLocation &ToLoc) const {
- const SourceManager &SM = Context.getSourceManager();
- auto DecToLoc = SM.getDecomposedLoc(ToLoc);
-
- auto I = ImportedFileIDs.find(DecToLoc.first);
- if (I == ImportedFileIDs.end())
- return {};
-
- FileID FromID = I->second.first;
- clang::ASTUnit *Unit = I->second.second;
- SourceLocation FromLoc =
- Unit->getSourceManager().getComposedLoc(FromID, DecToLoc.second);
-
- return std::make_pair(FromLoc, Unit);
+ // FIXME: Implement: Record such a context for every imported ASTUnit; lookup.
+ return llvm::None;
}
} // namespace cross_tu
diff --git a/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp b/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
index 176d6d6abf33..963256f268bb 100644
--- a/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
+++ b/clang/lib/DirectoryWatcher/linux/DirectoryWatcher-linux.cpp
@@ -14,6 +14,7 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Errno.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Path.h"
#include <atomic>
#include <condition_variable>
diff --git a/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp b/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp
index 25cbcf536388..1f040f60ff19 100644
--- a/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp
+++ b/clang/lib/DirectoryWatcher/windows/DirectoryWatcher-windows.cpp
@@ -6,19 +6,12 @@
//
//===----------------------------------------------------------------------===//
-// TODO: This is not yet an implementation, but it will make it so Windows
-// builds don't fail.
-
#include "DirectoryScanner.h"
#include "clang/DirectoryWatcher/DirectoryWatcher.h"
-
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/ScopeExit.h"
-#include "llvm/Support/AlignOf.h"
-#include "llvm/Support/Errno.h"
-#include "llvm/Support/Mutex.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Path.h"
-#include <atomic>
+#include "llvm/Support/Windows/WindowsSupport.h"
#include <condition_variable>
#include <mutex>
#include <queue>
@@ -28,23 +21,271 @@
namespace {
+using DirectoryWatcherCallback =
+ std::function<void(llvm::ArrayRef<clang::DirectoryWatcher::Event>, bool)>;
+
using namespace llvm;
using namespace clang;
class DirectoryWatcherWindows : public clang::DirectoryWatcher {
+ OVERLAPPED Overlapped;
+
+ std::vector<DWORD> Notifications;
+
+ std::thread WatcherThread;
+ std::thread HandlerThread;
+ std::function<void(ArrayRef<DirectoryWatcher::Event>, bool)> Callback;
+ SmallString<MAX_PATH> Path;
+ HANDLE Terminate;
+
+ std::mutex Mutex;
+ bool WatcherActive = false;
+ std::condition_variable Ready;
+
+ class EventQueue {
+ std::mutex M;
+ std::queue<DirectoryWatcher::Event> Q;
+ std::condition_variable CV;
+
+ public:
+ void emplace(DirectoryWatcher::Event::EventKind Kind, StringRef Path) {
+ {
+ std::unique_lock<std::mutex> L(M);
+ Q.emplace(Kind, Path);
+ }
+ CV.notify_one();
+ }
+
+ DirectoryWatcher::Event pop_front() {
+ std::unique_lock<std::mutex> L(M);
+ while (true) {
+ if (!Q.empty()) {
+ DirectoryWatcher::Event E = Q.front();
+ Q.pop();
+ return E;
+ }
+ CV.wait(L, [this]() { return !Q.empty(); });
+ }
+ }
+ } Q;
+
public:
- ~DirectoryWatcherWindows() override { }
- void InitialScan() { }
- void EventReceivingLoop() { }
- void StopWork() { }
+ DirectoryWatcherWindows(HANDLE DirectoryHandle, bool WaitForInitialSync,
+ DirectoryWatcherCallback Receiver);
+
+ ~DirectoryWatcherWindows() override;
+
+ void InitialScan();
+ void WatcherThreadProc(HANDLE DirectoryHandle);
+ void NotifierThreadProc(bool WaitForInitialSync);
};
+
+DirectoryWatcherWindows::DirectoryWatcherWindows(
+ HANDLE DirectoryHandle, bool WaitForInitialSync,
+ DirectoryWatcherCallback Receiver)
+ : Callback(Receiver), Terminate(INVALID_HANDLE_VALUE) {
+ // Pre-compute the real location as we will be handing over the directory
+ // handle to the watcher and performing synchronous operations.
+ {
+ DWORD Size = GetFinalPathNameByHandleW(DirectoryHandle, NULL, 0, 0);
+ std::unique_ptr<WCHAR[]> Buffer{new WCHAR[Size]};
+ Size = GetFinalPathNameByHandleW(DirectoryHandle, Buffer.get(), Size, 0);
+ Buffer[Size] = L'\0';
+ llvm::sys::windows::UTF16ToUTF8(Buffer.get(), Size, Path);
+ }
+
+ size_t EntrySize = sizeof(FILE_NOTIFY_INFORMATION) + MAX_PATH * sizeof(WCHAR);
+ Notifications.resize((4 * EntrySize) / sizeof(DWORD));
+
+ memset(&Overlapped, 0, sizeof(Overlapped));
+ Overlapped.hEvent =
+ CreateEventW(NULL, /*bManualReset=*/FALSE, /*bInitialState=*/FALSE, NULL);
+ assert(Overlapped.hEvent && "unable to create event");
+
+ Terminate =
+ CreateEventW(NULL, /*bManualReset=*/TRUE, /*bInitialState=*/FALSE, NULL);
+
+ WatcherThread = std::thread([this, DirectoryHandle]() {
+ this->WatcherThreadProc(DirectoryHandle);
+ });
+
+ if (WaitForInitialSync)
+ InitialScan();
+
+ HandlerThread = std::thread([this, WaitForInitialSync]() {
+ this->NotifierThreadProc(WaitForInitialSync);
+ });
+}
+
+DirectoryWatcherWindows::~DirectoryWatcherWindows() {
+ // Signal the Watcher to exit.
+ SetEvent(Terminate);
+ HandlerThread.join();
+ WatcherThread.join();
+ CloseHandle(Terminate);
+ CloseHandle(Overlapped.hEvent);
+}
+
+void DirectoryWatcherWindows::InitialScan() {
+ std::unique_lock<std::mutex> lock(Mutex);
+ Ready.wait(lock, [this] { return this->WatcherActive; });
+
+ Callback(getAsFileEvents(scanDirectory(Path.data())), /*IsInitial=*/true);
+}
+
+void DirectoryWatcherWindows::WatcherThreadProc(HANDLE DirectoryHandle) {
+ while (true) {
+ // We do not guarantee subdirectory events, but macOS already provides
+ // them, so we might as well watch subdirectories here too.
+ BOOL WatchSubtree = TRUE;
+ DWORD NotifyFilter = FILE_NOTIFY_CHANGE_FILE_NAME
+ | FILE_NOTIFY_CHANGE_DIR_NAME
+ | FILE_NOTIFY_CHANGE_SIZE
+ | FILE_NOTIFY_CHANGE_LAST_WRITE
+ | FILE_NOTIFY_CHANGE_CREATION;
+
+ DWORD BytesTransferred;
+ if (!ReadDirectoryChangesW(DirectoryHandle, Notifications.data(),
+ Notifications.size() * sizeof(DWORD),
+ WatchSubtree, NotifyFilter, &BytesTransferred,
+ &Overlapped, NULL)) {
+ Q.emplace(DirectoryWatcher::Event::EventKind::WatcherGotInvalidated,
+ "");
+ break;
+ }
+
+ if (!WatcherActive) {
+ std::unique_lock<std::mutex> lock(Mutex);
+ WatcherActive = true;
+ }
+ Ready.notify_one();
+
+ HANDLE Handles[2] = { Terminate, Overlapped.hEvent };
+ switch (WaitForMultipleObjects(2, Handles, FALSE, INFINITE)) {
+ case WAIT_OBJECT_0: // Terminate Request
+ case WAIT_FAILED: // Failure
+ Q.emplace(DirectoryWatcher::Event::EventKind::WatcherGotInvalidated,
+ "");
+ (void)CloseHandle(DirectoryHandle);
+ return;
+ case WAIT_TIMEOUT: // Spurious wakeup?
+ continue;
+ case WAIT_OBJECT_0 + 1: // Directory change
+ break;
+ }
+
+ if (!GetOverlappedResult(DirectoryHandle, &Overlapped, &BytesTransferred,
+ FALSE)) {
+ Q.emplace(DirectoryWatcher::Event::EventKind::WatchedDirRemoved,
+ "");
+ Q.emplace(DirectoryWatcher::Event::EventKind::WatcherGotInvalidated,
+ "");
+ break;
+ }
+
+ // There was a buffer underrun on the kernel side. We may have lost
+ // events, please re-synchronize.
+ if (BytesTransferred == 0) {
+ Q.emplace(DirectoryWatcher::Event::EventKind::WatcherGotInvalidated,
+ "");
+ break;
+ }
+
+ for (FILE_NOTIFY_INFORMATION *I =
+ (FILE_NOTIFY_INFORMATION *)Notifications.data();
+ I;
+ I = I->NextEntryOffset
+ ? (FILE_NOTIFY_INFORMATION *)((CHAR *)I + I->NextEntryOffset)
+ : NULL) {
+ DirectoryWatcher::Event::EventKind Kind =
+ DirectoryWatcher::Event::EventKind::WatcherGotInvalidated;
+ switch (I->Action) {
+ case FILE_ACTION_ADDED:
+ case FILE_ACTION_MODIFIED:
+ case FILE_ACTION_RENAMED_NEW_NAME:
+ Kind = DirectoryWatcher::Event::EventKind::Modified;
+ break;
+ case FILE_ACTION_REMOVED:
+ case FILE_ACTION_RENAMED_OLD_NAME:
+ Kind = DirectoryWatcher::Event::EventKind::Removed;
+ break;
+ }
+
+ SmallString<MAX_PATH> filename;
+ sys::windows::UTF16ToUTF8(I->FileName, I->FileNameLength / sizeof(WCHAR),
+ filename);
+ Q.emplace(Kind, filename);
+ }
+ }
+
+ (void)CloseHandle(DirectoryHandle);
+}
+
+void DirectoryWatcherWindows::NotifierThreadProc(bool WaitForInitialSync) {
+ // If we did not wait for the initial sync, then we should perform the
+ // scan when we enter the thread.
+ if (!WaitForInitialSync)
+ this->InitialScan();
+
+ while (true) {
+ DirectoryWatcher::Event E = Q.pop_front();
+ Callback(E, /*IsInitial=*/false);
+ if (E.Kind == DirectoryWatcher::Event::EventKind::WatcherGotInvalidated)
+ break;
+ }
+}
+
+auto error(DWORD ErrorCode) {
+ DWORD Flags = FORMAT_MESSAGE_ALLOCATE_BUFFER
+ | FORMAT_MESSAGE_FROM_SYSTEM
+ | FORMAT_MESSAGE_IGNORE_INSERTS;
+
+ LPSTR Buffer;
+ if (!FormatMessageA(Flags, NULL, ErrorCode,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&Buffer,
+ 0, NULL)) {
+ return make_error<llvm::StringError>("error " + utostr(ErrorCode),
+ inconvertibleErrorCode());
+ }
+ std::string Message{Buffer};
+ LocalFree(Buffer);
+ return make_error<llvm::StringError>(Message, inconvertibleErrorCode());
+}
+
} // namespace
llvm::Expected<std::unique_ptr<DirectoryWatcher>>
-clang::DirectoryWatcher::create(
- StringRef Path,
- std::function<void(llvm::ArrayRef<DirectoryWatcher::Event>, bool)> Receiver,
- bool WaitForInitialSync) {
- return llvm::Expected<std::unique_ptr<DirectoryWatcher>>(
- llvm::errorCodeToError(std::make_error_code(std::errc::not_supported)));
+clang::DirectoryWatcher::create(StringRef Path,
+ DirectoryWatcherCallback Receiver,
+ bool WaitForInitialSync) {
+ if (Path.empty())
+ llvm::report_fatal_error(
+ "DirectoryWatcher::create can not accept an empty Path.");
+
+ if (!sys::fs::is_directory(Path))
+ llvm::report_fatal_error(
+ "DirectoryWatcher::create can not accept a filepath.");
+
+ SmallVector<wchar_t, MAX_PATH> WidePath;
+ if (sys::windows::UTF8ToUTF16(Path, WidePath))
+ return llvm::make_error<llvm::StringError>(
+ "unable to convert path to UTF-16", llvm::inconvertibleErrorCode());
+
+ DWORD DesiredAccess = FILE_LIST_DIRECTORY;
+ DWORD ShareMode = FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE;
+ DWORD CreationDisposition = OPEN_EXISTING;
+ DWORD FlagsAndAttributes = FILE_FLAG_BACKUP_SEMANTICS | FILE_FLAG_OVERLAPPED;
+
+ HANDLE DirectoryHandle =
+ CreateFileW(WidePath.data(), DesiredAccess, ShareMode,
+ /*lpSecurityAttributes=*/NULL, CreationDisposition,
+ FlagsAndAttributes, NULL);
+ if (DirectoryHandle == INVALID_HANDLE_VALUE)
+ return error(GetLastError());
+
+ // NOTE: We use the watcher instance as a RAII object to discard the handles
+ // for the directory in case of an error. Hence, this is early allocated,
+ // with the state being written directly to the watcher.
+ return std::make_unique<DirectoryWatcherWindows>(
+ DirectoryHandle, WaitForInitialSync, Receiver);
}
diff --git a/clang/lib/Driver/Action.cpp b/clang/lib/Driver/Action.cpp
index 2ec063d873be..e2d2f6c22de0 100644
--- a/clang/lib/Driver/Action.cpp
+++ b/clang/lib/Driver/Action.cpp
@@ -165,8 +165,8 @@ StringRef Action::GetOffloadKindName(OffloadKind Kind) {
void InputAction::anchor() {}
-InputAction::InputAction(const Arg &_Input, types::ID _Type)
- : Action(InputClass, _Type), Input(_Input) {}
+InputAction::InputAction(const Arg &_Input, types::ID _Type, StringRef _Id)
+ : Action(InputClass, _Type), Input(_Input), Id(_Id.str()) {}
void BindArchAction::anchor() {}
diff --git a/clang/lib/Driver/Compilation.cpp b/clang/lib/Driver/Compilation.cpp
index d33055739080..0144d808cf12 100644
--- a/clang/lib/Driver/Compilation.cpp
+++ b/clang/lib/Driver/Compilation.cpp
@@ -170,11 +170,12 @@ int Compilation::ExecuteCommand(const Command &C,
// Follow gcc implementation of CC_PRINT_OPTIONS; we could also cache the
// output stream.
- if (getDriver().CCPrintOptions && getDriver().CCPrintOptionsFilename) {
+ if (getDriver().CCPrintOptions &&
+ !getDriver().CCPrintOptionsFilename.empty()) {
std::error_code EC;
OwnedStream.reset(new llvm::raw_fd_ostream(
- getDriver().CCPrintOptionsFilename, EC,
- llvm::sys::fs::OF_Append | llvm::sys::fs::OF_Text));
+ getDriver().CCPrintOptionsFilename.c_str(), EC,
+ llvm::sys::fs::OF_Append | llvm::sys::fs::OF_TextWithCRLF));
if (EC) {
getDriver().Diag(diag::err_drv_cc_print_options_failure)
<< EC.message();
diff --git a/clang/lib/Driver/DarwinSDKInfo.cpp b/clang/lib/Driver/DarwinSDKInfo.cpp
deleted file mode 100644
index 761c6717266b..000000000000
--- a/clang/lib/Driver/DarwinSDKInfo.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-//===--- DarwinSDKInfo.cpp - SDK Information parser for darwin - ----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "clang/Driver/DarwinSDKInfo.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/JSON.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
-
-using namespace clang::driver;
-using namespace clang;
-
-Expected<Optional<DarwinSDKInfo>>
-driver::parseDarwinSDKInfo(llvm::vfs::FileSystem &VFS, StringRef SDKRootPath) {
- llvm::SmallString<256> Filepath = SDKRootPath;
- llvm::sys::path::append(Filepath, "SDKSettings.json");
- llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
- VFS.getBufferForFile(Filepath);
- if (!File) {
- // If the file couldn't be read, assume it just doesn't exist.
- return None;
- }
- Expected<llvm::json::Value> Result =
- llvm::json::parse(File.get()->getBuffer());
- if (!Result)
- return Result.takeError();
-
- if (const auto *Obj = Result->getAsObject()) {
- auto VersionString = Obj->getString("Version");
- if (VersionString) {
- VersionTuple Version;
- if (!Version.tryParse(*VersionString))
- return DarwinSDKInfo(Version);
- }
- }
- return llvm::make_error<llvm::StringError>("invalid SDKSettings.json",
- llvm::inconvertibleErrorCode());
-}
diff --git a/clang/lib/Driver/Distro.cpp b/clang/lib/Driver/Distro.cpp
index ee4fe841e7ee..c4cf4e48b5b8 100644
--- a/clang/lib/Driver/Distro.cpp
+++ b/clang/lib/Driver/Distro.cpp
@@ -36,6 +36,7 @@ static Distro::DistroType DetectOsRelease(llvm::vfs::FileSystem &VFS) {
for (StringRef Line : Lines)
if (Version == Distro::UnknownDistro && Line.startswith("ID="))
Version = llvm::StringSwitch<Distro::DistroType>(Line.substr(3))
+ .Case("alpine", Distro::AlpineLinux)
.Case("fedora", Distro::Fedora)
.Case("gentoo", Distro::Gentoo)
.Case("arch", Distro::ArchLinux)
@@ -88,6 +89,7 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
.Case("focal", Distro::UbuntuFocal)
.Case("groovy", Distro::UbuntuGroovy)
.Case("hirsute", Distro::UbuntuHirsute)
+ .Case("impish", Distro::UbuntuImpish)
.Default(Distro::UnknownDistro);
return Version;
}
@@ -188,15 +190,6 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
}
// ...and others.
- if (VFS.exists("/etc/exherbo-release"))
- return Distro::Exherbo;
-
- if (VFS.exists("/etc/alpine-release"))
- return Distro::AlpineLinux;
-
- if (VFS.exists("/etc/arch-release"))
- return Distro::ArchLinux;
-
if (VFS.exists("/etc/gentoo-release"))
return Distro::Gentoo;
diff --git a/clang/lib/Driver/Driver.cpp b/clang/lib/Driver/Driver.cpp
index 418e1d3e8ec9..5c323cb6ea23 100644
--- a/clang/lib/Driver/Driver.cpp
+++ b/clang/lib/Driver/Driver.cpp
@@ -7,9 +7,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Driver.h"
-#include "InputInfo.h"
#include "ToolChains/AIX.h"
#include "ToolChains/AMDGPU.h"
+#include "ToolChains/AMDGPUOpenMP.h"
#include "ToolChains/AVR.h"
#include "ToolChains/Ananas.h"
#include "ToolChains/BareMetal.h"
@@ -53,6 +53,7 @@
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
@@ -62,6 +63,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
@@ -76,6 +78,7 @@
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Host.h"
+#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Process.h"
@@ -133,13 +136,13 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
: Diags(Diags), VFS(std::move(VFS)), Mode(GCCMode),
SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone), LTOMode(LTOK_None),
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
- DriverTitle(Title), CCPrintOptionsFilename(nullptr),
- CCPrintHeadersFilename(nullptr), CCLogDiagnosticsFilename(nullptr),
+ DriverTitle(Title), CCPrintStatReportFilename(), CCPrintOptionsFilename(),
+ CCPrintHeadersFilename(), CCLogDiagnosticsFilename(),
CCCPrintBindings(false), CCPrintOptions(false), CCPrintHeaders(false),
CCLogDiagnostics(false), CCGenDiagnostics(false),
- TargetTriple(TargetTriple), CCCGenericGCCName(""), Saver(Alloc),
- CheckInputsExist(true), GenReproducer(false),
- SuppressMissingInputWarning(false) {
+ CCPrintProcessStats(false), TargetTriple(TargetTriple),
+ CCCGenericGCCName(""), Saver(Alloc), CheckInputsExist(true),
+ GenReproducer(false), SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
this->VFS = llvm::vfs::getRealFileSystem();
@@ -166,28 +169,9 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
ResourceDir = GetResourcesPath(ClangExecutable, CLANG_RESOURCE_DIR);
}
-void Driver::ParseDriverMode(StringRef ProgramName,
- ArrayRef<const char *> Args) {
- if (ClangNameParts.isEmpty())
- ClangNameParts = ToolChain::getTargetAndModeFromProgramName(ProgramName);
- setDriverModeFromOption(ClangNameParts.DriverMode);
-
- for (const char *ArgPtr : Args) {
- // Ignore nullptrs, they are the response file's EOL markers.
- if (ArgPtr == nullptr)
- continue;
- const StringRef Arg = ArgPtr;
- setDriverModeFromOption(Arg);
- }
-}
-
-void Driver::setDriverModeFromOption(StringRef Opt) {
- const std::string OptName =
+void Driver::setDriverMode(StringRef Value) {
+ static const std::string OptName =
getOpts().getOption(options::OPT_driver_mode).getPrefixedName();
- if (!Opt.startswith(OptName))
- return;
- StringRef Value = Opt.drop_front(OptName.size());
-
if (auto M = llvm::StringSwitch<llvm::Optional<DriverMode>>(Value)
.Case("gcc", GCCMode)
.Case("g++", GXXMode)
@@ -517,14 +501,21 @@ static llvm::Triple computeTargetTriple(const Driver &D,
AT = Target.get64BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
+ else if (Target.getEnvironment() == llvm::Triple::MuslX32)
+ Target.setEnvironment(llvm::Triple::Musl);
} else if (A->getOption().matches(options::OPT_mx32) &&
Target.get64BitArchVariant().getArch() == llvm::Triple::x86_64) {
AT = llvm::Triple::x86_64;
- Target.setEnvironment(llvm::Triple::GNUX32);
+ if (Target.getEnvironment() == llvm::Triple::Musl)
+ Target.setEnvironment(llvm::Triple::MuslX32);
+ else
+ Target.setEnvironment(llvm::Triple::GNUX32);
} else if (A->getOption().matches(options::OPT_m32)) {
AT = Target.get32BitArchVariant().getArch();
if (Target.getEnvironment() == llvm::Triple::GNUX32)
Target.setEnvironment(llvm::Triple::GNU);
+ else if (Target.getEnvironment() == llvm::Triple::MuslX32)
+ Target.setEnvironment(llvm::Triple::Musl);
} else if (A->getOption().matches(options::OPT_m16) &&
Target.get32BitArchVariant().getArch() == llvm::Triple::x86) {
AT = llvm::Triple::x86;
@@ -582,9 +573,9 @@ static llvm::Triple computeTargetTriple(const Driver &D,
A = Args.getLastArg(options::OPT_march_EQ);
if (A && Target.isRISCV()) {
StringRef ArchName = A->getValue();
- if (ArchName.startswith_lower("rv32"))
+ if (ArchName.startswith_insensitive("rv32"))
Target.setArch(llvm::Triple::riscv32);
- else if (ArchName.startswith_lower("rv64"))
+ else if (ArchName.startswith_insensitive("rv64"))
Target.setArch(llvm::Triple::riscv64);
}
@@ -592,16 +583,24 @@ static llvm::Triple computeTargetTriple(const Driver &D,
}
// Parse the LTO options and record the type of LTO compilation
-// based on which -f(no-)?lto(=.*)? option occurs last.
-void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
- LTOMode = LTOK_None;
- if (!Args.hasFlag(options::OPT_flto, options::OPT_flto_EQ,
- options::OPT_fno_lto, false))
- return;
+// based on which -f(no-)?lto(=.*)? or -f(no-)?offload-lto(=.*)?
+// option occurs last.
+static llvm::Optional<driver::LTOKind>
+parseLTOMode(Driver &D, const llvm::opt::ArgList &Args, OptSpecifier OptPos,
+ OptSpecifier OptNeg, OptSpecifier OptEq, bool IsOffload) {
+ driver::LTOKind LTOMode = LTOK_None;
+ // Non-offload LTO allows -flto=auto and -flto=jobserver. Offload LTO does
+ // not support those options.
+ if (!Args.hasFlag(OptPos, OptEq, OptNeg, false) &&
+ (IsOffload ||
+ (!Args.hasFlag(options::OPT_flto_EQ_auto, options::OPT_fno_lto, false) &&
+ !Args.hasFlag(options::OPT_flto_EQ_jobserver, options::OPT_fno_lto,
+ false))))
+ return None;
StringRef LTOName("full");
- const Arg *A = Args.getLastArg(options::OPT_flto_EQ);
+ const Arg *A = Args.getLastArg(OptEq);
if (A)
LTOName = A->getValue();
@@ -612,9 +611,27 @@ void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
if (LTOMode == LTOK_Unknown) {
assert(A);
- Diag(diag::err_drv_unsupported_option_argument) << A->getOption().getName()
- << A->getValue();
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << A->getValue();
+ return None;
}
+ return LTOMode;
+}
+
+// Parse the LTO options.
+void Driver::setLTOMode(const llvm::opt::ArgList &Args) {
+ LTOMode = LTOK_None;
+ if (auto M = parseLTOMode(*this, Args, options::OPT_flto,
+ options::OPT_fno_lto, options::OPT_flto_EQ,
+ /*IsOffload=*/false))
+ LTOMode = M.getValue();
+
+ OffloadLTOMode = LTOK_None;
+ if (auto M = parseLTOMode(*this, Args, options::OPT_foffload_lto,
+ options::OPT_fno_offload_lto,
+ options::OPT_foffload_lto_EQ,
+ /*IsOffload=*/true))
+ OffloadLTOMode = M.getValue();
}
/// Compute the desired OpenMP runtime from the flags provided.
@@ -739,18 +756,27 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
Diag(clang::diag::err_drv_invalid_omp_target) << Val;
else {
const ToolChain *TC;
- // CUDA toolchains have to be selected differently. They pair host
+ // Device toolchains have to be selected differently. They pair host
// and device in their implementation.
- if (TT.isNVPTX()) {
+ if (TT.isNVPTX() || TT.isAMDGCN()) {
const ToolChain *HostTC =
C.getSingleOffloadToolChain<Action::OFK_Host>();
assert(HostTC && "Host toolchain should be always defined.");
- auto &CudaTC =
+ auto &DeviceTC =
ToolChains[TT.str() + "/" + HostTC->getTriple().normalize()];
- if (!CudaTC)
- CudaTC = std::make_unique<toolchains::CudaToolChain>(
- *this, TT, *HostTC, C.getInputArgs(), Action::OFK_OpenMP);
- TC = CudaTC.get();
+ if (!DeviceTC) {
+ if (TT.isNVPTX())
+ DeviceTC = std::make_unique<toolchains::CudaToolChain>(
+ *this, TT, *HostTC, C.getInputArgs(), Action::OFK_OpenMP);
+ else if (TT.isAMDGCN())
+ DeviceTC =
+ std::make_unique<toolchains::AMDGPUOpenMPToolChain>(
+ *this, TT, *HostTC, C.getInputArgs());
+ else
+ assert(DeviceTC && "Device toolchain not defined.");
+ }
+
+ TC = DeviceTC.get();
} else
TC = &getToolChain(C.getInputArgs(), TT);
C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
@@ -987,7 +1013,10 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
// We look for the driver mode option early, because the mode can affect
// how other options are parsed.
- ParseDriverMode(ClangExecutable, ArgList.slice(1));
+
+ auto DriverMode = getDriverMode(ClangExecutable, ArgList.slice(1));
+ if (!DriverMode.empty())
+ setDriverMode(DriverMode);
// FIXME: What are we going to do with -V and -b?
@@ -1085,6 +1114,15 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
options::OPT_fno_crash_diagnostics,
!!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
+
+ // Process -fproc-stat-report options.
+ if (const Arg *A = Args.getLastArg(options::OPT_fproc_stat_report_EQ)) {
+ CCPrintProcessStats = true;
+ CCPrintStatReportFilename = A->getValue();
+ }
+ if (Args.hasArg(options::OPT_fproc_stat_report))
+ CCPrintProcessStats = true;
+
// FIXME: TargetTriple is used by the target-prefixed calls to as/ld
// and getToolChain is const.
if (IsCLMode()) {
@@ -1434,7 +1472,9 @@ void Driver::generateCompilationDiagnostics(
llvm::SmallString<128> Script(CrashInfo.Filename);
llvm::sys::path::replace_extension(Script, "sh");
std::error_code EC;
- llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::CD_CreateNew);
+ llvm::raw_fd_ostream ScriptOS(Script, EC, llvm::sys::fs::CD_CreateNew,
+ llvm::sys::fs::FA_Write,
+ llvm::sys::fs::OF_Text);
if (EC) {
Diag(clang::diag::note_drv_command_failed_diag_msg)
<< "Error generating run script: " << Script << " " << EC.message();
@@ -1471,8 +1511,7 @@ void Driver::generateCompilationDiagnostics(
}
}
- for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file,
- options::OPT_frewrite_map_file_EQ))
+ for (const auto &A : C.getArgs().filtered(options::OPT_frewrite_map_file_EQ))
Diag(clang::diag::note_drv_command_failed_diag_msg) << A->getValue();
Diag(clang::diag::note_drv_command_failed_diag_msg)
@@ -1581,7 +1620,7 @@ void Driver::PrintHelp(bool ShowHidden) const {
ExcludedFlagsBitmask |= options::FlangOnlyOption;
std::string Usage = llvm::formatv("{0} [options] file...", Name).str();
- getOpts().PrintHelp(llvm::outs(), Usage.c_str(), DriverTitle.c_str(),
+ getOpts().printHelp(llvm::outs(), Usage.c_str(), DriverTitle.c_str(),
IncludedFlagsBitmask, ExcludedFlagsBitmask,
/*ShowAllAliases=*/false);
}
@@ -1701,7 +1740,7 @@ void Driver::HandleAutocompletions(StringRef PassedFlags) const {
// case-insensitive sorting for consistency with the -help option
// which prints out options in the case-insensitive alphabetical order.
llvm::sort(SuggestedCompletions, [](StringRef A, StringRef B) {
- if (int X = A.compare_lower(B))
+ if (int X = A.compare_insensitive(B))
return X < 0;
return A.compare(B) > 0;
});
@@ -1802,6 +1841,15 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_runtime_dir)) {
+ std::string CandidateRuntimePath = TC.getRuntimePath();
+ if (getVFS().exists(CandidateRuntimePath))
+ llvm::outs() << CandidateRuntimePath << '\n';
+ else
+ llvm::outs() << TC.getCompilerRTPath() << '\n';
+ return false;
+ }
+
// FIXME: The following handlers should use a callback mechanism, we don't
// know what the client would like to do.
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
@@ -1870,6 +1918,12 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_multiarch)) {
+ llvm::outs() << TC.getMultiarchTriple(*this, TC.getTriple(), SysRoot)
+ << "\n";
+ return false;
+ }
+
if (C.getArgs().hasArg(options::OPT_print_targets)) {
llvm::TargetRegistry::printRegisteredTargetsForVersion(llvm::outs());
return false;
@@ -2179,15 +2233,20 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
// stdin must be handled specially.
if (memcmp(Value, "-", 2) == 0) {
- // If running with -E, treat as a C input (this changes the builtin
- // macros, for example). This may be overridden by -ObjC below.
- //
- // Otherwise emit an error but still use a valid type to avoid
- // spurious errors (e.g., no inputs).
- if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
- Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
- : clang::diag::err_drv_unknown_stdin_type);
- Ty = types::TY_C;
+ if (IsFlangMode()) {
+ Ty = types::TY_Fortran;
+ } else {
+ // If running with -E, treat as a C input (this changes the
+ // builtin macros, for example). This may be overridden by -ObjC
+ // below.
+ //
+ // Otherwise emit an error but still use a valid type to avoid
+ // spurious errors (e.g., no inputs).
+ if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP())
+ Diag(IsCLMode() ? clang::diag::err_drv_unknown_stdin_type_clang_cl
+ : clang::diag::err_drv_unknown_stdin_type);
+ Ty = types::TY_C;
+ }
} else {
// Otherwise lookup by extension.
// Fallback is C if invoked as C preprocessor, C++ if invoked with
@@ -2443,6 +2502,14 @@ class OffloadingActionBuilder final {
/// Default GPU architecture if there's no one specified.
CudaArch DefaultCudaArch = CudaArch::UNKNOWN;
+ /// Method to generate compilation unit ID specified by option
+ /// '-fuse-cuid='.
+ enum UseCUIDKind { CUID_Hash, CUID_Random, CUID_None, CUID_Invalid };
+ UseCUIDKind UseCUID = CUID_Hash;
+
+ /// Compilation unit ID specified by option '-cuid='.
+ StringRef FixedCUID;
+
public:
CudaActionBuilderBase(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs,
@@ -2479,9 +2546,32 @@ class OffloadingActionBuilder final {
// Replicate inputs for each GPU architecture.
auto Ty = IA->getType() == types::TY_HIP ? types::TY_HIP_DEVICE
: types::TY_CUDA_DEVICE;
+ std::string CUID = FixedCUID.str();
+ if (CUID.empty()) {
+ if (UseCUID == CUID_Random)
+ CUID = llvm::utohexstr(llvm::sys::Process::GetRandomNumber(),
+ /*LowerCase=*/true);
+ else if (UseCUID == CUID_Hash) {
+ llvm::MD5 Hasher;
+ llvm::MD5::MD5Result Hash;
+ SmallString<256> RealPath;
+ llvm::sys::fs::real_path(IA->getInputArg().getValue(), RealPath,
+ /*expand_tilde=*/true);
+ Hasher.update(RealPath);
+ for (auto *A : Args) {
+ if (A->getOption().matches(options::OPT_INPUT))
+ continue;
+ Hasher.update(A->getAsString(Args));
+ }
+ Hasher.final(Hash);
+ CUID = llvm::utohexstr(Hash.low(), /*LowerCase=*/true);
+ }
+ }
+ IA->setId(CUID);
+
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
CudaDeviceActions.push_back(
- C.MakeAction<InputAction>(IA->getInputArg(), Ty));
+ C.MakeAction<InputAction>(IA->getInputArg(), Ty, IA->getId()));
}
return ABRT_Success;
@@ -2603,6 +2693,21 @@ class OffloadingActionBuilder final {
options::OPT_cuda_device_only);
EmitLLVM = Args.getLastArg(options::OPT_emit_llvm);
EmitAsm = Args.getLastArg(options::OPT_S);
+ FixedCUID = Args.getLastArgValue(options::OPT_cuid_EQ);
+ if (Arg *A = Args.getLastArg(options::OPT_fuse_cuid_EQ)) {
+ StringRef UseCUIDStr = A->getValue();
+ UseCUID = llvm::StringSwitch<UseCUIDKind>(UseCUIDStr)
+ .Case("hash", CUID_Hash)
+ .Case("random", CUID_Random)
+ .Case("none", CUID_None)
+ .Default(CUID_Invalid);
+ if (UseCUID == CUID_Invalid) {
+ C.getDriver().Diag(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << UseCUIDStr;
+ C.setContainsError();
+ return true;
+ }
+ }
// Collect all cuda_gpu_arch parameters, removing duplicates.
std::set<StringRef> GpuArchs;
@@ -2665,7 +2770,7 @@ class OffloadingActionBuilder final {
StringRef getCanonicalOffloadArch(StringRef ArchStr) override {
CudaArch Arch = StringToCudaArch(ArchStr);
- if (Arch == CudaArch::UNKNOWN) {
+ if (Arch == CudaArch::UNKNOWN || !IsNVIDIAGpuArch(Arch)) {
C.getDriver().Diag(clang::diag::err_drv_cuda_bad_gpu_arch) << ArchStr;
return StringRef();
}
@@ -2786,12 +2891,25 @@ class OffloadingActionBuilder final {
class HIPActionBuilder final : public CudaActionBuilderBase {
/// The linker inputs obtained for each device arch.
SmallVector<ActionList, 8> DeviceLinkerInputs;
+ bool GPUSanitize;
+ // The default bundling behavior depends on the type of output, therefore
+ // BundleOutput needs to be tri-value: None, true, or false.
+ // Bundle code objects except --no-gpu-output is specified for device
+ // only compilation. Bundle other type of output files only if
+ // --gpu-bundle-output is specified for device only compilation.
+ Optional<bool> BundleOutput;
public:
HIPActionBuilder(Compilation &C, DerivedArgList &Args,
const Driver::InputList &Inputs)
: CudaActionBuilderBase(C, Args, Inputs, Action::OFK_HIP) {
DefaultCudaArch = CudaArch::GFX803;
+ GPUSanitize = Args.hasFlag(options::OPT_fgpu_sanitize,
+ options::OPT_fno_gpu_sanitize, false);
+ if (Args.hasArg(options::OPT_gpu_bundle_output,
+ options::OPT_no_gpu_bundle_output))
+ BundleOutput = Args.hasFlag(options::OPT_gpu_bundle_output,
+ options::OPT_no_gpu_bundle_output);
}
bool canUseBundlerUnbundler() const override { return true; }
@@ -2840,17 +2958,31 @@ class OffloadingActionBuilder final {
// a fat binary containing all the code objects for different GPU's.
// The fat binary is then an input to the host action.
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
- auto BackendAction = C.getDriver().ConstructPhaseAction(
- C, Args, phases::Backend, CudaDeviceActions[I],
- AssociatedOffloadKind);
- auto AssembleAction = C.getDriver().ConstructPhaseAction(
- C, Args, phases::Assemble, BackendAction, AssociatedOffloadKind);
- // Create a link action to link device IR with device library
- // and generate ISA.
- ActionList AL;
- AL.push_back(AssembleAction);
- CudaDeviceActions[I] =
- C.MakeAction<LinkJobAction>(AL, types::TY_Image);
+ if (C.getDriver().isUsingLTO(/*IsOffload=*/true)) {
+ // When LTO is enabled, skip the backend and assemble phases and
+ // use lld to link the bitcode.
+ ActionList AL;
+ AL.push_back(CudaDeviceActions[I]);
+ // Create a link action to link device IR with device library
+ // and generate ISA.
+ CudaDeviceActions[I] =
+ C.MakeAction<LinkJobAction>(AL, types::TY_Image);
+ } else {
+ // When LTO is not enabled, we follow the conventional
+ // compiler phases, including backend and assemble phases.
+ ActionList AL;
+ auto BackendAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Backend, CudaDeviceActions[I],
+ AssociatedOffloadKind);
+ auto AssembleAction = C.getDriver().ConstructPhaseAction(
+ C, Args, phases::Assemble, BackendAction,
+ AssociatedOffloadKind);
+ AL.push_back(AssembleAction);
+ // Create a link action to link device IR with device library
+ // and generate ISA.
+ CudaDeviceActions[I] =
+ C.MakeAction<LinkJobAction>(AL, types::TY_Image);
+ }
// OffloadingActionBuilder propagates device arch until an offload
// action. Since the next action for creating fatbin does
@@ -2864,22 +2996,25 @@ class OffloadingActionBuilder final {
CudaDeviceActions[I] = C.MakeAction<OffloadAction>(
DDep, CudaDeviceActions[I]->getType());
}
- // Create HIP fat binary with a special "link" action.
- CudaFatBinary =
- C.MakeAction<LinkJobAction>(CudaDeviceActions,
- types::TY_HIP_FATBIN);
- if (!CompileDeviceOnly) {
- DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
- AssociatedOffloadKind);
- // Clear the fat binary, it is already a dependence to an host
- // action.
- CudaFatBinary = nullptr;
- }
+ if (!CompileDeviceOnly || !BundleOutput.hasValue() ||
+ BundleOutput.getValue()) {
+ // Create HIP fat binary with a special "link" action.
+ CudaFatBinary = C.MakeAction<LinkJobAction>(CudaDeviceActions,
+ types::TY_HIP_FATBIN);
- // Remove the CUDA actions as they are already connected to an host
- // action or fat binary.
- CudaDeviceActions.clear();
+ if (!CompileDeviceOnly) {
+ DA.add(*CudaFatBinary, *ToolChains.front(), /*BoundArch=*/nullptr,
+ AssociatedOffloadKind);
+ // Clear the fat binary, it is already a dependence to an host
+ // action.
+ CudaFatBinary = nullptr;
+ }
+
+ // Remove the CUDA actions as they are already connected to an host
+ // action or fat binary.
+ CudaDeviceActions.clear();
+ }
return CompileDeviceOnly ? ABRT_Ignore_Host : ABRT_Success;
} else if (CurPhase == phases::Link) {
@@ -2906,6 +3041,20 @@ class OffloadingActionBuilder final {
A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A,
AssociatedOffloadKind);
+ if (CompileDeviceOnly && CurPhase == FinalPhase &&
+ BundleOutput.hasValue() && BundleOutput.getValue()) {
+ for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*CudaDeviceActions[I], *ToolChains.front(), GpuArchList[I],
+ AssociatedOffloadKind);
+ CudaDeviceActions[I] = C.MakeAction<OffloadAction>(
+ DDep, CudaDeviceActions[I]->getType());
+ }
+ CudaFatBinary =
+ C.MakeAction<OffloadBundlingJobAction>(CudaDeviceActions);
+ CudaDeviceActions.clear();
+ }
+
return (CompileDeviceOnly && CurPhase == FinalPhase) ? ABRT_Ignore_Host
: ABRT_Success;
}
@@ -3343,7 +3492,7 @@ public:
return nullptr;
// Let builders add host linking actions.
- Action* HA;
+ Action* HA = nullptr;
for (DeviceActionBuilder *SB : SpecializedBuilders) {
if (!SB->isValid())
continue;
@@ -3418,7 +3567,8 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
if (Args.hasArg(options::OPT_emit_llvm))
Diag(clang::diag::err_drv_emit_llvm_link);
if (IsCLMode() && LTOMode != LTOK_None &&
- !Args.getLastArgValue(options::OPT_fuse_ld_EQ).equals_lower("lld"))
+ !Args.getLastArgValue(options::OPT_fuse_ld_EQ)
+ .equals_insensitive("lld"))
Diag(clang::diag::err_drv_lto_without_lld);
}
@@ -3890,10 +4040,13 @@ void Driver::BuildJobs(Compilation &C) const {
}
const llvm::Triple &RawTriple = C.getDefaultToolChain().getTriple();
- if (RawTriple.isOSAIX())
+ if (RawTriple.isOSAIX()) {
if (Arg *A = C.getArgs().getLastArg(options::OPT_G))
Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << RawTriple.str();
+ if (LTOMode == LTOK_Thin)
+ Diag(diag::err_drv_clang_unsupported) << "thinLTO on AIX";
+ }
// Collect the list of architectures.
llvm::StringSet<> ArchNames;
@@ -3927,66 +4080,64 @@ void Driver::BuildJobs(Compilation &C) const {
/*TargetDeviceOffloadKind*/ Action::OFK_None);
}
- StringRef StatReportFile;
- bool PrintProcessStat = false;
- if (const Arg *A = C.getArgs().getLastArg(options::OPT_fproc_stat_report_EQ))
- StatReportFile = A->getValue();
- if (C.getArgs().hasArg(options::OPT_fproc_stat_report))
- PrintProcessStat = true;
-
// If we have more than one job, then disable integrated-cc1 for now. Do this
// also when we need to report process execution statistics.
- if (C.getJobs().size() > 1 || !StatReportFile.empty() || PrintProcessStat)
+ if (C.getJobs().size() > 1 || CCPrintProcessStats)
for (auto &J : C.getJobs())
J.InProcess = false;
- if (!StatReportFile.empty() || PrintProcessStat) {
+ if (CCPrintProcessStats) {
C.setPostCallback([=](const Command &Cmd, int Res) {
Optional<llvm::sys::ProcessStatistics> ProcStat =
Cmd.getProcessStatistics();
if (!ProcStat)
return;
- if (PrintProcessStat) {
+
+ const char *LinkingOutput = nullptr;
+ if (FinalOutput)
+ LinkingOutput = FinalOutput->getValue();
+ else if (!Cmd.getOutputFilenames().empty())
+ LinkingOutput = Cmd.getOutputFilenames().front().c_str();
+ else
+ LinkingOutput = getDefaultImageName();
+
+ if (CCPrintStatReportFilename.empty()) {
using namespace llvm;
// Human readable output.
outs() << sys::path::filename(Cmd.getExecutable()) << ": "
- << "output=";
- if (Cmd.getOutputFilenames().empty())
- outs() << "\"\"";
- else
- outs() << Cmd.getOutputFilenames().front();
+ << "output=" << LinkingOutput;
outs() << ", total="
<< format("%.3f", ProcStat->TotalTime.count() / 1000.) << " ms"
<< ", user="
<< format("%.3f", ProcStat->UserTime.count() / 1000.) << " ms"
<< ", mem=" << ProcStat->PeakMemory << " Kb\n";
- }
- if (!StatReportFile.empty()) {
+ } else {
// CSV format.
std::string Buffer;
llvm::raw_string_ostream Out(Buffer);
llvm::sys::printArg(Out, llvm::sys::path::filename(Cmd.getExecutable()),
/*Quote*/ true);
Out << ',';
- if (Cmd.getOutputFilenames().empty())
- Out << "\"\"";
- else
- llvm::sys::printArg(Out, Cmd.getOutputFilenames().front(), true);
+ llvm::sys::printArg(Out, LinkingOutput, true);
Out << ',' << ProcStat->TotalTime.count() << ','
<< ProcStat->UserTime.count() << ',' << ProcStat->PeakMemory
<< '\n';
Out.flush();
std::error_code EC;
- llvm::raw_fd_ostream OS(StatReportFile, EC, llvm::sys::fs::OF_Append);
+ llvm::raw_fd_ostream OS(CCPrintStatReportFilename.c_str(), EC,
+ llvm::sys::fs::OF_Append |
+ llvm::sys::fs::OF_Text);
if (EC)
return;
auto L = OS.lock();
if (!L) {
- llvm::errs() << "ERROR: Cannot lock file " << StatReportFile << ": "
+ llvm::errs() << "ERROR: Cannot lock file "
+ << CCPrintStatReportFilename << ": "
<< toString(L.takeError()) << "\n";
return;
}
OS << Buffer;
+ OS.flush();
}
});
}
@@ -4479,6 +4630,25 @@ InputInfo Driver::BuildJobsForActionNoCache(
if (!T)
return InputInfo();
+ if (BuildingForOffloadDevice &&
+ A->getOffloadingDeviceKind() == Action::OFK_OpenMP) {
+ if (TC->getTriple().isAMDGCN()) {
+ // AMDGCN treats backend and assemble actions as no-op because
+ // linker does not support object files.
+ if (const BackendJobAction *BA = dyn_cast<BackendJobAction>(A)) {
+ return BuildJobsForAction(C, *BA->input_begin(), TC, BoundArch,
+ AtTopLevel, MultipleArchs, LinkingOutput,
+ CachedResults, TargetDeviceOffloadKind);
+ }
+
+ if (const AssembleJobAction *AA = dyn_cast<AssembleJobAction>(A)) {
+ return BuildJobsForAction(C, *AA->input_begin(), TC, BoundArch,
+ AtTopLevel, MultipleArchs, LinkingOutput,
+ CachedResults, TargetDeviceOffloadKind);
+ }
+ }
+ }
+
// If we've collapsed action list that contained OffloadAction we
// need to build jobs for host/device-side inputs it may have held.
for (const auto *OA : CollapsedOffloadActions)
@@ -4598,11 +4768,12 @@ InputInfo Driver::BuildJobsForActionNoCache(
/*CreatePrefixForHost=*/!!A->getOffloadingHostActiveKinds() &&
!AtTopLevel);
if (isa<OffloadWrapperJobAction>(JA)) {
- OffloadingPrefix += "-wrapper";
if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
BaseInput = FinalOutput->getValue();
else
BaseInput = getDefaultImageName();
+ BaseInput =
+ C.getArgs().MakeArgString(std::string(BaseInput) + "-wrapper");
}
Result = InputInfo(A, GetNamedOutputPath(C, *JA, BaseInput, BoundArch,
AtTopLevel, MultipleArchs,
@@ -4729,6 +4900,11 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
return "-";
}
+ if (JA.getType() == types::TY_ModuleFile &&
+ C.getArgs().getLastArg(options::OPT_module_file_info)) {
+ return "-";
+ }
+
// Is this the assembly listing for /FA?
if (JA.getType() == types::TY_PP_Asm &&
(C.getArgs().hasArg(options::OPT__SLASH_FA) ||
@@ -4963,11 +5139,6 @@ void Driver::generatePrefixedToolNames(
// FIXME: Needs a better variable than TargetTriple
Names.emplace_back((TargetTriple + "-" + Tool).str());
Names.emplace_back(Tool);
-
- // Allow the discovery of tools prefixed with LLVM's default target triple.
- std::string DefaultTargetTriple = llvm::sys::getDefaultTargetTriple();
- if (DefaultTargetTriple != TargetTriple)
- Names.emplace_back((DefaultTargetTriple + "-" + Tool).str());
}
static bool ScanDirForExecutable(SmallString<128> &Dir, StringRef Name) {
@@ -5158,7 +5329,7 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::MSVC:
case llvm::Triple::UnknownEnvironment:
if (Args.getLastArgValue(options::OPT_fuse_ld_EQ)
- .startswith_lower("bfd"))
+ .startswith_insensitive("bfd"))
TC = std::make_unique<toolchains::CrossWindowsToolChain>(
*this, Target, Args);
else
@@ -5387,3 +5558,21 @@ bool clang::driver::willEmitRemarks(const ArgList &Args) {
return true;
return false;
}
+
+llvm::StringRef clang::driver::getDriverMode(StringRef ProgName,
+ ArrayRef<const char *> Args) {
+ static const std::string OptName =
+ getDriverOptTable().getOption(options::OPT_driver_mode).getPrefixedName();
+ llvm::StringRef Opt;
+ for (StringRef Arg : Args) {
+ if (!Arg.startswith(OptName))
+ continue;
+ Opt = Arg;
+ break;
+ }
+ if (Opt.empty())
+ Opt = ToolChain::getTargetAndModeFromProgramName(ProgName).DriverMode;
+ return Opt.consume_front(OptName) ? Opt : "";
+}
+
+bool driver::IsClangCL(StringRef DriverMode) { return DriverMode.equals("cl"); }
diff --git a/clang/lib/Driver/Job.cpp b/clang/lib/Driver/Job.cpp
index 911fd5df3ca2..5b87106b6565 100644
--- a/clang/lib/Driver/Job.cpp
+++ b/clang/lib/Driver/Job.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Job.h"
-#include "InputInfo.h"
#include "clang/Basic/LLVM.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/ArrayRef.h"
@@ -43,7 +43,7 @@ Command::Command(const Action &Source, const Tool &Creator,
Executable(Executable), Arguments(Arguments) {
for (const auto &II : Inputs)
if (II.isFilename())
- InputFilenames.push_back(II.getFilename());
+ InputInfoList.push_back(II);
for (const auto &II : Outputs)
if (II.isFilename())
OutputFilenames.push_back(II.getFilename());
@@ -237,9 +237,10 @@ void Command::Print(raw_ostream &OS, const char *Terminator, bool Quote,
}
}
- auto Found = llvm::find_if(InputFilenames,
- [&Arg](StringRef IF) { return IF == Arg; });
- if (Found != InputFilenames.end() &&
+ auto Found = llvm::find_if(InputInfoList, [&Arg](const InputInfo &II) {
+ return II.getFilename() == Arg;
+ });
+ if (Found != InputInfoList.end() &&
(i == 0 || StringRef(Args[i - 1]) != "-main-file-name")) {
// Replace the input file name with the crashinfo's file name.
OS << ' ';
@@ -302,8 +303,8 @@ void Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
void Command::PrintFileNames() const {
if (PrintInputFilenames) {
- for (const char *Arg : InputFilenames)
- llvm::outs() << llvm::sys::path::filename(Arg) << "\n";
+ for (const auto &Arg : InputInfoList)
+ llvm::outs() << llvm::sys::path::filename(Arg.getFilename()) << "\n";
llvm::outs().flush();
}
}
@@ -414,50 +415,6 @@ void CC1Command::setEnvironment(llvm::ArrayRef<const char *> NewEnvironment) {
"The CC1Command doesn't support changing the environment vars!");
}
-FallbackCommand::FallbackCommand(const Action &Source_, const Tool &Creator_,
- ResponseFileSupport ResponseSupport,
- const char *Executable_,
- const llvm::opt::ArgStringList &Arguments_,
- ArrayRef<InputInfo> Inputs,
- ArrayRef<InputInfo> Outputs,
- std::unique_ptr<Command> Fallback_)
- : Command(Source_, Creator_, ResponseSupport, Executable_, Arguments_,
- Inputs, Outputs),
- Fallback(std::move(Fallback_)) {}
-
-void FallbackCommand::Print(raw_ostream &OS, const char *Terminator,
- bool Quote, CrashReportInfo *CrashInfo) const {
- Command::Print(OS, "", Quote, CrashInfo);
- OS << " ||";
- Fallback->Print(OS, Terminator, Quote, CrashInfo);
-}
-
-static bool ShouldFallback(int ExitCode) {
- // FIXME: We really just want to fall back for internal errors, such
- // as when some symbol cannot be mangled, when we should be able to
- // parse something but can't, etc.
- return ExitCode != 0;
-}
-
-int FallbackCommand::Execute(ArrayRef<llvm::Optional<StringRef>> Redirects,
- std::string *ErrMsg, bool *ExecutionFailed) const {
- int PrimaryStatus = Command::Execute(Redirects, ErrMsg, ExecutionFailed);
- if (!ShouldFallback(PrimaryStatus))
- return PrimaryStatus;
-
- // Clear ExecutionFailed and ErrMsg before falling back.
- if (ErrMsg)
- ErrMsg->clear();
- if (ExecutionFailed)
- *ExecutionFailed = false;
-
- const Driver &D = getCreator().getToolChain().getDriver();
- D.Diag(diag::warn_drv_invoking_fallback) << Fallback->getExecutable();
-
- int SecondaryStatus = Fallback->Execute(Redirects, ErrMsg, ExecutionFailed);
- return SecondaryStatus;
-}
-
ForceSuccessCommand::ForceSuccessCommand(
const Action &Source_, const Tool &Creator_,
ResponseFileSupport ResponseSupport, const char *Executable_,
diff --git a/clang/lib/Driver/SanitizerArgs.cpp b/clang/lib/Driver/SanitizerArgs.cpp
index 5c275353b679..8770fb1cf9fe 100644
--- a/clang/lib/Driver/SanitizerArgs.cpp
+++ b/clang/lib/Driver/SanitizerArgs.cpp
@@ -18,6 +18,7 @@
#include "llvm/Support/SpecialCaseList.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
#include <memory>
using namespace clang;
@@ -133,41 +134,41 @@ static void validateSpecialCaseListFormat(const Driver &D,
D.Diag(MalformedSCLErrorDiagID) << BLError;
}
-static void addDefaultBlacklists(const Driver &D, SanitizerMask Kinds,
- std::vector<std::string> &BlacklistFiles) {
- struct Blacklist {
+static void addDefaultIgnorelists(const Driver &D, SanitizerMask Kinds,
+ std::vector<std::string> &IgnorelistFiles) {
+ struct Ignorelist {
const char *File;
SanitizerMask Mask;
- } Blacklists[] = {{"asan_blacklist.txt", SanitizerKind::Address},
- {"hwasan_blacklist.txt", SanitizerKind::HWAddress},
- {"memtag_blacklist.txt", SanitizerKind::MemTag},
- {"msan_blacklist.txt", SanitizerKind::Memory},
- {"tsan_blacklist.txt", SanitizerKind::Thread},
- {"dfsan_abilist.txt", SanitizerKind::DataFlow},
- {"cfi_blacklist.txt", SanitizerKind::CFI},
- {"ubsan_blacklist.txt",
- SanitizerKind::Undefined | SanitizerKind::Integer |
- SanitizerKind::Nullability |
- SanitizerKind::FloatDivideByZero}};
-
- for (auto BL : Blacklists) {
+ } Ignorelists[] = {{"asan_ignorelist.txt", SanitizerKind::Address},
+ {"hwasan_ignorelist.txt", SanitizerKind::HWAddress},
+ {"memtag_ignorelist.txt", SanitizerKind::MemTag},
+ {"msan_ignorelist.txt", SanitizerKind::Memory},
+ {"tsan_ignorelist.txt", SanitizerKind::Thread},
+ {"dfsan_abilist.txt", SanitizerKind::DataFlow},
+ {"cfi_ignorelist.txt", SanitizerKind::CFI},
+ {"ubsan_ignorelist.txt",
+ SanitizerKind::Undefined | SanitizerKind::Integer |
+ SanitizerKind::Nullability |
+ SanitizerKind::FloatDivideByZero}};
+
+ for (auto BL : Ignorelists) {
if (!(Kinds & BL.Mask))
continue;
clang::SmallString<64> Path(D.ResourceDir);
llvm::sys::path::append(Path, "share", BL.File);
if (D.getVFS().exists(Path))
- BlacklistFiles.push_back(std::string(Path.str()));
+ IgnorelistFiles.push_back(std::string(Path.str()));
else if (BL.Mask == SanitizerKind::CFI)
- // If cfi_blacklist.txt cannot be found in the resource dir, driver
+ // If cfi_ignorelist.txt cannot be found in the resource dir, driver
// should fail.
D.Diag(clang::diag::err_drv_no_such_file) << Path;
}
validateSpecialCaseListFormat(
- D, BlacklistFiles, clang::diag::err_drv_malformed_sanitizer_blacklist);
+ D, IgnorelistFiles, clang::diag::err_drv_malformed_sanitizer_ignorelist);
}
-/// Parse -f(no-)?sanitize-(coverage-)?(white|black)list argument's values,
+/// Parse -f(no-)?sanitize-(coverage-)?(white|ignore)list argument values,
/// diagnosing any invalid file paths and validating special case list format.
static void parseSpecialCaseListArg(const Driver &D,
const llvm::opt::ArgList &Args,
@@ -176,7 +177,7 @@ static void parseSpecialCaseListArg(const Driver &D,
llvm::opt::OptSpecifier NoSCLOptionID,
unsigned MalformedSCLErrorDiagID) {
for (const auto *Arg : Args) {
- // Match -fsanitize-(coverage-)?(white|black)list.
+ // Match -fsanitize-(coverage-)?(white|ignore)list.
if (Arg->getOption().matches(SCLOptionID)) {
Arg->claim();
std::string SCLPath = Arg->getValue();
@@ -185,7 +186,7 @@ static void parseSpecialCaseListArg(const Driver &D,
} else {
D.Diag(clang::diag::err_drv_no_such_file) << SCLPath;
}
- // Match -fno-sanitize-blacklist.
+ // Match -fno-sanitize-ignorelist.
} else if (Arg->getOption().matches(NoSCLOptionID)) {
Arg->claim();
SCLFiles.clear();
@@ -581,18 +582,18 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
TrappingKinds &= Kinds;
RecoverableKinds &= ~TrappingKinds;
- // Setup blacklist files.
- // Add default blacklist from resource directory for activated sanitizers, and
- // validate special case lists format.
- if (!Args.hasArgNoClaim(options::OPT_fno_sanitize_blacklist))
- addDefaultBlacklists(D, Kinds, SystemBlacklistFiles);
+ // Setup ignorelist files.
+ // Add default ignorelist from resource directory for activated sanitizers,
+ // and validate special case lists format.
+ if (!Args.hasArgNoClaim(options::OPT_fno_sanitize_ignorelist))
+ addDefaultIgnorelists(D, Kinds, SystemIgnorelistFiles);
- // Parse -f(no-)?sanitize-blacklist options.
+ // Parse -f(no-)?sanitize-ignorelist options.
// This also validates special case lists format.
- parseSpecialCaseListArg(D, Args, UserBlacklistFiles,
- options::OPT_fsanitize_blacklist,
- options::OPT_fno_sanitize_blacklist,
- clang::diag::err_drv_malformed_sanitizer_blacklist);
+ parseSpecialCaseListArg(D, Args, UserIgnorelistFiles,
+ options::OPT_fsanitize_ignorelist_EQ,
+ options::OPT_fno_sanitize_ignorelist,
+ clang::diag::err_drv_malformed_sanitizer_ignorelist);
// Parse -f[no-]sanitize-memory-track-origins[=level] options.
if (AllAddedKinds & SanitizerKind::Memory) {
@@ -746,7 +747,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
CoverageFeatures |= CoverageFunc;
}
- // Parse -fsanitize-coverage-(black|white)list options if coverage enabled.
+  // Parse -fsanitize-coverage-(ignore|white)list options if coverage is enabled.
// This also validates special case lists format.
// Here, OptSpecifier() acts as a never-matching command-line argument.
// So, there is no way to clear coverage lists but you can append to them.
@@ -756,9 +757,9 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fsanitize_coverage_allowlist, OptSpecifier(),
clang::diag::err_drv_malformed_sanitizer_coverage_whitelist);
parseSpecialCaseListArg(
- D, Args, CoverageBlocklistFiles,
- options::OPT_fsanitize_coverage_blocklist, OptSpecifier(),
- clang::diag::err_drv_malformed_sanitizer_coverage_blacklist);
+ D, Args, CoverageIgnorelistFiles,
+ options::OPT_fsanitize_coverage_ignorelist, OptSpecifier(),
+ clang::diag::err_drv_malformed_sanitizer_coverage_ignorelist);
}
SharedRuntime =
@@ -804,6 +805,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
options::OPT_fno_sanitize_address_poison_custom_array_cookie,
AsanPoisonCustomArrayCookie);
+ AsanOutlineInstrumentation =
+ Args.hasFlag(options::OPT_fsanitize_address_outline_instrumentation,
+ options::OPT_fno_sanitize_address_outline_instrumentation,
+ AsanOutlineInstrumentation);
+
// As a workaround for a bug in gold 2.26 and earlier, dead stripping of
// globals in ASan is disabled by default on ELF targets.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
@@ -825,6 +831,34 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
AsanInvalidPointerSub = true;
}
+ if (TC.getTriple().isOSDarwin() &&
+ (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext))) {
+ AsanDtorKind = llvm::AsanDtorKind::None;
+ }
+
+ if (const auto *Arg =
+ Args.getLastArg(options::OPT_sanitize_address_destructor_EQ)) {
+ auto parsedAsanDtorKind = AsanDtorKindFromString(Arg->getValue());
+ if (parsedAsanDtorKind == llvm::AsanDtorKind::Invalid) {
+ TC.getDriver().Diag(clang::diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << Arg->getValue();
+ }
+ AsanDtorKind = parsedAsanDtorKind;
+ }
+
+ if (const auto *Arg = Args.getLastArg(
+ options::OPT_sanitize_address_use_after_return_EQ)) {
+ auto parsedAsanUseAfterReturn =
+ AsanDetectStackUseAfterReturnModeFromString(Arg->getValue());
+ if (parsedAsanUseAfterReturn ==
+ llvm::AsanDetectStackUseAfterReturnMode::Invalid) {
+ TC.getDriver().Diag(clang::diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << Arg->getValue();
+ }
+ AsanUseAfterReturn = parsedAsanUseAfterReturn;
+ }
+
} else {
AsanUseAfterScope = false;
// -fsanitize=pointer-compare/pointer-subtract requires -fsanitize=address.
@@ -849,6 +883,11 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
} else {
HwasanAbi = "interceptor";
}
+ if (TC.getTriple().getArch() == llvm::Triple::x86_64)
+ HwasanUseAliases = Args.hasFlag(
+ options::OPT_fsanitize_hwaddress_experimental_aliasing,
+ options::OPT_fno_sanitize_hwaddress_experimental_aliasing,
+ HwasanUseAliases);
}
if (AllAddedKinds & SanitizerKind::SafeStack) {
@@ -931,10 +970,15 @@ static bool hasTargetFeatureMTE(const llvm::opt::ArgStringList &CmdArgs) {
void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
types::ID InputType) const {
- // NVPTX/AMDGPU doesn't currently support sanitizers. Bailing out here means
+ // NVPTX doesn't currently support sanitizers. Bailing out here means
// that e.g. -fsanitize=address applies only to host code, which is what we
// want for now.
- if (TC.getTriple().isNVPTX() || TC.getTriple().isAMDGPU())
+ //
+ // AMDGPU sanitizer support is experimental and controlled by -fgpu-sanitize.
+ if (TC.getTriple().isNVPTX() ||
+ (TC.getTriple().isAMDGPU() &&
+ !Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ false)))
return;
// Translate available CoverageFeatures to corresponding clang-cc1 flags.
@@ -966,8 +1010,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
}
addSpecialCaseListOpt(
Args, CmdArgs, "-fsanitize-coverage-allowlist=", CoverageAllowlistFiles);
- addSpecialCaseListOpt(
- Args, CmdArgs, "-fsanitize-coverage-blocklist=", CoverageBlocklistFiles);
+ addSpecialCaseListOpt(Args, CmdArgs, "-fsanitize-coverage-ignorelist=",
+ CoverageIgnorelistFiles);
if (TC.getTriple().isOSWindows() && needsUbsanRt()) {
// Instruct the code generator to embed linker directives in the object file
@@ -1006,9 +1050,9 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
Args.MakeArgString("-fsanitize-trap=" + toString(TrapSanitizers)));
addSpecialCaseListOpt(Args, CmdArgs,
- "-fsanitize-blacklist=", UserBlacklistFiles);
+ "-fsanitize-ignorelist=", UserIgnorelistFiles);
addSpecialCaseListOpt(Args, CmdArgs,
- "-fsanitize-system-blacklist=", SystemBlacklistFiles);
+ "-fsanitize-system-ignorelist=", SystemIgnorelistFiles);
if (MsanTrackOrigins)
CmdArgs.push_back(Args.MakeArgString("-fsanitize-memory-track-origins=" +
@@ -1033,6 +1077,11 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back("-tsan-instrument-atomics=0");
}
+ if (HwasanUseAliases) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-hwasan-experimental-use-page-aliases=1");
+ }
+
if (CfiCrossDso)
CmdArgs.push_back("-fsanitize-cfi-cross-dso");
@@ -1074,6 +1123,24 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
CmdArgs.push_back("-asan-detect-invalid-pointer-sub");
}
+ if (AsanOutlineInstrumentation) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-asan-instrumentation-with-call-threshold=0");
+ }
+
+  // Only pass the option to the frontend if the user requested it,
+ // otherwise the frontend will just use the codegen default.
+ if (AsanDtorKind != llvm::AsanDtorKind::Invalid) {
+ CmdArgs.push_back(Args.MakeArgString("-fsanitize-address-destructor=" +
+ AsanDtorKindToString(AsanDtorKind)));
+ }
+
+ if (AsanUseAfterReturn != llvm::AsanDetectStackUseAfterReturnMode::Invalid) {
+ CmdArgs.push_back(Args.MakeArgString(
+ "-fsanitize-address-use-after-return=" +
+ AsanDetectStackUseAfterReturnModeToString(AsanUseAfterReturn)));
+ }
+
if (!HwasanAbi.empty()) {
CmdArgs.push_back("-default-function-attr");
CmdArgs.push_back(Args.MakeArgString("hwasan-abi=" + HwasanAbi));
diff --git a/clang/lib/Driver/Tool.cpp b/clang/lib/Driver/Tool.cpp
index 449f69cfcb35..a198f4f3b675 100644
--- a/clang/lib/Driver/Tool.cpp
+++ b/clang/lib/Driver/Tool.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/Tool.h"
-#include "InputInfo.h"
+#include "clang/Driver/InputInfo.h"
using namespace clang::driver;
diff --git a/clang/lib/Driver/ToolChain.cpp b/clang/lib/Driver/ToolChain.cpp
index b2ddef141a75..6c1b88141c45 100644
--- a/clang/lib/Driver/ToolChain.cpp
+++ b/clang/lib/Driver/ToolChain.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Driver/ToolChain.h"
-#include "InputInfo.h"
#include "ToolChains/Arch/ARM.h"
#include "ToolChains/Clang.h"
#include "ToolChains/InterfaceStubs.h"
@@ -18,6 +17,7 @@
#include "clang/Driver/Action.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
@@ -75,13 +75,13 @@ ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
const ArgList &Args)
: D(D), Triple(T), Args(Args), CachedRTTIArg(GetRTTIArgument(Args)),
CachedRTTIMode(CalculateRTTIMode(Args, Triple, CachedRTTIArg)) {
- if (D.CCCIsCXX()) {
- if (auto CXXStdlibPath = getCXXStdlibPath())
- getFilePaths().push_back(*CXXStdlibPath);
- }
+ std::string RuntimePath = getRuntimePath();
+ if (getVFS().exists(RuntimePath))
+ getLibraryPaths().push_back(RuntimePath);
- if (auto RuntimePath = getRuntimePath())
- getLibraryPaths().push_back(*RuntimePath);
+ std::string StdlibPath = getStdlibPath();
+ if (getVFS().exists(StdlibPath))
+ getFilePaths().push_back(StdlibPath);
std::string CandidateLibPath = getArchSpecificLibPath();
if (getVFS().exists(CandidateLibPath))
@@ -383,10 +383,16 @@ static StringRef getArchNameForCompilerRTLib(const ToolChain &TC,
if (TC.getArch() == llvm::Triple::x86 && Triple.isAndroid())
return "i686";
+ if (TC.getArch() == llvm::Triple::x86_64 && Triple.isX32())
+ return "x32";
+
return llvm::Triple::getArchTypeName(TC.getArch());
}
StringRef ToolChain::getOSLibName() const {
+ if (Triple.isOSDarwin())
+ return "darwin";
+
switch (Triple.getOS()) {
case llvm::Triple::FreeBSD:
return "freebsd";
@@ -414,8 +420,16 @@ std::string ToolChain::getCompilerRTPath() const {
}
std::string ToolChain::getCompilerRTBasename(const ArgList &Args,
- StringRef Component, FileType Type,
- bool AddArch) const {
+ StringRef Component,
+ FileType Type) const {
+ std::string CRTAbsolutePath = getCompilerRT(Args, Component, Type);
+ return llvm::sys::path::filename(CRTAbsolutePath).str();
+}
+
+std::string ToolChain::buildCompilerRTBasename(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ FileType Type,
+ bool AddArch) const {
const llvm::Triple &TT = getTriple();
bool IsITANMSVCWindows =
TT.isWindowsMSVCEnvironment() || TT.isWindowsItaniumEnvironment();
@@ -431,8 +445,8 @@ std::string ToolChain::getCompilerRTBasename(const ArgList &Args,
Suffix = IsITANMSVCWindows ? ".lib" : ".a";
break;
case ToolChain::FT_Shared:
- Suffix = Triple.isOSWindows()
- ? (Triple.isWindowsGNUEnvironment() ? ".dll.a" : ".lib")
+ Suffix = TT.isOSWindows()
+ ? (TT.isWindowsGNUEnvironment() ? ".dll.a" : ".lib")
: ".so";
break;
}
@@ -450,7 +464,7 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
FileType Type) const {
// Check for runtime files in the new layout without the architecture first.
std::string CRTBasename =
- getCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
+ buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
for (const auto &LibPath : getLibraryPaths()) {
SmallString<128> P(LibPath);
llvm::sys::path::append(P, CRTBasename);
@@ -460,7 +474,8 @@ std::string ToolChain::getCompilerRT(const ArgList &Args, StringRef Component,
// Fall back to the old expected compiler-rt name if the new one does not
// exist.
- CRTBasename = getCompilerRTBasename(Args, Component, Type, /*AddArch=*/true);
+ CRTBasename =
+ buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/true);
SmallString<128> Path(getCompilerRTPath());
llvm::sys::path::append(Path, CRTBasename);
return std::string(Path.str());
@@ -472,41 +487,16 @@ const char *ToolChain::getCompilerRTArgString(const llvm::opt::ArgList &Args,
return Args.MakeArgString(getCompilerRT(Args, Component, Type));
}
-
-Optional<std::string> ToolChain::getRuntimePath() const {
- SmallString<128> P;
-
- // First try the triple passed to driver as --target=<triple>.
- P.assign(D.ResourceDir);
- llvm::sys::path::append(P, "lib", D.getTargetTriple());
- if (getVFS().exists(P))
- return llvm::Optional<std::string>(std::string(P.str()));
-
- // Second try the normalized triple.
- P.assign(D.ResourceDir);
- llvm::sys::path::append(P, "lib", Triple.str());
- if (getVFS().exists(P))
- return llvm::Optional<std::string>(std::string(P.str()));
-
- return None;
+std::string ToolChain::getRuntimePath() const {
+ SmallString<128> P(D.ResourceDir);
+ llvm::sys::path::append(P, "lib", getTripleString());
+ return std::string(P.str());
}
-Optional<std::string> ToolChain::getCXXStdlibPath() const {
- SmallString<128> P;
-
- // First try the triple passed to driver as --target=<triple>.
- P.assign(D.Dir);
- llvm::sys::path::append(P, "..", "lib", D.getTargetTriple(), "c++");
- if (getVFS().exists(P))
- return llvm::Optional<std::string>(std::string(P.str()));
-
- // Second try the normalized triple.
- P.assign(D.Dir);
- llvm::sys::path::append(P, "..", "lib", Triple.str(), "c++");
- if (getVFS().exists(P))
- return llvm::Optional<std::string>(std::string(P.str()));
-
- return None;
+std::string ToolChain::getStdlibPath() const {
+ SmallString<128> P(D.Dir);
+ llvm::sys::path::append(P, "..", "lib", getTripleString());
+ return std::string(P.str());
}
std::string ToolChain::getArchSpecificLibPath() const {
@@ -611,11 +601,11 @@ std::string ToolChain::GetLinkerPath(bool *LinkerIsLLD,
std::string LinkerPath(GetProgramPath(LinkerName.c_str()));
if (llvm::sys::fs::can_execute(LinkerPath)) {
- // FIXME: Remove lld.darwinnew here once it's the only MachO lld.
+ // FIXME: Remove LinkerIsLLDDarwinNew once there's only one MachO lld.
if (LinkerIsLLD)
- *LinkerIsLLD = UseLinker == "lld" || UseLinker == "lld.darwinnew";
+ *LinkerIsLLD = UseLinker == "lld" || UseLinker == "lld.darwinold";
if (LinkerIsLLDDarwinNew)
- *LinkerIsLLDDarwinNew = UseLinker == "lld.darwinnew";
+ *LinkerIsLLDDarwinNew = UseLinker == "lld";
return LinkerPath;
}
}
@@ -726,118 +716,9 @@ std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb: {
- // FIXME: Factor into subclasses.
llvm::Triple Triple = getTriple();
- bool IsBigEndian = getTriple().getArch() == llvm::Triple::armeb ||
- getTriple().getArch() == llvm::Triple::thumbeb;
-
- // Handle pseudo-target flags '-mlittle-endian'/'-EL' and
- // '-mbig-endian'/'-EB'.
- if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
- options::OPT_mbig_endian)) {
- IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
- }
-
- // Thumb2 is the default for V7 on Darwin.
- //
- // FIXME: Thumb should just be another -target-feaure, not in the triple.
- StringRef MCPU, MArch;
- if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
- MCPU = A->getValue();
- if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
- MArch = A->getValue();
- std::string CPU =
- Triple.isOSBinFormatMachO()
- ? tools::arm::getARMCPUForMArch(MArch, Triple).str()
- : tools::arm::getARMTargetCPU(MCPU, MArch, Triple);
- StringRef Suffix =
- tools::arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
- bool IsMProfile = ARM::parseArchProfile(Suffix) == ARM::ProfileKind::M;
- bool ThumbDefault = IsMProfile || (ARM::parseArchVersion(Suffix) == 7 &&
- getTriple().isOSBinFormatMachO());
- // FIXME: this is invalid for WindowsCE
- if (getTriple().isOSWindows())
- ThumbDefault = true;
- std::string ArchName;
- if (IsBigEndian)
- ArchName = "armeb";
- else
- ArchName = "arm";
-
- // Check if ARM ISA was explicitly selected (using -mno-thumb or -marm) for
- // M-Class CPUs/architecture variants, which is not supported.
- bool ARMModeRequested = !Args.hasFlag(options::OPT_mthumb,
- options::OPT_mno_thumb, ThumbDefault);
- if (IsMProfile && ARMModeRequested) {
- if (!MCPU.empty())
- getDriver().Diag(diag::err_cpu_unsupported_isa) << CPU << "ARM";
- else
- getDriver().Diag(diag::err_arch_unsupported_isa)
- << tools::arm::getARMArch(MArch, getTriple()) << "ARM";
- }
-
- // Check to see if an explicit choice to use thumb has been made via
- // -mthumb. For assembler files we must check for -mthumb in the options
- // passed to the assembler via -Wa or -Xassembler.
- bool IsThumb = false;
- if (InputType != types::TY_PP_Asm)
- IsThumb = Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb,
- ThumbDefault);
- else {
- // Ideally we would check for these flags in
- // CollectArgsForIntegratedAssembler but we can't change the ArchName at
- // that point. There is no assembler equivalent of -mno-thumb, -marm, or
- // -mno-arm.
- for (const auto *A :
- Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
- for (StringRef Value : A->getValues()) {
- if (Value == "-mthumb")
- IsThumb = true;
- }
- }
- }
- // Assembly files should start in ARM mode, unless arch is M-profile, or
- // -mthumb has been passed explicitly to the assembler. Windows is always
- // thumb.
- if (IsThumb || IsMProfile || getTriple().isOSWindows()) {
- if (IsBigEndian)
- ArchName = "thumbeb";
- else
- ArchName = "thumb";
- }
- Triple.setArchName(ArchName + Suffix.str());
-
- bool isHardFloat =
- (arm::getARMFloatABI(getDriver(), Triple, Args) == arm::FloatABI::Hard);
- switch (Triple.getEnvironment()) {
- case Triple::GNUEABI:
- case Triple::GNUEABIHF:
- Triple.setEnvironment(isHardFloat ? Triple::GNUEABIHF : Triple::GNUEABI);
- break;
- case Triple::EABI:
- case Triple::EABIHF:
- Triple.setEnvironment(isHardFloat ? Triple::EABIHF : Triple::EABI);
- break;
- case Triple::MuslEABI:
- case Triple::MuslEABIHF:
- Triple.setEnvironment(isHardFloat ? Triple::MuslEABIHF
- : Triple::MuslEABI);
- break;
- default: {
- arm::FloatABI DefaultABI = arm::getDefaultFloatABI(Triple);
- if (DefaultABI != arm::FloatABI::Invalid &&
- isHardFloat != (DefaultABI == arm::FloatABI::Hard)) {
- Arg *ABIArg =
- Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
- options::OPT_mfloat_abi_EQ);
- assert(ABIArg && "Non-default float abi expected to be from arg");
- D.Diag(diag::err_drv_unsupported_opt_for_target)
- << ABIArg->getAsString(Args) << Triple.getTriple();
- }
- break;
- }
- }
-
+ tools::arm::setArchNameInTriple(getDriver(), Args, InputType, Triple);
+ tools::arm::setFloatABIInTriple(getDriver(), Args, Triple);
return Triple.getTriple();
}
}
@@ -873,66 +754,89 @@ void ToolChain::addProfileRTLibs(const llvm::opt::ArgList &Args,
ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
const ArgList &Args) const {
+ if (runtimeLibType)
+ return *runtimeLibType;
+
const Arg* A = Args.getLastArg(options::OPT_rtlib_EQ);
StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_RTLIB;
// Only use "platform" in tests to override CLANG_DEFAULT_RTLIB!
if (LibName == "compiler-rt")
- return ToolChain::RLT_CompilerRT;
+ runtimeLibType = ToolChain::RLT_CompilerRT;
else if (LibName == "libgcc")
- return ToolChain::RLT_Libgcc;
+ runtimeLibType = ToolChain::RLT_Libgcc;
else if (LibName == "platform")
- return GetDefaultRuntimeLibType();
+ runtimeLibType = GetDefaultRuntimeLibType();
+ else {
+ if (A)
+ getDriver().Diag(diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
- if (A)
- getDriver().Diag(diag::err_drv_invalid_rtlib_name) << A->getAsString(Args);
+ runtimeLibType = GetDefaultRuntimeLibType();
+ }
- return GetDefaultRuntimeLibType();
+ return *runtimeLibType;
}
ToolChain::UnwindLibType ToolChain::GetUnwindLibType(
const ArgList &Args) const {
+ if (unwindLibType)
+ return *unwindLibType;
+
const Arg *A = Args.getLastArg(options::OPT_unwindlib_EQ);
StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_UNWINDLIB;
if (LibName == "none")
- return ToolChain::UNW_None;
+ unwindLibType = ToolChain::UNW_None;
else if (LibName == "platform" || LibName == "") {
ToolChain::RuntimeLibType RtLibType = GetRuntimeLibType(Args);
- if (RtLibType == ToolChain::RLT_CompilerRT)
- return ToolChain::UNW_None;
- else if (RtLibType == ToolChain::RLT_Libgcc)
- return ToolChain::UNW_Libgcc;
+ if (RtLibType == ToolChain::RLT_CompilerRT) {
+ if (getTriple().isAndroid() || getTriple().isOSAIX())
+ unwindLibType = ToolChain::UNW_CompilerRT;
+ else
+ unwindLibType = ToolChain::UNW_None;
+ } else if (RtLibType == ToolChain::RLT_Libgcc)
+ unwindLibType = ToolChain::UNW_Libgcc;
} else if (LibName == "libunwind") {
if (GetRuntimeLibType(Args) == RLT_Libgcc)
getDriver().Diag(diag::err_drv_incompatible_unwindlib);
- return ToolChain::UNW_CompilerRT;
+ unwindLibType = ToolChain::UNW_CompilerRT;
} else if (LibName == "libgcc")
- return ToolChain::UNW_Libgcc;
+ unwindLibType = ToolChain::UNW_Libgcc;
+ else {
+ if (A)
+ getDriver().Diag(diag::err_drv_invalid_unwindlib_name)
+ << A->getAsString(Args);
- if (A)
- getDriver().Diag(diag::err_drv_invalid_unwindlib_name)
- << A->getAsString(Args);
+ unwindLibType = GetDefaultUnwindLibType();
+ }
- return GetDefaultUnwindLibType();
+ return *unwindLibType;
}
ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
+ if (cxxStdlibType)
+ return *cxxStdlibType;
+
const Arg *A = Args.getLastArg(options::OPT_stdlib_EQ);
StringRef LibName = A ? A->getValue() : CLANG_DEFAULT_CXX_STDLIB;
// Only use "platform" in tests to override CLANG_DEFAULT_CXX_STDLIB!
if (LibName == "libc++")
- return ToolChain::CST_Libcxx;
+ cxxStdlibType = ToolChain::CST_Libcxx;
else if (LibName == "libstdc++")
- return ToolChain::CST_Libstdcxx;
+ cxxStdlibType = ToolChain::CST_Libstdcxx;
else if (LibName == "platform")
- return GetDefaultCXXStdlibType();
+ cxxStdlibType = GetDefaultCXXStdlibType();
+ else {
+ if (A)
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
- if (A)
- getDriver().Diag(diag::err_drv_invalid_stdlib_name) << A->getAsString(Args);
+ cxxStdlibType = GetDefaultCXXStdlibType();
+ }
- return GetDefaultCXXStdlibType();
+ return *cxxStdlibType;
}
/// Utility function to add a system include directory to CC1 arguments.
@@ -975,6 +879,29 @@ void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
}
}
+std::string ToolChain::detectLibcxxVersion(StringRef IncludePath) const {
+ std::error_code EC;
+ int MaxVersion = 0;
+ std::string MaxVersionString;
+ SmallString<128> Path(IncludePath);
+ llvm::sys::path::append(Path, "c++");
+ for (llvm::vfs::directory_iterator LI = getVFS().dir_begin(Path, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ int Version;
+ if (VersionText[0] == 'v' &&
+ !VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
+ if (Version > MaxVersion) {
+ MaxVersion = Version;
+ MaxVersionString = std::string(VersionText);
+ }
+ }
+ }
+ if (!MaxVersion)
+ return "";
+ return MaxVersionString;
+}
+
void ToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Header search paths should be handled by each of the subclasses.
@@ -993,7 +920,8 @@ void ToolChain::AddClangCXXStdlibIsystemArgs(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
DriverArgs.ClaimAllArgs(options::OPT_stdlibxx_isystem);
- if (!DriverArgs.hasArg(options::OPT_nostdincxx))
+ if (!DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdincxx,
+ options::OPT_nostdlibinc))
for (const auto &P :
DriverArgs.getAllArgValues(options::OPT_stdlibxx_isystem))
addSystemInclude(DriverArgs, CC1Args, P);
@@ -1096,6 +1024,11 @@ void ToolChain::AddCudaIncludeArgs(const ArgList &DriverArgs,
void ToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
+llvm::SmallVector<std::string, 12>
+ToolChain::getHIPDeviceLibs(const ArgList &DriverArgs) const {
+ return {};
+}
+
void ToolChain::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {}
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index 36fe578fcb3d..3000b8416adf 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -176,7 +176,9 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
/// AIX - AIX tool chain which can call as(1) and ld(1) directly.
AIX::AIX(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
: ToolChain(D, Triple, Args) {
- getFilePaths().push_back(getDriver().SysRoot + "/usr/lib");
+ ParseInlineAsmUsingAsmParser = Args.hasFlag(
+ options::OPT_fintegrated_as, options::OPT_fno_integrated_as, true);
+ getLibraryPaths().push_back(getDriver().SysRoot + "/usr/lib");
}
// Returns the effective header sysroot path to use.
@@ -221,6 +223,7 @@ void AIX::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
switch (GetCXXStdlibType(Args)) {
case ToolChain::CST_Libcxx:
CmdArgs.push_back("-lc++");
+ CmdArgs.push_back("-lc++abi");
return;
case ToolChain::CST_Libstdcxx:
llvm::report_fatal_error("linking libstdc++ unimplemented on AIX");
diff --git a/clang/lib/Driver/ToolChains/AIX.h b/clang/lib/Driver/ToolChains/AIX.h
index d4e593255736..d1ec6d10fb3a 100644
--- a/clang/lib/Driver/ToolChains/AIX.h
+++ b/clang/lib/Driver/ToolChains/AIX.h
@@ -59,6 +59,9 @@ public:
AIX(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ bool parseInlineAsmUsingAsmParser() const override {
+ return ParseInlineAsmUsingAsmParser;
+ }
bool isPICDefault() const override { return true; }
bool isPIEDefault() const override { return false; }
bool isPICDefaultForced() const override { return true; }
@@ -74,12 +77,20 @@ public:
RuntimeLibType GetDefaultRuntimeLibType() const override;
+ // Set the default DWARF version to 3 for now, as the latest AIX OS supports version 3.
+ unsigned GetDefaultDwarfVersion() const override { return 3; }
+
+ llvm::DebuggerKind getDefaultDebuggerTuning() const override {
+ return llvm::DebuggerKind::DBX;
+ }
+
protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
private:
llvm::StringRef GetHeaderSysroot(const llvm::opt::ArgList &DriverArgs) const;
+ bool ParseInlineAsmUsingAsmParser;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.cpp b/clang/lib/Driver/ToolChains/AMDGPU.cpp
index 0971a2da62a3..d63c5e12c4af 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -8,13 +8,20 @@
#include "AMDGPU.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Basic/TargetID.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/LineIterator.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/VirtualFileSystem.h"
+#include <system_error>
+
+#define AMDGPU_ARCH_PROGRAM_NAME "amdgpu-arch"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -22,6 +29,48 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+// Look for a sub-directory that starts with PackageName under the ROCm
+// candidate path. If there is one and only one matching sub-directory found,
+// append the sub-directory to Path. If there is no matching sub-directory or
+// there is more than one matching sub-directory, diagnose them. Returns the
+// full path of the package if there is only one matching sub-directory,
+// otherwise returns an empty string.
+llvm::SmallString<0>
+RocmInstallationDetector::findSPACKPackage(const Candidate &Cand,
+ StringRef PackageName) {
+ if (!Cand.isSPACK())
+ return {};
+ std::error_code EC;
+ std::string Prefix = Twine(PackageName + "-" + Cand.SPACKReleaseStr).str();
+ llvm::SmallVector<llvm::SmallString<0>> SubDirs;
+ for (llvm::vfs::directory_iterator File = D.getVFS().dir_begin(Cand.Path, EC),
+ FileEnd;
+ File != FileEnd && !EC; File.increment(EC)) {
+ llvm::StringRef FileName = llvm::sys::path::filename(File->path());
+ if (FileName.startswith(Prefix)) {
+ SubDirs.push_back(FileName);
+ if (SubDirs.size() > 1)
+ break;
+ }
+ }
+ if (SubDirs.size() == 1) {
+ auto PackagePath = Cand.Path;
+ llvm::sys::path::append(PackagePath, SubDirs[0]);
+ return PackagePath;
+ }
+ if (SubDirs.size() == 0 && Verbose) {
+ llvm::errs() << "SPACK package " << Prefix << " not found at " << Cand.Path
+ << '\n';
+ return {};
+ }
+
+ if (SubDirs.size() > 1 && Verbose) {
+ llvm::errs() << "Cannot use SPACK package " << Prefix << " at " << Cand.Path
+ << " due to multiple installations for the same version\n";
+ }
+ return {};
+}
+
void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
assert(!Path.empty());
@@ -50,6 +99,8 @@ void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
OpenCL = FilePath;
} else if (BaseName == "hip") {
HIP = FilePath;
+ } else if (BaseName == "asanrtl") {
+ AsanRTL = FilePath;
} else if (BaseName == "oclc_finite_only_off") {
FiniteOnly.Off = FilePath;
} else if (BaseName == "oclc_finite_only_on") {
@@ -114,13 +165,37 @@ bool RocmInstallationDetector::parseHIPVersionFile(llvm::StringRef V) {
return false;
}
-// For candidate specified by --rocm-path we do not do strict check.
-SmallVector<RocmInstallationDetector::Candidate, 4>
+/// \returns a list of candidate directories for ROCm installation, which is
+/// cached and populated only once.
+const SmallVectorImpl<RocmInstallationDetector::Candidate> &
RocmInstallationDetector::getInstallationPathCandidates() {
- SmallVector<Candidate, 4> Candidates;
+
+ // Return the cached candidate list if it has already been populated.
+ if (!ROCmSearchDirs.empty())
+ return ROCmSearchDirs;
+
+ auto DoPrintROCmSearchDirs = [&]() {
+ if (PrintROCmSearchDirs)
+ for (auto Cand : ROCmSearchDirs) {
+ llvm::errs() << "ROCm installation search path";
+ if (Cand.isSPACK())
+ llvm::errs() << " (Spack " << Cand.SPACKReleaseStr << ")";
+ llvm::errs() << ": " << Cand.Path << '\n';
+ }
+ };
+
+ // For the candidate specified by --rocm-path we do not do a strict check,
+ // i.e., checking the existence of the HIP version file and device libraries.
if (!RocmPathArg.empty()) {
- Candidates.emplace_back(RocmPathArg.str());
- return Candidates;
+ ROCmSearchDirs.emplace_back(RocmPathArg.str());
+ DoPrintROCmSearchDirs();
+ return ROCmSearchDirs;
+ } else if (const char *RocmPathEnv = ::getenv("ROCM_PATH")) {
+ if (!StringRef(RocmPathEnv).empty()) {
+ ROCmSearchDirs.emplace_back(RocmPathEnv);
+ DoPrintROCmSearchDirs();
+ return ROCmSearchDirs;
+ }
}
// Try to find relative to the compiler binary.
@@ -129,41 +204,120 @@ RocmInstallationDetector::getInstallationPathCandidates() {
// Check both a normal Unix prefix position of the clang binary, as well as
// the Windows-esque layout the ROCm packages use with the host architecture
// subdirectory of bin.
+ auto DeduceROCmPath = [](StringRef ClangPath) {
+ // Strip off directory (usually bin)
+ StringRef ParentDir = llvm::sys::path::parent_path(ClangPath);
+ StringRef ParentName = llvm::sys::path::filename(ParentDir);
+
+ // Some builds use bin/{host arch}, so go up again.
+ if (ParentName == "bin") {
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+ ParentName = llvm::sys::path::filename(ParentDir);
+ }
- // Strip off directory (usually bin)
- StringRef ParentDir = llvm::sys::path::parent_path(InstallDir);
- StringRef ParentName = llvm::sys::path::filename(ParentDir);
+ // Detect ROCm packages built with SPACK.
+ // clang is installed at
+ // <rocm_root>/llvm-amdgpu-<rocm_release_string>-<hash>/bin directory.
+ // We only consider the parent directory of llvm-amdgpu package as ROCm
+ // installation candidate for SPACK.
+ if (ParentName.startswith("llvm-amdgpu-")) {
+ auto SPACKPostfix =
+ ParentName.drop_front(strlen("llvm-amdgpu-")).split('-');
+ auto SPACKReleaseStr = SPACKPostfix.first;
+ if (!SPACKReleaseStr.empty()) {
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+ return Candidate(ParentDir.str(), /*StrictChecking=*/true,
+ SPACKReleaseStr);
+ }
+ }
- // Some builds use bin/{host arch}, so go up again.
- if (ParentName == "bin") {
- ParentDir = llvm::sys::path::parent_path(ParentDir);
- ParentName = llvm::sys::path::filename(ParentDir);
+ // Some versions of the rocm llvm package install to /opt/rocm/llvm/bin
+ // Some versions of the aomp package install to /opt/rocm/aomp/bin
+ if (ParentName == "llvm" || ParentName.startswith("aomp"))
+ ParentDir = llvm::sys::path::parent_path(ParentDir);
+
+ return Candidate(ParentDir.str(), /*StrictChecking=*/true);
+ };
+
+ // Deduce ROCm path by the path used to invoke clang. Do not resolve symbolic
+ // link of clang itself.
+ ROCmSearchDirs.emplace_back(DeduceROCmPath(InstallDir));
+
+ // Deduce ROCm path by the real path of the invoked clang, resolving symbolic
+ // link of clang itself.
+ llvm::SmallString<256> RealClangPath;
+ llvm::sys::fs::real_path(D.getClangProgramPath(), RealClangPath);
+ auto ParentPath = llvm::sys::path::parent_path(RealClangPath);
+ if (ParentPath != InstallDir)
+ ROCmSearchDirs.emplace_back(DeduceROCmPath(ParentPath));
+
+ // Device library may be installed in clang or resource directory.
+ auto ClangRoot = llvm::sys::path::parent_path(InstallDir);
+ auto RealClangRoot = llvm::sys::path::parent_path(ParentPath);
+ ROCmSearchDirs.emplace_back(ClangRoot.str(), /*StrictChecking=*/true);
+ if (RealClangRoot != ClangRoot)
+ ROCmSearchDirs.emplace_back(RealClangRoot.str(), /*StrictChecking=*/true);
+ ROCmSearchDirs.emplace_back(D.ResourceDir,
+ /*StrictChecking=*/true);
+
+ ROCmSearchDirs.emplace_back(D.SysRoot + "/opt/rocm",
+ /*StrictChecking=*/true);
+
+ // Find the latest /opt/rocm-{release} directory.
+ std::error_code EC;
+ std::string LatestROCm;
+ llvm::VersionTuple LatestVer;
+ // Get ROCm version from ROCm directory name.
+ auto GetROCmVersion = [](StringRef DirName) {
+ llvm::VersionTuple V;
+ std::string VerStr = DirName.drop_front(strlen("rocm-")).str();
+ // The ROCm directory name follows the format of
+ // rocm-{major}.{minor}.{subMinor}[-{build}]
+ std::replace(VerStr.begin(), VerStr.end(), '-', '.');
+ V.tryParse(VerStr);
+ return V;
+ };
+ for (llvm::vfs::directory_iterator
+ File = D.getVFS().dir_begin(D.SysRoot + "/opt", EC),
+ FileEnd;
+ File != FileEnd && !EC; File.increment(EC)) {
+ llvm::StringRef FileName = llvm::sys::path::filename(File->path());
+ if (!FileName.startswith("rocm-"))
+ continue;
+ if (LatestROCm.empty()) {
+ LatestROCm = FileName.str();
+ LatestVer = GetROCmVersion(LatestROCm);
+ continue;
+ }
+ auto Ver = GetROCmVersion(FileName);
+ if (LatestVer < Ver) {
+ LatestROCm = FileName.str();
+ LatestVer = Ver;
+ }
}
+ if (!LatestROCm.empty())
+ ROCmSearchDirs.emplace_back(D.SysRoot + "/opt/" + LatestROCm,
+ /*StrictChecking=*/true);
- // Some versions of the rocm llvm package install to /opt/rocm/llvm/bin
- if (ParentName == "llvm")
- ParentDir = llvm::sys::path::parent_path(ParentDir);
-
- Candidates.emplace_back(ParentDir.str(), /*StrictChecking=*/true);
-
- // Device library may be installed in clang resource directory.
- Candidates.emplace_back(D.ResourceDir, /*StrictChecking=*/true);
-
- Candidates.emplace_back(D.SysRoot + "/opt/rocm", /*StrictChecking=*/true);
- return Candidates;
+ DoPrintROCmSearchDirs();
+ return ROCmSearchDirs;
}
RocmInstallationDetector::RocmInstallationDetector(
const Driver &D, const llvm::Triple &HostTriple,
const llvm::opt::ArgList &Args, bool DetectHIPRuntime, bool DetectDeviceLib)
: D(D) {
+ Verbose = Args.hasArg(options::OPT_v);
RocmPathArg = Args.getLastArgValue(clang::driver::options::OPT_rocm_path_EQ);
+ PrintROCmSearchDirs =
+ Args.hasArg(clang::driver::options::OPT_print_rocm_search_dirs);
RocmDeviceLibPathArg =
Args.getAllArgValues(clang::driver::options::OPT_rocm_device_lib_path_EQ);
+ HIPPathArg = Args.getLastArgValue(clang::driver::options::OPT_hip_path_EQ);
if (auto *A = Args.getLastArg(clang::driver::options::OPT_hip_version_EQ)) {
HIPVersionArg = A->getValue();
- unsigned Major = 0;
- unsigned Minor = 0;
+ unsigned Major = ~0U;
+ unsigned Minor = ~0U;
SmallVector<StringRef, 3> Parts;
HIPVersionArg.split(Parts, '.');
if (Parts.size())
@@ -174,7 +328,9 @@ RocmInstallationDetector::RocmInstallationDetector(
VersionPatch = Parts[2].str();
if (VersionPatch.empty())
VersionPatch = "0";
- if (Major == 0 || Minor == 0)
+ if (Major != ~0U && Minor == ~0U)
+ Minor = 0;
+ if (Major == ~0U || Minor == ~0U)
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << HIPVersionArg;
@@ -222,8 +378,8 @@ void RocmInstallationDetector::detectDeviceLibrary() {
// exist for each frontend project, and differ depending on which build
// system produced the packages. Standalone OpenCL builds also have a
// different directory structure from the ROCm OpenCL package.
- auto Candidates = getInstallationPathCandidates();
- for (const auto &Candidate : Candidates) {
+ auto &ROCmDirs = getInstallationPathCandidates();
+ for (const auto &Candidate : ROCmDirs) {
auto CandidatePath = Candidate.Path;
// Check device library exists at the given path.
@@ -276,13 +432,21 @@ void RocmInstallationDetector::detectDeviceLibrary() {
}
void RocmInstallationDetector::detectHIPRuntime() {
- auto Candidates = getInstallationPathCandidates();
+ SmallVector<Candidate, 4> HIPSearchDirs;
+ if (!HIPPathArg.empty())
+ HIPSearchDirs.emplace_back(HIPPathArg.str(), /*StrictChecking=*/true);
+ else
+ HIPSearchDirs.append(getInstallationPathCandidates());
auto &FS = D.getVFS();
- for (const auto &Candidate : Candidates) {
+ for (const auto &Candidate : HIPSearchDirs) {
InstallPath = Candidate.Path;
if (InstallPath.empty() || !FS.exists(InstallPath))
continue;
+ // HIP runtime built by SPACK is installed to
+ // <rocm_root>/hip-<rocm_release_string>-<hash> directory.
+ auto SPACKPath = findSPACKPackage(Candidate, "hip");
+ InstallPath = SPACKPath.empty() ? InstallPath : SPACKPath;
BinPath = InstallPath;
llvm::sys::path::append(BinPath, "bin");
@@ -413,7 +577,7 @@ AMDGPUToolChain::AMDGPUToolChain(const Driver &D, const llvm::Triple &Triple,
// and errors for the last invalid code object version options.
// It is done here to avoid repeated warning or error messages for
// each tool invocation.
- (void)getOrCheckAMDGPUCodeObjectVersion(D, Args, /*Diagnose=*/true);
+ checkAMDGPUCodeObjectVersion(D, Args);
}
Tool *AMDGPUToolChain::buildLinker() const {
@@ -488,8 +652,8 @@ llvm::DenormalMode AMDGPUToolChain::getDefaultDenormalModeForType(
auto Arch = getProcessorFromTargetID(getTriple(), JA.getOffloadingArch());
auto Kind = llvm::AMDGPU::parseArchAMDGCN(Arch);
if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
- DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero,
+ DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero,
+ options::OPT_fno_gpu_flush_denormals_to_zero,
getDefaultDenormsAreZeroForTarget(Kind)))
return llvm::DenormalMode::getPreserveSign();
@@ -547,17 +711,99 @@ AMDGPUToolChain::getGPUArch(const llvm::opt::ArgList &DriverArgs) const {
getTriple(), DriverArgs.getLastArgValue(options::OPT_mcpu_EQ));
}
-void AMDGPUToolChain::checkTargetID(
- const llvm::opt::ArgList &DriverArgs) const {
+AMDGPUToolChain::ParsedTargetIDType
+AMDGPUToolChain::getParsedTargetID(const llvm::opt::ArgList &DriverArgs) const {
StringRef TargetID = DriverArgs.getLastArgValue(options::OPT_mcpu_EQ);
if (TargetID.empty())
- return;
+ return {None, None, None};
llvm::StringMap<bool> FeatureMap;
auto OptionalGpuArch = parseTargetID(getTriple(), TargetID, &FeatureMap);
- if (!OptionalGpuArch) {
- getDriver().Diag(clang::diag::err_drv_bad_target_id) << TargetID;
+ if (!OptionalGpuArch)
+ return {TargetID.str(), None, None};
+
+ return {TargetID.str(), OptionalGpuArch.getValue().str(), FeatureMap};
+}
+
+void AMDGPUToolChain::checkTargetID(
+ const llvm::opt::ArgList &DriverArgs) const {
+ auto PTID = getParsedTargetID(DriverArgs);
+ if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
+ getDriver().Diag(clang::diag::err_drv_bad_target_id)
+ << PTID.OptionalTargetID.getValue();
+ }
+}
+
+llvm::Error
+AMDGPUToolChain::detectSystemGPUs(const ArgList &Args,
+ SmallVector<std::string, 1> &GPUArchs) const {
+ std::string Program;
+ if (Arg *A = Args.getLastArg(options::OPT_amdgpu_arch_tool_EQ))
+ Program = A->getValue();
+ else
+ Program = GetProgramPath(AMDGPU_ARCH_PROGRAM_NAME);
+ llvm::SmallString<64> OutputFile;
+ llvm::sys::fs::createTemporaryFile("print-system-gpus", "" /* No Suffix */,
+ OutputFile);
+ llvm::FileRemover OutputRemover(OutputFile.c_str());
+ llvm::Optional<llvm::StringRef> Redirects[] = {
+ {""},
+ OutputFile.str(),
+ {""},
+ };
+
+ std::string ErrorMessage;
+ if (int Result = llvm::sys::ExecuteAndWait(
+ Program.c_str(), {}, {}, Redirects, /* SecondsToWait */ 0,
+ /*MemoryLimit*/ 0, &ErrorMessage)) {
+ if (Result > 0) {
+ ErrorMessage = "Exited with error code " + std::to_string(Result);
+ } else if (Result == -1) {
+ ErrorMessage = "Execute failed: " + ErrorMessage;
+ } else {
+ ErrorMessage = "Crashed: " + ErrorMessage;
+ }
+
+ return llvm::createStringError(std::error_code(),
+ Program + ": " + ErrorMessage);
+ }
+
+ llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> OutputBuf =
+ llvm::MemoryBuffer::getFile(OutputFile.c_str());
+ if (!OutputBuf) {
+ return llvm::createStringError(OutputBuf.getError(),
+ "Failed to read stdout of " + Program +
+ ": " + OutputBuf.getError().message());
+ }
+
+ for (llvm::line_iterator LineIt(**OutputBuf); !LineIt.is_at_end(); ++LineIt) {
+ GPUArchs.push_back(LineIt->str());
}
+ return llvm::Error::success();
+}
+
+llvm::Error AMDGPUToolChain::getSystemGPUArch(const ArgList &Args,
+ std::string &GPUArch) const {
+ // Detect the AMD GPUs installed in the system.
+ SmallVector<std::string, 1> GPUArchs;
+ auto Err = detectSystemGPUs(Args, GPUArchs);
+ if (Err) {
+ return Err;
+ }
+ if (GPUArchs.empty()) {
+ return llvm::createStringError(std::error_code(),
+ "No AMD GPU detected in the system");
+ }
+ GPUArch = GPUArchs[0];
+ if (GPUArchs.size() > 1) {
+ bool AllSame = std::all_of(
+ GPUArchs.begin(), GPUArchs.end(),
+ [&](const StringRef &GPUArch) { return GPUArch == GPUArchs.front(); });
+ if (!AllSame)
+ return llvm::createStringError(
+ std::error_code(), "Multiple AMD GPUs found with different archs");
+ }
+ return llvm::Error::success();
}
void ROCMToolChain::addClangTargetOptions(
@@ -605,47 +851,40 @@ void ROCMToolChain::addClangTargetOptions(
DriverArgs.hasArg(options::OPT_cl_fp32_correctly_rounded_divide_sqrt);
// Add the OpenCL specific bitcode library.
- CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(RocmInstallation.getOpenCLPath()));
+ llvm::SmallVector<std::string, 12> BCLibs;
+ BCLibs.push_back(RocmInstallation.getOpenCLPath().str());
// Add the generic set of libraries.
- RocmInstallation.addCommonBitcodeLibCC1Args(
- DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
- UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
+ BCLibs.append(RocmInstallation.getCommonBitcodeLibs(
+ DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
+ FastRelaxedMath, CorrectSqrt));
+
+ llvm::for_each(BCLibs, [&](StringRef BCFile) {
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(BCFile));
+ });
}
-void RocmInstallationDetector::addCommonBitcodeLibCC1Args(
- const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
- StringRef LibDeviceFile, bool Wave64, bool DAZ, bool FiniteOnly,
- bool UnsafeMathOpt, bool FastRelaxedMath, bool CorrectSqrt) const {
- static const char LinkBitcodeFlag[] = "-mlink-builtin-bitcode";
-
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(getOCMLPath()));
-
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(getOCKLPath()));
-
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(getDenormalsAreZeroPath(DAZ)));
-
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(
- getUnsafeMathPath(UnsafeMathOpt || FastRelaxedMath)));
+llvm::SmallVector<std::string, 12>
+RocmInstallationDetector::getCommonBitcodeLibs(
+ const llvm::opt::ArgList &DriverArgs, StringRef LibDeviceFile, bool Wave64,
+ bool DAZ, bool FiniteOnly, bool UnsafeMathOpt, bool FastRelaxedMath,
+ bool CorrectSqrt) const {
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(
- getFiniteOnlyPath(FiniteOnly || FastRelaxedMath)));
+ llvm::SmallVector<std::string, 12> BCLibs;
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(
- DriverArgs.MakeArgString(getCorrectlyRoundedSqrtPath(CorrectSqrt)));
+ auto AddBCLib = [&](StringRef BCFile) { BCLibs.push_back(BCFile.str()); };
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(getWavefrontSize64Path(Wave64)));
+ AddBCLib(getOCMLPath());
+ AddBCLib(getOCKLPath());
+ AddBCLib(getDenormalsAreZeroPath(DAZ));
+ AddBCLib(getUnsafeMathPath(UnsafeMathOpt || FastRelaxedMath));
+ AddBCLib(getFiniteOnlyPath(FiniteOnly || FastRelaxedMath));
+ AddBCLib(getCorrectlyRoundedSqrtPath(CorrectSqrt));
+ AddBCLib(getWavefrontSize64Path(Wave64));
+ AddBCLib(LibDeviceFile);
- CC1Args.push_back(LinkBitcodeFlag);
- CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
+ return BCLibs;
}
bool AMDGPUToolChain::shouldSkipArgument(const llvm::opt::Arg *A) const {
diff --git a/clang/lib/Driver/ToolChains/AMDGPU.h b/clang/lib/Driver/ToolChains/AMDGPU.h
index 55ef6e01967e..50ed3b3ded9a 100644
--- a/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -64,6 +64,13 @@ public:
bool IsIntegratedAssemblerDefault() const override { return true; }
bool IsMathErrnoDefault() const override { return false; }
+ bool useIntegratedAs() const override { return true; }
+ bool isCrossCompiling() const override { return true; }
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault() const override { return false; }
+ bool isPICDefaultForced() const override { return false; }
+ bool SupportsProfiling() const override { return false; }
+
llvm::opt::DerivedArgList *
TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
Action::OffloadKind DeviceOffloadKind) const override;
@@ -93,12 +100,32 @@ public:
/// Should skip argument.
bool shouldSkipArgument(const llvm::opt::Arg *Arg) const;
+ /// Uses amdgpu_arch tool to get arch of the system GPU. Will return error
+ /// if unable to find one.
+ llvm::Error getSystemGPUArch(const llvm::opt::ArgList &Args,
+ std::string &GPUArch) const;
+
protected:
/// Check and diagnose invalid target ID specified by -mcpu.
- void checkTargetID(const llvm::opt::ArgList &DriverArgs) const;
+ virtual void checkTargetID(const llvm::opt::ArgList &DriverArgs) const;
+
+ /// The struct type returned by getParsedTargetID.
+ struct ParsedTargetIDType {
+ Optional<std::string> OptionalTargetID;
+ Optional<std::string> OptionalGPUArch;
+ Optional<llvm::StringMap<bool>> OptionalFeatures;
+ };
+
+ /// Get target ID, GPU arch, and target ID features if the target ID is
+ /// specified and valid.
+ ParsedTargetIDType
+ getParsedTargetID(const llvm::opt::ArgList &DriverArgs) const;
/// Get GPU arch from -mcpu without checking.
StringRef getGPUArch(const llvm::opt::ArgList &DriverArgs) const;
+
+ llvm::Error detectSystemGPUs(const llvm::opt::ArgList &Args,
+ SmallVector<std::string, 1> &GPUArchs) const;
};
class LLVM_LIBRARY_VISIBILITY ROCMToolChain : public AMDGPUToolChain {
diff --git a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
new file mode 100644
index 000000000000..fe1d19c2dd67
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -0,0 +1,304 @@
+//===- AMDGPUOpenMP.cpp - AMDGPUOpenMP ToolChain Implementation -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "AMDGPUOpenMP.h"
+#include "AMDGPU.h"
+#include "CommonArgs.h"
+#include "clang/Basic/DiagnosticDriver.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/FormatAdapters.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+namespace {
+
+static const char *getOutputFileName(Compilation &C, StringRef Base,
+ const char *Postfix,
+ const char *Extension) {
+ const char *OutputFileName;
+ if (C.getDriver().isSaveTempsEnabled()) {
+ OutputFileName =
+ C.getArgs().MakeArgString(Base.str() + Postfix + "." + Extension);
+ } else {
+ std::string TmpName =
+ C.getDriver().GetTemporaryPath(Base.str() + Postfix, Extension);
+ OutputFileName = C.addTempFile(C.getArgs().MakeArgString(TmpName));
+ }
+ return OutputFileName;
+}
+
+static void addLLCOptArg(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ StringRef OOpt = "0";
+ if (A->getOption().matches(options::OPT_O4) ||
+ A->getOption().matches(options::OPT_Ofast))
+ OOpt = "3";
+ else if (A->getOption().matches(options::OPT_O0))
+ OOpt = "0";
+ else if (A->getOption().matches(options::OPT_O)) {
+ // Clang and opt support -Os/-Oz; llc only supports -O0, -O1, -O2 and -O3
+ // so we map -Os/-Oz to -O2.
+ // Only clang supports -Og, and maps it to -O1.
+ // We map anything else to -O2.
+ OOpt = llvm::StringSwitch<const char *>(A->getValue())
+ .Case("1", "1")
+ .Case("2", "2")
+ .Case("3", "3")
+ .Case("s", "2")
+ .Case("z", "2")
+ .Case("g", "1")
+ .Default("0");
+ }
+ CmdArgs.push_back(Args.MakeArgString("-O" + OOpt));
+ }
+}
+
+static bool checkSystemForAMDGPU(const ArgList &Args, const AMDGPUToolChain &TC,
+ std::string &GPUArch) {
+ if (auto Err = TC.getSystemGPUArch(Args, GPUArch)) {
+ std::string ErrMsg =
+ llvm::formatv("{0}", llvm::fmt_consume(std::move(Err)));
+ TC.getDriver().Diag(diag::err_drv_undetermined_amdgpu_arch) << ErrMsg;
+ return false;
+ }
+
+ return true;
+}
+} // namespace
+
+const char *AMDGCN::OpenMPLinker::constructLLVMLinkCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const ArgList &Args, StringRef SubArchName,
+ StringRef OutputFilePrefix) const {
+ ArgStringList CmdArgs;
+
+ for (const auto &II : Inputs)
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ // Add an intermediate output file.
+ CmdArgs.push_back("-o");
+ const char *OutputFileName =
+ getOutputFileName(C, OutputFilePrefix, "-linked", "bc");
+ CmdArgs.push_back(OutputFileName);
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Exec, CmdArgs, Inputs,
+ InputInfo(&JA, Args.MakeArgString(OutputFileName))));
+ return OutputFileName;
+}
+
+const char *AMDGCN::OpenMPLinker::constructLlcCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args, llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix, const char *InputFileName,
+ bool OutputIsAsm) const {
+ // Construct llc command.
+ ArgStringList LlcArgs;
+ // The input to llc is the output from opt.
+ LlcArgs.push_back(InputFileName);
+ // Pass optimization arg to llc.
+ addLLCOptArg(Args, LlcArgs);
+ LlcArgs.push_back("-mtriple=amdgcn-amd-amdhsa");
+ LlcArgs.push_back(Args.MakeArgString("-mcpu=" + SubArchName));
+ LlcArgs.push_back(
+ Args.MakeArgString(Twine("-filetype=") + (OutputIsAsm ? "asm" : "obj")));
+
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ LlcArgs.push_back(A->getValue(0));
+ }
+
+ // Add output filename
+ LlcArgs.push_back("-o");
+ const char *LlcOutputFile =
+ getOutputFileName(C, OutputFilePrefix, "", OutputIsAsm ? "s" : "o");
+ LlcArgs.push_back(LlcOutputFile);
+ const char *Llc = Args.MakeArgString(getToolChain().GetProgramPath("llc"));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Llc, LlcArgs, Inputs,
+ InputInfo(&JA, Args.MakeArgString(LlcOutputFile))));
+ return LlcOutputFile;
+}
+
+void AMDGCN::OpenMPLinker::constructLldCommand(
+ Compilation &C, const JobAction &JA, const InputInfoList &Inputs,
+ const InputInfo &Output, const llvm::opt::ArgList &Args,
+ const char *InputFileName) const {
+ // Construct lld command.
+ // The output from ld.lld is an HSA code object file.
+ ArgStringList LldArgs{"-flavor", "gnu", "--no-undefined",
+ "-shared", "-o", Output.getFilename(),
+ InputFileName};
+
+ const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Lld, LldArgs, Inputs,
+ InputInfo(&JA, Args.MakeArgString(Output.getFilename()))));
+}
+
+// For amdgcn, the inputs of the linker job are device bitcode and the output
+// is an object file. It runs llvm-link, opt, llc, and then lld steps.
+void AMDGCN::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &TC = getToolChain();
+ assert(getToolChain().getTriple().isAMDGCN() && "Unsupported target");
+
+ const toolchains::AMDGPUOpenMPToolChain &AMDGPUOpenMPTC =
+ static_cast<const toolchains::AMDGPUOpenMPToolChain &>(TC);
+
+ std::string GPUArch = Args.getLastArgValue(options::OPT_march_EQ).str();
+ if (GPUArch.empty()) {
+ if (!checkSystemForAMDGPU(Args, AMDGPUOpenMPTC, GPUArch))
+ return;
+ }
+
+ // Prefix for temporary file name.
+ std::string Prefix;
+ for (const auto &II : Inputs)
+ if (II.isFilename())
+ Prefix = llvm::sys::path::stem(II.getFilename()).str() + "-" + GPUArch;
+ assert(Prefix.length() && "no linker inputs are files ");
+
+ // Each command outputs different files.
+ const char *LLVMLinkCommand =
+ constructLLVMLinkCommand(C, JA, Inputs, Args, GPUArch, Prefix);
+
+ // Produce readable assembly if save-temps is enabled.
+ if (C.getDriver().isSaveTempsEnabled())
+ constructLlcCommand(C, JA, Inputs, Args, GPUArch, Prefix, LLVMLinkCommand,
+ /*OutputIsAsm=*/true);
+ const char *LlcCommand = constructLlcCommand(C, JA, Inputs, Args, GPUArch,
+ Prefix, LLVMLinkCommand);
+ constructLldCommand(C, JA, Inputs, Output, Args, LlcCommand);
+}
+
+AMDGPUOpenMPToolChain::AMDGPUOpenMPToolChain(const Driver &D,
+ const llvm::Triple &Triple,
+ const ToolChain &HostTC,
+ const ArgList &Args)
+ : ROCMToolChain(D, Triple, Args), HostTC(HostTC) {
+ // Lookup binaries into the driver directory, this is used to
+ // discover the clang-offload-bundler executable.
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+void AMDGPUOpenMPToolChain::addClangTargetOptions(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const {
+ HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
+
+ std::string GPUArch = DriverArgs.getLastArgValue(options::OPT_march_EQ).str();
+ if (GPUArch.empty()) {
+ if (!checkSystemForAMDGPU(DriverArgs, *this, GPUArch))
+ return;
+ }
+
+ assert(DeviceOffloadingKind == Action::OFK_OpenMP &&
+ "Only OpenMP offloading kinds are supported.");
+
+ CC1Args.push_back("-target-cpu");
+ CC1Args.push_back(DriverArgs.MakeArgStringRef(GPUArch));
+ CC1Args.push_back("-fcuda-is-device");
+
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return;
+
+ std::string BitcodeSuffix;
+ if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
+ options::OPT_fno_openmp_target_new_runtime, false))
+ BitcodeSuffix = "new-amdgcn-" + GPUArch;
+ else
+ BitcodeSuffix = "amdgcn-" + GPUArch;
+
+ addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
+ getTriple());
+}
+
+llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
+ const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL =
+ HostTC.TranslateArgs(Args, BoundArch, DeviceOffloadKind);
+ if (!DAL)
+ DAL = new DerivedArgList(Args.getBaseArgs());
+
+ const OptTable &Opts = getDriver().getOpts();
+
+ if (DeviceOffloadKind != Action::OFK_OpenMP) {
+ for (Arg *A : Args) {
+ DAL->append(A);
+ }
+ }
+
+ if (!BoundArch.empty()) {
+ DAL->eraseArg(options::OPT_march_EQ);
+ DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
+ BoundArch);
+ }
+
+ return DAL;
+}
+
+Tool *AMDGPUOpenMPToolChain::buildLinker() const {
+ assert(getTriple().isAMDGCN());
+ return new tools::AMDGCN::OpenMPLinker(*this);
+}
+
+void AMDGPUOpenMPToolChain::addClangWarningOptions(
+ ArgStringList &CC1Args) const {
+ HostTC.addClangWarningOptions(CC1Args);
+}
+
+ToolChain::CXXStdlibType
+AMDGPUOpenMPToolChain::GetCXXStdlibType(const ArgList &Args) const {
+ return HostTC.GetCXXStdlibType(Args);
+}
+
+void AMDGPUOpenMPToolChain::AddClangSystemIncludeArgs(
+ const ArgList &DriverArgs, ArgStringList &CC1Args) const {
+ HostTC.AddClangSystemIncludeArgs(DriverArgs, CC1Args);
+}
+
+void AMDGPUOpenMPToolChain::AddIAMCUIncludeArgs(const ArgList &Args,
+ ArgStringList &CC1Args) const {
+ HostTC.AddIAMCUIncludeArgs(Args, CC1Args);
+}
+
+SanitizerMask AMDGPUOpenMPToolChain::getSupportedSanitizers() const {
+ // The AMDGPUOpenMPToolChain only supports sanitizers in the sense that it
+ // allows sanitizer arguments on the command line if they are supported by the
+ // host toolchain. The AMDGPUOpenMPToolChain will actually ignore any command
+ // line arguments for any of these "supported" sanitizers. That means that no
+ // sanitization of device code is actually supported at this time.
+ //
+ // This behavior is necessary because the host and device toolchains
+ // invocations often share the command line, so the device toolchain must
+ // tolerate flags meant only for the host toolchain.
+ return HostTC.getSupportedSanitizers();
+}
+
+VersionTuple
+AMDGPUOpenMPToolChain::computeMSVCVersion(const Driver *D,
+ const ArgList &Args) const {
+ return HostTC.computeMSVCVersion(D, Args);
+}
diff --git a/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h
new file mode 100644
index 000000000000..effca7e212cc
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/AMDGPUOpenMP.h
@@ -0,0 +1,106 @@
+//===- AMDGPUOpenMP.h - AMDGPUOpenMP ToolChain Implementation -*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AMDGPUOPENMP_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AMDGPUOPENMP_H
+
+#include "AMDGPU.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+
+namespace tools {
+
+namespace AMDGCN {
+// Runs llvm-link/opt/llc/lld, which links multiple LLVM bitcode files together
+// with the device library, then compiles the result to ISA in a shared object.
+class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
+public:
+ OpenMPLinker(const ToolChain &TC)
+ : Tool("AMDGCN::OpenMPLinker", "amdgcn-link", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+
+private:
+ /// \return llvm-link output file name.
+ const char *constructLLVMLinkCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix) const;
+
+ /// \return llc output file name.
+ const char *constructLlcCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ llvm::StringRef SubArchName,
+ llvm::StringRef OutputFilePrefix,
+ const char *InputFileName,
+ bool OutputIsAsm = false) const;
+
+ void constructLldCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs, const InputInfo &Output,
+ const llvm::opt::ArgList &Args,
+ const char *InputFileName) const;
+};
+
+} // end namespace AMDGCN
+} // end namespace tools
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY AMDGPUOpenMPToolChain final
+ : public ROCMToolChain {
+public:
+ AMDGPUOpenMPToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ToolChain &HostTC,
+ const llvm::opt::ArgList &Args);
+
+ const llvm::Triple *getAuxTriple() const override {
+ return &HostTC.getTriple();
+ }
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ VersionTuple
+ computeMSVCVersion(const Driver *D,
+ const llvm::opt::ArgList &Args) const override;
+
+ const ToolChain &HostTC;
+
+protected:
+ Tool *buildLinker() const override;
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AMDGPUOPENMP_H
diff --git a/clang/lib/Driver/ToolChains/AVR.cpp b/clang/lib/Driver/ToolChains/AVR.cpp
index ae56b7b5249e..f147292038a8 100644
--- a/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/clang/lib/Driver/ToolChains/AVR.cpp
@@ -8,9 +8,9 @@
#include "AVR.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
@@ -32,247 +32,248 @@ const struct {
StringRef Name;
std::string SubPath;
StringRef Family;
+ unsigned DataAddr;
} MCUInfo[] = {
- {"at90s1200", "", "avr1"},
- {"attiny11", "", "avr1"},
- {"attiny12", "", "avr1"},
- {"attiny15", "", "avr1"},
- {"attiny28", "", "avr1"},
- {"at90s2313", "tiny-stack", "avr2"},
- {"at90s2323", "tiny-stack", "avr2"},
- {"at90s2333", "tiny-stack", "avr2"},
- {"at90s2343", "tiny-stack", "avr2"},
- {"at90s4433", "tiny-stack", "avr2"},
- {"attiny22", "tiny-stack", "avr2"},
- {"attiny26", "tiny-stack", "avr2"},
- {"at90s4414", "", "avr2"},
- {"at90s4434", "", "avr2"},
- {"at90s8515", "", "avr2"},
- {"at90c8534", "", "avr2"},
- {"at90s8535", "", "avr2"},
- {"attiny13", "avr25/tiny-stack", "avr25"},
- {"attiny13a", "avr25/tiny-stack", "avr25"},
- {"attiny2313", "avr25/tiny-stack", "avr25"},
- {"attiny2313a", "avr25/tiny-stack", "avr25"},
- {"attiny24", "avr25/tiny-stack", "avr25"},
- {"attiny24a", "avr25/tiny-stack", "avr25"},
- {"attiny25", "avr25/tiny-stack", "avr25"},
- {"attiny261", "avr25/tiny-stack", "avr25"},
- {"attiny261a", "avr25/tiny-stack", "avr25"},
- {"at86rf401", "avr25", "avr25"},
- {"ata5272", "avr25", "avr25"},
- {"attiny4313", "avr25", "avr25"},
- {"attiny44", "avr25", "avr25"},
- {"attiny44a", "avr25", "avr25"},
- {"attiny84", "avr25", "avr25"},
- {"attiny84a", "avr25", "avr25"},
- {"attiny45", "avr25", "avr25"},
- {"attiny85", "avr25", "avr25"},
- {"attiny441", "avr25", "avr25"},
- {"attiny461", "avr25", "avr25"},
- {"attiny461a", "avr25", "avr25"},
- {"attiny841", "avr25", "avr25"},
- {"attiny861", "avr25", "avr25"},
- {"attiny861a", "avr25", "avr25"},
- {"attiny87", "avr25", "avr25"},
- {"attiny43u", "avr25", "avr25"},
- {"attiny48", "avr25", "avr25"},
- {"attiny88", "avr25", "avr25"},
- {"attiny828", "avr25", "avr25"},
- {"at43usb355", "avr3", "avr3"},
- {"at76c711", "avr3", "avr3"},
- {"atmega103", "avr31", "avr31"},
- {"at43usb320", "avr31", "avr31"},
- {"attiny167", "avr35", "avr35"},
- {"at90usb82", "avr35", "avr35"},
- {"at90usb162", "avr35", "avr35"},
- {"ata5505", "avr35", "avr35"},
- {"atmega8u2", "avr35", "avr35"},
- {"atmega16u2", "avr35", "avr35"},
- {"atmega32u2", "avr35", "avr35"},
- {"attiny1634", "avr35", "avr35"},
- {"atmega8", "avr4", "avr4"},
- {"ata6289", "avr4", "avr4"},
- {"atmega8a", "avr4", "avr4"},
- {"ata6285", "avr4", "avr4"},
- {"ata6286", "avr4", "avr4"},
- {"atmega48", "avr4", "avr4"},
- {"atmega48a", "avr4", "avr4"},
- {"atmega48pa", "avr4", "avr4"},
- {"atmega48pb", "avr4", "avr4"},
- {"atmega48p", "avr4", "avr4"},
- {"atmega88", "avr4", "avr4"},
- {"atmega88a", "avr4", "avr4"},
- {"atmega88p", "avr4", "avr4"},
- {"atmega88pa", "avr4", "avr4"},
- {"atmega88pb", "avr4", "avr4"},
- {"atmega8515", "avr4", "avr4"},
- {"atmega8535", "avr4", "avr4"},
- {"atmega8hva", "avr4", "avr4"},
- {"at90pwm1", "avr4", "avr4"},
- {"at90pwm2", "avr4", "avr4"},
- {"at90pwm2b", "avr4", "avr4"},
- {"at90pwm3", "avr4", "avr4"},
- {"at90pwm3b", "avr4", "avr4"},
- {"at90pwm81", "avr4", "avr4"},
- {"ata5790", "avr5", "avr5"},
- {"ata5795", "avr5", "avr5"},
- {"atmega16", "avr5", "avr5"},
- {"atmega16a", "avr5", "avr5"},
- {"atmega161", "avr5", "avr5"},
- {"atmega162", "avr5", "avr5"},
- {"atmega163", "avr5", "avr5"},
- {"atmega164a", "avr5", "avr5"},
- {"atmega164p", "avr5", "avr5"},
- {"atmega164pa", "avr5", "avr5"},
- {"atmega165", "avr5", "avr5"},
- {"atmega165a", "avr5", "avr5"},
- {"atmega165p", "avr5", "avr5"},
- {"atmega165pa", "avr5", "avr5"},
- {"atmega168", "avr5", "avr5"},
- {"atmega168a", "avr5", "avr5"},
- {"atmega168p", "avr5", "avr5"},
- {"atmega168pa", "avr5", "avr5"},
- {"atmega168pb", "avr5", "avr5"},
- {"atmega169", "avr5", "avr5"},
- {"atmega169a", "avr5", "avr5"},
- {"atmega169p", "avr5", "avr5"},
- {"atmega169pa", "avr5", "avr5"},
- {"atmega32", "avr5", "avr5"},
- {"atmega32a", "avr5", "avr5"},
- {"atmega323", "avr5", "avr5"},
- {"atmega324a", "avr5", "avr5"},
- {"atmega324p", "avr5", "avr5"},
- {"atmega324pa", "avr5", "avr5"},
- {"atmega325", "avr5", "avr5"},
- {"atmega325a", "avr5", "avr5"},
- {"atmega325p", "avr5", "avr5"},
- {"atmega325pa", "avr5", "avr5"},
- {"atmega3250", "avr5", "avr5"},
- {"atmega3250a", "avr5", "avr5"},
- {"atmega3250p", "avr5", "avr5"},
- {"atmega3250pa", "avr5", "avr5"},
- {"atmega328", "avr5", "avr5"},
- {"atmega328p", "avr5", "avr5"},
- {"atmega329", "avr5", "avr5"},
- {"atmega329a", "avr5", "avr5"},
- {"atmega329p", "avr5", "avr5"},
- {"atmega329pa", "avr5", "avr5"},
- {"atmega3290", "avr5", "avr5"},
- {"atmega3290a", "avr5", "avr5"},
- {"atmega3290p", "avr5", "avr5"},
- {"atmega3290pa", "avr5", "avr5"},
- {"atmega406", "avr5", "avr5"},
- {"atmega64", "avr5", "avr5"},
- {"atmega64a", "avr5", "avr5"},
- {"atmega640", "avr5", "avr5"},
- {"atmega644", "avr5", "avr5"},
- {"atmega644a", "avr5", "avr5"},
- {"atmega644p", "avr5", "avr5"},
- {"atmega644pa", "avr5", "avr5"},
- {"atmega645", "avr5", "avr5"},
- {"atmega645a", "avr5", "avr5"},
- {"atmega645p", "avr5", "avr5"},
- {"atmega649", "avr5", "avr5"},
- {"atmega649a", "avr5", "avr5"},
- {"atmega649p", "avr5", "avr5"},
- {"atmega6450", "avr5", "avr5"},
- {"atmega6450a", "avr5", "avr5"},
- {"atmega6450p", "avr5", "avr5"},
- {"atmega6490", "avr5", "avr5"},
- {"atmega6490a", "avr5", "avr5"},
- {"atmega6490p", "avr5", "avr5"},
- {"atmega64rfr2", "avr5", "avr5"},
- {"atmega644rfr2", "avr5", "avr5"},
- {"atmega16hva", "avr5", "avr5"},
- {"atmega16hva2", "avr5", "avr5"},
- {"atmega16hvb", "avr5", "avr5"},
- {"atmega16hvbrevb", "avr5", "avr5"},
- {"atmega32hvb", "avr5", "avr5"},
- {"atmega32hvbrevb", "avr5", "avr5"},
- {"atmega64hve", "avr5", "avr5"},
- {"at90can32", "avr5", "avr5"},
- {"at90can64", "avr5", "avr5"},
- {"at90pwm161", "avr5", "avr5"},
- {"at90pwm216", "avr5", "avr5"},
- {"at90pwm316", "avr5", "avr5"},
- {"atmega32c1", "avr5", "avr5"},
- {"atmega64c1", "avr5", "avr5"},
- {"atmega16m1", "avr5", "avr5"},
- {"atmega32m1", "avr5", "avr5"},
- {"atmega64m1", "avr5", "avr5"},
- {"atmega16u4", "avr5", "avr5"},
- {"atmega32u4", "avr5", "avr5"},
- {"atmega32u6", "avr5", "avr5"},
- {"at90usb646", "avr5", "avr5"},
- {"at90usb647", "avr5", "avr5"},
- {"at90scr100", "avr5", "avr5"},
- {"at94k", "avr5", "avr5"},
- {"m3000", "avr5", "avr5"},
- {"atmega128", "avr51", "avr51"},
- {"atmega128a", "avr51", "avr51"},
- {"atmega1280", "avr51", "avr51"},
- {"atmega1281", "avr51", "avr51"},
- {"atmega1284", "avr51", "avr51"},
- {"atmega1284p", "avr51", "avr51"},
- {"atmega128rfa1", "avr51", "avr51"},
- {"atmega128rfr2", "avr51", "avr51"},
- {"atmega1284rfr2", "avr51", "avr51"},
- {"at90can128", "avr51", "avr51"},
- {"at90usb1286", "avr51", "avr51"},
- {"at90usb1287", "avr51", "avr51"},
- {"atmega2560", "avr6", "avr6"},
- {"atmega2561", "avr6", "avr6"},
- {"atmega256rfr2", "avr6", "avr6"},
- {"atmega2564rfr2", "avr6", "avr6"},
- {"attiny4", "avrtiny", "avrtiny"},
- {"attiny5", "avrtiny", "avrtiny"},
- {"attiny9", "avrtiny", "avrtiny"},
- {"attiny10", "avrtiny", "avrtiny"},
- {"attiny20", "avrtiny", "avrtiny"},
- {"attiny40", "avrtiny", "avrtiny"},
- {"atxmega16a4", "avrxmega2", "avrxmega2"},
- {"atxmega16a4u", "avrxmega2", "avrxmega2"},
- {"atxmega16c4", "avrxmega2", "avrxmega2"},
- {"atxmega16d4", "avrxmega2", "avrxmega2"},
- {"atxmega32a4", "avrxmega2", "avrxmega2"},
- {"atxmega32a4u", "avrxmega2", "avrxmega2"},
- {"atxmega32c4", "avrxmega2", "avrxmega2"},
- {"atxmega32d4", "avrxmega2", "avrxmega2"},
- {"atxmega32e5", "avrxmega2", "avrxmega2"},
- {"atxmega16e5", "avrxmega2", "avrxmega2"},
- {"atxmega8e5", "avrxmega2", "avrxmega2"},
- {"atxmega64a3u", "avrxmega4", "avrxmega4"},
- {"atxmega64a4u", "avrxmega4", "avrxmega4"},
- {"atxmega64b1", "avrxmega4", "avrxmega4"},
- {"atxmega64b3", "avrxmega4", "avrxmega4"},
- {"atxmega64c3", "avrxmega4", "avrxmega4"},
- {"atxmega64d3", "avrxmega4", "avrxmega4"},
- {"atxmega64d4", "avrxmega4", "avrxmega4"},
- {"atxmega64a1", "avrxmega5", "avrxmega5"},
- {"atxmega64a1u", "avrxmega5", "avrxmega5"},
- {"atxmega128a3", "avrxmega6", "avrxmega6"},
- {"atxmega128a3u", "avrxmega6", "avrxmega6"},
- {"atxmega128b1", "avrxmega6", "avrxmega6"},
- {"atxmega128b3", "avrxmega6", "avrxmega6"},
- {"atxmega128c3", "avrxmega6", "avrxmega6"},
- {"atxmega128d3", "avrxmega6", "avrxmega6"},
- {"atxmega128d4", "avrxmega6", "avrxmega6"},
- {"atxmega192a3", "avrxmega6", "avrxmega6"},
- {"atxmega192a3u", "avrxmega6", "avrxmega6"},
- {"atxmega192c3", "avrxmega6", "avrxmega6"},
- {"atxmega192d3", "avrxmega6", "avrxmega6"},
- {"atxmega256a3", "avrxmega6", "avrxmega6"},
- {"atxmega256a3u", "avrxmega6", "avrxmega6"},
- {"atxmega256a3b", "avrxmega6", "avrxmega6"},
- {"atxmega256a3bu", "avrxmega6", "avrxmega6"},
- {"atxmega256c3", "avrxmega6", "avrxmega6"},
- {"atxmega256d3", "avrxmega6", "avrxmega6"},
- {"atxmega384c3", "avrxmega6", "avrxmega6"},
- {"atxmega384d3", "avrxmega6", "avrxmega6"},
- {"atxmega128a1", "avrxmega7", "avrxmega7"},
- {"atxmega128a1u", "avrxmega7", "avrxmega7"},
- {"atxmega128a4u", "avrxmega7", "avrxmega7"},
+ {"at90s1200", "", "avr1", 0},
+ {"attiny11", "", "avr1", 0},
+ {"attiny12", "", "avr1", 0},
+ {"attiny15", "", "avr1", 0},
+ {"attiny28", "", "avr1", 0},
+ {"at90s2313", "tiny-stack", "avr2", 0x800060},
+ {"at90s2323", "tiny-stack", "avr2", 0x800060},
+ {"at90s2333", "tiny-stack", "avr2", 0x800060},
+ {"at90s2343", "tiny-stack", "avr2", 0x800060},
+ {"at90s4433", "tiny-stack", "avr2", 0x800060},
+ {"attiny22", "tiny-stack", "avr2", 0x800060},
+ {"attiny26", "tiny-stack", "avr2", 0x800060},
+ {"at90s4414", "", "avr2", 0x800060},
+ {"at90s4434", "", "avr2", 0x800060},
+ {"at90s8515", "", "avr2", 0x800060},
+ {"at90c8534", "", "avr2", 0x800060},
+ {"at90s8535", "", "avr2", 0x800060},
+ {"attiny13", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny13a", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny2313", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny2313a", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny24", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny24a", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny25", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny261", "avr25/tiny-stack", "avr25", 0x800060},
+ {"attiny261a", "avr25/tiny-stack", "avr25", 0x800060},
+ {"at86rf401", "avr25", "avr25", 0x800060},
+ {"ata5272", "avr25", "avr25", 0x800100},
+ {"attiny4313", "avr25", "avr25", 0x800060},
+ {"attiny44", "avr25", "avr25", 0x800060},
+ {"attiny44a", "avr25", "avr25", 0x800060},
+ {"attiny84", "avr25", "avr25", 0x800060},
+ {"attiny84a", "avr25", "avr25", 0x800060},
+ {"attiny45", "avr25", "avr25", 0x800060},
+ {"attiny85", "avr25", "avr25", 0x800060},
+ {"attiny441", "avr25", "avr25", 0x800100},
+ {"attiny461", "avr25", "avr25", 0x800060},
+ {"attiny461a", "avr25", "avr25", 0x800060},
+ {"attiny841", "avr25", "avr25", 0x800100},
+ {"attiny861", "avr25", "avr25", 0x800060},
+ {"attiny861a", "avr25", "avr25", 0x800060},
+ {"attiny87", "avr25", "avr25", 0x800100},
+ {"attiny43u", "avr25", "avr25", 0x800060},
+ {"attiny48", "avr25", "avr25", 0x800100},
+ {"attiny88", "avr25", "avr25", 0x800100},
+ {"attiny828", "avr25", "avr25", 0x800100},
+ {"at43usb355", "avr3", "avr3", 0x800100},
+ {"at76c711", "avr3", "avr3", 0x800060},
+ {"atmega103", "avr31", "avr31", 0x800060},
+ {"at43usb320", "avr31", "avr31", 0x800060},
+ {"attiny167", "avr35", "avr35", 0x800100},
+ {"at90usb82", "avr35", "avr35", 0x800100},
+ {"at90usb162", "avr35", "avr35", 0x800100},
+ {"ata5505", "avr35", "avr35", 0x800100},
+ {"atmega8u2", "avr35", "avr35", 0x800100},
+ {"atmega16u2", "avr35", "avr35", 0x800100},
+ {"atmega32u2", "avr35", "avr35", 0x800100},
+ {"attiny1634", "avr35", "avr35", 0x800100},
+ {"atmega8", "avr4", "avr4", 0x800060},
+ {"ata6289", "avr4", "avr4", 0x800100},
+ {"atmega8a", "avr4", "avr4", 0x800060},
+ {"ata6285", "avr4", "avr4", 0x800100},
+ {"ata6286", "avr4", "avr4", 0x800100},
+ {"atmega48", "avr4", "avr4", 0x800100},
+ {"atmega48a", "avr4", "avr4", 0x800100},
+ {"atmega48pa", "avr4", "avr4", 0x800100},
+ {"atmega48pb", "avr4", "avr4", 0x800100},
+ {"atmega48p", "avr4", "avr4", 0x800100},
+ {"atmega88", "avr4", "avr4", 0x800100},
+ {"atmega88a", "avr4", "avr4", 0x800100},
+ {"atmega88p", "avr4", "avr4", 0x800100},
+ {"atmega88pa", "avr4", "avr4", 0x800100},
+ {"atmega88pb", "avr4", "avr4", 0x800100},
+ {"atmega8515", "avr4", "avr4", 0x800060},
+ {"atmega8535", "avr4", "avr4", 0x800060},
+ {"atmega8hva", "avr4", "avr4", 0x800100},
+ {"at90pwm1", "avr4", "avr4", 0x800100},
+ {"at90pwm2", "avr4", "avr4", 0x800100},
+ {"at90pwm2b", "avr4", "avr4", 0x800100},
+ {"at90pwm3", "avr4", "avr4", 0x800100},
+ {"at90pwm3b", "avr4", "avr4", 0x800100},
+ {"at90pwm81", "avr4", "avr4", 0x800100},
+ {"ata5790", "avr5", "avr5", 0x800100},
+ {"ata5795", "avr5", "avr5", 0x800100},
+ {"atmega16", "avr5", "avr5", 0x800060},
+ {"atmega16a", "avr5", "avr5", 0x800060},
+ {"atmega161", "avr5", "avr5", 0x800060},
+ {"atmega162", "avr5", "avr5", 0x800100},
+ {"atmega163", "avr5", "avr5", 0x800060},
+ {"atmega164a", "avr5", "avr5", 0x800100},
+ {"atmega164p", "avr5", "avr5", 0x800100},
+ {"atmega164pa", "avr5", "avr5", 0x800100},
+ {"atmega165", "avr5", "avr5", 0x800100},
+ {"atmega165a", "avr5", "avr5", 0x800100},
+ {"atmega165p", "avr5", "avr5", 0x800100},
+ {"atmega165pa", "avr5", "avr5", 0x800100},
+ {"atmega168", "avr5", "avr5", 0x800100},
+ {"atmega168a", "avr5", "avr5", 0x800100},
+ {"atmega168p", "avr5", "avr5", 0x800100},
+ {"atmega168pa", "avr5", "avr5", 0x800100},
+ {"atmega168pb", "avr5", "avr5", 0x800100},
+ {"atmega169", "avr5", "avr5", 0x800100},
+ {"atmega169a", "avr5", "avr5", 0x800100},
+ {"atmega169p", "avr5", "avr5", 0x800100},
+ {"atmega169pa", "avr5", "avr5", 0x800100},
+ {"atmega32", "avr5", "avr5", 0x800060},
+ {"atmega32a", "avr5", "avr5", 0x800060},
+ {"atmega323", "avr5", "avr5", 0x800060},
+ {"atmega324a", "avr5", "avr5", 0x800100},
+ {"atmega324p", "avr5", "avr5", 0x800100},
+ {"atmega324pa", "avr5", "avr5", 0x800100},
+ {"atmega325", "avr5", "avr5", 0x800100},
+ {"atmega325a", "avr5", "avr5", 0x800100},
+ {"atmega325p", "avr5", "avr5", 0x800100},
+ {"atmega325pa", "avr5", "avr5", 0x800100},
+ {"atmega3250", "avr5", "avr5", 0x800100},
+ {"atmega3250a", "avr5", "avr5", 0x800100},
+ {"atmega3250p", "avr5", "avr5", 0x800100},
+ {"atmega3250pa", "avr5", "avr5", 0x800100},
+ {"atmega328", "avr5", "avr5", 0x800100},
+ {"atmega328p", "avr5", "avr5", 0x800100},
+ {"atmega329", "avr5", "avr5", 0x800100},
+ {"atmega329a", "avr5", "avr5", 0x800100},
+ {"atmega329p", "avr5", "avr5", 0x800100},
+ {"atmega329pa", "avr5", "avr5", 0x800100},
+ {"atmega3290", "avr5", "avr5", 0x800100},
+ {"atmega3290a", "avr5", "avr5", 0x800100},
+ {"atmega3290p", "avr5", "avr5", 0x800100},
+ {"atmega3290pa", "avr5", "avr5", 0x800100},
+ {"atmega406", "avr5", "avr5", 0x800100},
+ {"atmega64", "avr5", "avr5", 0x800100},
+ {"atmega64a", "avr5", "avr5", 0x800100},
+ {"atmega640", "avr5", "avr5", 0x800200},
+ {"atmega644", "avr5", "avr5", 0x800100},
+ {"atmega644a", "avr5", "avr5", 0x800100},
+ {"atmega644p", "avr5", "avr5", 0x800100},
+ {"atmega644pa", "avr5", "avr5", 0x800100},
+ {"atmega645", "avr5", "avr5", 0x800100},
+ {"atmega645a", "avr5", "avr5", 0x800100},
+ {"atmega645p", "avr5", "avr5", 0x800100},
+ {"atmega649", "avr5", "avr5", 0x800100},
+ {"atmega649a", "avr5", "avr5", 0x800100},
+ {"atmega649p", "avr5", "avr5", 0x800100},
+ {"atmega6450", "avr5", "avr5", 0x800100},
+ {"atmega6450a", "avr5", "avr5", 0x800100},
+ {"atmega6450p", "avr5", "avr5", 0x800100},
+ {"atmega6490", "avr5", "avr5", 0x800100},
+ {"atmega6490a", "avr5", "avr5", 0x800100},
+ {"atmega6490p", "avr5", "avr5", 0x800100},
+ {"atmega64rfr2", "avr5", "avr5", 0x800200},
+ {"atmega644rfr2", "avr5", "avr5", 0x800200},
+ {"atmega16hva", "avr5", "avr5", 0x800100},
+ {"atmega16hva2", "avr5", "avr5", 0x800100},
+ {"atmega16hvb", "avr5", "avr5", 0x800100},
+ {"atmega16hvbrevb", "avr5", "avr5", 0x800100},
+ {"atmega32hvb", "avr5", "avr5", 0x800100},
+ {"atmega32hvbrevb", "avr5", "avr5", 0x800100},
+ {"atmega64hve", "avr5", "avr5", 0x800100},
+ {"at90can32", "avr5", "avr5", 0x800100},
+ {"at90can64", "avr5", "avr5", 0x800100},
+ {"at90pwm161", "avr5", "avr5", 0x800100},
+ {"at90pwm216", "avr5", "avr5", 0x800100},
+ {"at90pwm316", "avr5", "avr5", 0x800100},
+ {"atmega32c1", "avr5", "avr5", 0x800100},
+ {"atmega64c1", "avr5", "avr5", 0x800100},
+ {"atmega16m1", "avr5", "avr5", 0x800100},
+ {"atmega32m1", "avr5", "avr5", 0x800100},
+ {"atmega64m1", "avr5", "avr5", 0x800100},
+ {"atmega16u4", "avr5", "avr5", 0x800100},
+ {"atmega32u4", "avr5", "avr5", 0x800100},
+ {"atmega32u6", "avr5", "avr5", 0x800100},
+ {"at90usb646", "avr5", "avr5", 0x800100},
+ {"at90usb647", "avr5", "avr5", 0x800100},
+ {"at90scr100", "avr5", "avr5", 0x800100},
+ {"at94k", "avr5", "avr5", 0x800060},
+ {"m3000", "avr5", "avr5", 0x800060},
+ {"atmega128", "avr51", "avr51", 0x800100},
+ {"atmega128a", "avr51", "avr51", 0x800100},
+ {"atmega1280", "avr51", "avr51", 0x800200},
+ {"atmega1281", "avr51", "avr51", 0x800200},
+ {"atmega1284", "avr51", "avr51", 0x800100},
+ {"atmega1284p", "avr51", "avr51", 0x800100},
+ {"atmega128rfa1", "avr51", "avr51", 0x800200},
+ {"atmega128rfr2", "avr51", "avr51", 0x800200},
+ {"atmega1284rfr2", "avr51", "avr51", 0x800200},
+ {"at90can128", "avr51", "avr51", 0x800200},
+ {"at90usb1286", "avr51", "avr51", 0x800200},
+ {"at90usb1287", "avr51", "avr51", 0x800200},
+ {"atmega2560", "avr6", "avr6", 0x800200},
+ {"atmega2561", "avr6", "avr6", 0x800200},
+ {"atmega256rfr2", "avr6", "avr6", 0x800200},
+ {"atmega2564rfr2", "avr6", "avr6", 0x800200},
+ {"attiny4", "avrtiny", "avrtiny", 0x800040},
+ {"attiny5", "avrtiny", "avrtiny", 0x800040},
+ {"attiny9", "avrtiny", "avrtiny", 0x800040},
+ {"attiny10", "avrtiny", "avrtiny", 0x800040},
+ {"attiny20", "avrtiny", "avrtiny", 0x800040},
+ {"attiny40", "avrtiny", "avrtiny", 0x800040},
+ {"atxmega16a4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega16a4u", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega16c4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega16d4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32a4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32a4u", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32c4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32d4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32e5", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega16e5", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega8e5", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega64a3u", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64a4u", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64b1", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64b3", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64c3", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64d3", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64d4", "avrxmega4", "avrxmega4", 0x802000},
+ {"atxmega64a1", "avrxmega5", "avrxmega5", 0x802000},
+ {"atxmega64a1u", "avrxmega5", "avrxmega5", 0x802000},
+ {"atxmega128a3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128a3u", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128b1", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128b3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128c3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128d3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128d4", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega192a3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega192a3u", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega192c3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega192d3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega256a3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega256a3u", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega256a3b", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega256a3bu", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega256c3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega256d3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega384c3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega384d3", "avrxmega6", "avrxmega6", 0x802000},
+ {"atxmega128a1", "avrxmega7", "avrxmega7", 0x802000},
+ {"atxmega128a1u", "avrxmega7", "avrxmega7", 0x802000},
+ {"atxmega128a4u", "avrxmega7", "avrxmega7", 0x802000},
};
std::string GetMCUSubPath(StringRef MCUName) {
@@ -289,11 +290,11 @@ llvm::Optional<StringRef> GetMCUFamilyName(StringRef MCUName) {
return Optional<StringRef>();
}
-llvm::Optional<unsigned> GetMCUSectionAddressData(StringRef MCU) {
- return llvm::StringSwitch<llvm::Optional<unsigned>>(MCU)
- .Case("atmega328", Optional<unsigned>(0x800100))
- .Case("atmega328p", Optional<unsigned>(0x800100))
- .Default(Optional<unsigned>());
+llvm::Optional<unsigned> GetMCUSectionAddressData(StringRef MCUName) {
+ for (const auto &MCU : MCUInfo)
+ if (MCU.Name == MCUName && MCU.DataAddr > 0)
+ return Optional<unsigned>(MCU.DataAddr);
+ return Optional<unsigned>();
}
const StringRef PossibleAVRLibcLocations[] = {
@@ -334,10 +335,12 @@ AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
// No avr-libc found and so no runtime linked.
D.Diag(diag::warn_drv_avr_libc_not_found);
} else { // We have enough information to link stdlibs
- std::string GCCRoot = std::string(GCCInstallation.getInstallPath());
+ std::string GCCRoot(GCCInstallation.getInstallPath());
+ std::string GCCParentPath(GCCInstallation.getParentLibPath());
std::string LibcRoot = AVRLibcRoot.getValue();
std::string SubPath = GetMCUSubPath(CPU);
+ getProgramPaths().push_back(GCCParentPath + "/../bin");
getFilePaths().push_back(LibcRoot + std::string("/lib/") + SubPath);
getFilePaths().push_back(GCCRoot + std::string("/") + SubPath);
@@ -350,6 +353,23 @@ AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
}
}
+void AVRToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc) ||
+ DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Omit if there is no avr-libc installed.
+ Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
+ if (!AVRLibcRoot.hasValue())
+ return;
+
+ // Add 'avr-libc/include' to clang system include paths if applicable.
+ std::string AVRInc = AVRLibcRoot.getValue() + "/include";
+ if (llvm::sys::fs::is_directory(AVRInc))
+ addSystemInclude(DriverArgs, CC1Args, AVRInc);
+}
+
Tool *AVRToolChain::buildLinker() const {
return new tools::AVR::Linker(getTriple(), *this, LinkStdlib);
}
@@ -419,9 +439,10 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
llvm::Optional<std::string> AVRToolChain::findAVRLibcInstallation() const {
for (StringRef PossiblePath : PossibleAVRLibcLocations) {
+ std::string Path = getDriver().SysRoot + PossiblePath.str();
// Return the first avr-libc installation that exists.
- if (llvm::sys::fs::is_directory(PossiblePath))
- return Optional<std::string>(std::string(PossiblePath));
+ if (llvm::sys::fs::is_directory(Path))
+ return Optional<std::string>(Path);
}
return llvm::None;
diff --git a/clang/lib/Driver/ToolChains/AVR.h b/clang/lib/Driver/ToolChains/AVR.h
index a3198b249580..f612aa691182 100644
--- a/clang/lib/Driver/ToolChains/AVR.h
+++ b/clang/lib/Driver/ToolChains/AVR.h
@@ -10,7 +10,7 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_AVR_H
#include "Gnu.h"
-#include "InputInfo.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Tool.h"
@@ -22,6 +22,9 @@ class LLVM_LIBRARY_VISIBILITY AVRToolChain : public Generic_ELF {
public:
AVRToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
protected:
Tool *buildLinker() const override;
diff --git a/clang/lib/Driver/ToolChains/Ananas.cpp b/clang/lib/Driver/ToolChains/Ananas.cpp
index e5e33fe24874..be1476a7636c 100644
--- a/clang/lib/Driver/ToolChains/Ananas.cpp
+++ b/clang/lib/Driver/ToolChains/Ananas.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "Ananas.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Option/ArgList.h"
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index a5e632fd8cdb..ed8c7e94b013 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -41,19 +41,19 @@ std::string aarch64::getAArch64TargetCPU(const ArgList &Args,
if (CPU == "native")
return std::string(llvm::sys::getHostCPUName());
- // arm64e requires v8.3a and only runs on apple-a12 and later CPUs.
- if (Triple.isArm64e())
- return "apple-a12";
-
if (CPU.size())
return CPU;
if (Triple.isTargetMachineMac() &&
Triple.getArch() == llvm::Triple::aarch64) {
- // Apple Silicon macs default to A12 CPUs.
- return "apple-a12";
+ // Apple Silicon macs default to M1 CPUs.
+ return "apple-m1";
}
+ // arm64e requires v8.3a and only runs on apple-a12 and later CPUs.
+ if (Triple.isArm64e())
+ return "apple-a12";
+
// Make sure we pick the appropriate Apple CPU if -arch is used or when
// targeting a Darwin OS.
if (Args.getLastArg(options::OPT_arch) || Triple.isOSDarwin())
@@ -185,12 +185,25 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
void aarch64::getAArch64TargetFeatures(const Driver &D,
const llvm::Triple &Triple,
const ArgList &Args,
- std::vector<StringRef> &Features) {
+ std::vector<StringRef> &Features,
+ bool ForAS) {
Arg *A;
bool success = true;
// Enable NEON by default.
Features.push_back("+neon");
- if ((A = Args.getLastArg(options::OPT_march_EQ)))
+ llvm::StringRef WaMArch = "";
+ if (ForAS)
+ for (const auto *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler))
+ for (StringRef Value : A->getValues())
+ if (Value.startswith("-march="))
+ WaMArch = Value.substr(7);
+  // Call getAArch64ArchFeaturesFromMarch only if "-Wa,-march=" or
+  // "-Xassembler -march" is detected. Otherwise it may return false
+  // and cause Clang to error out.
+ if (WaMArch.size())
+ success = getAArch64ArchFeaturesFromMarch(D, WaMArch, Args, Features);
+ else if ((A = Args.getLastArg(options::OPT_march_EQ)))
success = getAArch64ArchFeaturesFromMarch(D, A->getValue(), Args, Features);
else if ((A = Args.getLastArg(options::OPT_mcpu_EQ)))
success = getAArch64ArchFeaturesFromMcpu(D, A->getValue(), Args, Features);
@@ -235,11 +248,17 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
StringRef Scope = A->getValue();
bool EnableRetBr = false;
bool EnableBlr = false;
- if (Scope != "none" && Scope != "all") {
+ bool DisableComdat = false;
+ if (Scope != "none") {
SmallVector<StringRef, 4> Opts;
Scope.split(Opts, ",");
for (auto Opt : Opts) {
Opt = Opt.trim();
+ if (Opt == "all") {
+ EnableBlr = true;
+ EnableRetBr = true;
+ continue;
+ }
if (Opt == "retbr") {
EnableRetBr = true;
continue;
@@ -248,19 +267,27 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
EnableBlr = true;
continue;
}
+ if (Opt == "comdat") {
+ DisableComdat = false;
+ continue;
+ }
+ if (Opt == "nocomdat") {
+ DisableComdat = true;
+ continue;
+ }
D.Diag(diag::err_invalid_sls_hardening)
<< Scope << A->getAsString(Args);
break;
}
- } else if (Scope == "all") {
- EnableRetBr = true;
- EnableBlr = true;
}
if (EnableRetBr)
Features.push_back("+harden-sls-retbr");
if (EnableBlr)
Features.push_back("+harden-sls-blr");
+ if (DisableComdat) {
+ Features.push_back("+harden-sls-nocomdat");
+ }
}
// En/disable crc
diff --git a/clang/lib/Driver/ToolChains/Arch/AArch64.h b/clang/lib/Driver/ToolChains/Arch/AArch64.h
index 713af870d69f..d47c402d4a42 100644
--- a/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -22,7 +22,8 @@ namespace aarch64 {
void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
- std::vector<llvm::StringRef> &Features);
+ std::vector<llvm::StringRef> &Features,
+ bool ForAS);
std::string getAArch64TargetCPU(const llvm::opt::ArgList &Args,
const llvm::Triple &Triple, llvm::opt::Arg *&A);
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index ef590db1eecd..4ab547fabe43 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -50,11 +50,14 @@ void arm::getARMArchCPUFromArgs(const ArgList &Args, llvm::StringRef &Arch,
for (const Arg *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
- StringRef Value = A->getValue();
- if (Value.startswith("-mcpu="))
- CPU = Value.substr(6);
- if (Value.startswith("-march="))
- Arch = Value.substr(7);
+ // Use getValues because -Wa can have multiple arguments
+ // e.g. -Wa,-mcpu=foo,-mcpu=bar
+ for (StringRef Value : A->getValues()) {
+ if (Value.startswith("-mcpu="))
+ CPU = Value.substr(6);
+ if (Value.startswith("-march="))
+ Arch = Value.substr(7);
+ }
}
}
@@ -163,6 +166,132 @@ arm::ReadTPMode arm::getReadTPMode(const Driver &D, const ArgList &Args) {
return ReadTPMode::Soft;
}
+void arm::setArchNameInTriple(const Driver &D, const ArgList &Args,
+ types::ID InputType, llvm::Triple &Triple) {
+ StringRef MCPU, MArch;
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ MCPU = A->getValue();
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ MArch = A->getValue();
+
+ std::string CPU = Triple.isOSBinFormatMachO()
+ ? tools::arm::getARMCPUForMArch(MArch, Triple).str()
+ : tools::arm::getARMTargetCPU(MCPU, MArch, Triple);
+ StringRef Suffix = tools::arm::getLLVMArchSuffixForARM(CPU, MArch, Triple);
+
+ bool IsBigEndian = Triple.getArch() == llvm::Triple::armeb ||
+ Triple.getArch() == llvm::Triple::thumbeb;
+ // Handle pseudo-target flags '-mlittle-endian'/'-EL' and
+ // '-mbig-endian'/'-EB'.
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian)) {
+ IsBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
+ }
+ std::string ArchName = IsBigEndian ? "armeb" : "arm";
+
+  // FIXME: Thumb should just be another -target-feature, not in the triple.
+ bool IsMProfile =
+ llvm::ARM::parseArchProfile(Suffix) == llvm::ARM::ProfileKind::M;
+ bool ThumbDefault = IsMProfile ||
+ // Thumb2 is the default for V7 on Darwin.
+ (llvm::ARM::parseArchVersion(Suffix) == 7 &&
+ Triple.isOSBinFormatMachO()) ||
+ // FIXME: this is invalid for WindowsCE
+ Triple.isOSWindows();
+
+ // Check if ARM ISA was explicitly selected (using -mno-thumb or -marm) for
+ // M-Class CPUs/architecture variants, which is not supported.
+ bool ARMModeRequested =
+ !Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb, ThumbDefault);
+ if (IsMProfile && ARMModeRequested) {
+ if (MCPU.size())
+ D.Diag(diag::err_cpu_unsupported_isa) << CPU << "ARM";
+ else
+ D.Diag(diag::err_arch_unsupported_isa)
+ << tools::arm::getARMArch(MArch, Triple) << "ARM";
+ }
+
+ // Check to see if an explicit choice to use thumb has been made via
+ // -mthumb. For assembler files we must check for -mthumb in the options
+ // passed to the assembler via -Wa or -Xassembler.
+ bool IsThumb = false;
+ if (InputType != types::TY_PP_Asm)
+ IsThumb =
+ Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb, ThumbDefault);
+ else {
+ // Ideally we would check for these flags in
+ // CollectArgsForIntegratedAssembler but we can't change the ArchName at
+ // that point.
+ llvm::StringRef WaMArch, WaMCPU;
+ for (const auto *A :
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ for (StringRef Value : A->getValues()) {
+ // There is no assembler equivalent of -mno-thumb, -marm, or -mno-arm.
+ if (Value == "-mthumb")
+ IsThumb = true;
+ else if (Value.startswith("-march="))
+ WaMArch = Value.substr(7);
+ else if (Value.startswith("-mcpu="))
+ WaMCPU = Value.substr(6);
+ }
+ }
+
+ if (WaMCPU.size() || WaMArch.size()) {
+ // The way this works means that we prefer -Wa,-mcpu's architecture
+ // over -Wa,-march. Which matches the compiler behaviour.
+ Suffix = tools::arm::getLLVMArchSuffixForARM(WaMCPU, WaMArch, Triple);
+ }
+ }
+
+ // Assembly files should start in ARM mode, unless arch is M-profile, or
+ // -mthumb has been passed explicitly to the assembler. Windows is always
+ // thumb.
+ if (IsThumb || IsMProfile || Triple.isOSWindows()) {
+ if (IsBigEndian)
+ ArchName = "thumbeb";
+ else
+ ArchName = "thumb";
+ }
+ Triple.setArchName(ArchName + Suffix.str());
+}
+
+void arm::setFloatABIInTriple(const Driver &D, const ArgList &Args,
+ llvm::Triple &Triple) {
+ bool isHardFloat =
+ (arm::getARMFloatABI(D, Triple, Args) == arm::FloatABI::Hard);
+
+ switch (Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABI:
+ case llvm::Triple::GNUEABIHF:
+ Triple.setEnvironment(isHardFloat ? llvm::Triple::GNUEABIHF
+ : llvm::Triple::GNUEABI);
+ break;
+ case llvm::Triple::EABI:
+ case llvm::Triple::EABIHF:
+ Triple.setEnvironment(isHardFloat ? llvm::Triple::EABIHF
+ : llvm::Triple::EABI);
+ break;
+ case llvm::Triple::MuslEABI:
+ case llvm::Triple::MuslEABIHF:
+ Triple.setEnvironment(isHardFloat ? llvm::Triple::MuslEABIHF
+ : llvm::Triple::MuslEABI);
+ break;
+ default: {
+ arm::FloatABI DefaultABI = arm::getDefaultFloatABI(Triple);
+ if (DefaultABI != arm::FloatABI::Invalid &&
+ isHardFloat != (DefaultABI == arm::FloatABI::Hard)) {
+ Arg *ABIArg =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ);
+ assert(ABIArg && "Non-default float abi expected to be from arg");
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ABIArg->getAsString(Args) << Triple.getTriple();
+ }
+ break;
+ }
+ }
+}
+
arm::FloatABI arm::getARMFloatABI(const ToolChain &TC, const ArgList &Args) {
return arm::getARMFloatABI(TC.getDriver(), TC.getEffectiveTriple(), Args);
}
@@ -290,8 +419,8 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
arm::FloatABI ABI = arm::getARMFloatABI(D, Triple, Args);
arm::ReadTPMode ThreadPointer = arm::getReadTPMode(D, Args);
- const Arg *WaCPU = nullptr, *WaFPU = nullptr;
- const Arg *WaHDiv = nullptr, *WaArch = nullptr;
+ llvm::Optional<std::pair<const Arg *, StringRef>> WaCPU, WaFPU, WaHDiv,
+ WaArch;
// This vector will accumulate features from the architecture
// extension suffixes on -mcpu and -march (e.g. the 'bar' in
@@ -325,15 +454,18 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
// to the assembler correctly.
for (const Arg *A :
Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
- StringRef Value = A->getValue();
- if (Value.startswith("-mfpu=")) {
- WaFPU = A;
- } else if (Value.startswith("-mcpu=")) {
- WaCPU = A;
- } else if (Value.startswith("-mhwdiv=")) {
- WaHDiv = A;
- } else if (Value.startswith("-march=")) {
- WaArch = A;
+ // We use getValues here because you can have many options per -Wa
+ // We will keep the last one we find for each of these
+ for (StringRef Value : A->getValues()) {
+ if (Value.startswith("-mfpu=")) {
+ WaFPU = std::make_pair(A, Value.substr(6));
+ } else if (Value.startswith("-mcpu=")) {
+ WaCPU = std::make_pair(A, Value.substr(6));
+ } else if (Value.startswith("-mhwdiv=")) {
+ WaHDiv = std::make_pair(A, Value.substr(8));
+ } else if (Value.startswith("-march=")) {
+ WaArch = std::make_pair(A, Value.substr(7));
+ }
}
}
}
@@ -353,8 +485,8 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (CPUArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< CPUArg->getAsString(Args);
- CPUName = StringRef(WaCPU->getValue()).substr(6);
- CPUArg = WaCPU;
+ CPUName = WaCPU->second;
+ CPUArg = WaCPU->first;
} else if (CPUArg)
CPUName = CPUArg->getValue();
@@ -363,11 +495,12 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (ArchArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< ArchArg->getAsString(Args);
- ArchName = StringRef(WaArch->getValue()).substr(7);
- checkARMArchName(D, WaArch, Args, ArchName, CPUName, ExtensionFeatures,
- Triple, ArchArgFPUID);
- // FIXME: Set Arch.
- D.Diag(clang::diag::warn_drv_unused_argument) << WaArch->getAsString(Args);
+ ArchName = WaArch->second;
+ // This will set any features after the base architecture.
+ checkARMArchName(D, WaArch->first, Args, ArchName, CPUName,
+ ExtensionFeatures, Triple, ArchArgFPUID);
+ // The base architecture was handled in ToolChain::ComputeLLVMTriple because
+ // triple is read only by this point.
} else if (ArchArg) {
ArchName = ArchArg->getValue();
checkARMArchName(D, ArchArg, Args, ArchName, CPUName, ExtensionFeatures,
@@ -399,8 +532,7 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (FPUArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< FPUArg->getAsString(Args);
- (void)getARMFPUFeatures(D, WaFPU, Args, StringRef(WaFPU->getValue()).substr(6),
- Features);
+ (void)getARMFPUFeatures(D, WaFPU->first, Args, WaFPU->second, Features);
} else if (FPUArg) {
FPUID = getARMFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
} else if (Triple.isAndroid() && getARMSubArchVersionNumber(Triple) >= 7) {
@@ -409,6 +541,14 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (!llvm::ARM::getFPUFeatures(FPUID, Features))
D.Diag(clang::diag::err_drv_clang_unsupported)
<< std::string("-mfpu=") + AndroidFPU;
+ } else {
+ if (!ForAS) {
+ std::string CPU = arm::getARMTargetCPU(CPUName, ArchName, Triple);
+ llvm::ARM::ArchKind ArchKind =
+ arm::getLLVMArchKindForARM(CPU, ArchName, Triple);
+ FPUID = llvm::ARM::getDefaultFPU(CPU, ArchKind);
+ (void)llvm::ARM::getFPUFeatures(FPUID, Features);
+ }
}
// Now we've finished accumulating features from arch, cpu and fpu,
@@ -423,8 +563,7 @@ void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
if (HDivArg)
D.Diag(clang::diag::warn_drv_unused_argument)
<< HDivArg->getAsString(Args);
- getARMHWDivFeatures(D, WaHDiv, Args,
- StringRef(WaHDiv->getValue()).substr(8), Features);
+ getARMHWDivFeatures(D, WaHDiv->first, Args, WaHDiv->second, Features);
} else if (HDivArg)
getARMHWDivFeatures(D, HDivArg, Args, HDivArg->getValue(), Features);
@@ -487,34 +626,73 @@ fp16_fml_fallthrough:
Features.push_back("-crc");
}
- // For Arch >= ARMv8.0 && A profile: crypto = sha2 + aes
+ // For Arch >= ARMv8.0 && A or R profile: crypto = sha2 + aes
+ // Rather than replace within the feature vector, determine whether each
+ // algorithm is enabled and append this to the end of the vector.
+ // The algorithms can be controlled by their specific feature or the crypto
+  // feature, so their status can be determined by the last occurrence of
+  // either in the vector. This allows one to supersede the other.
+ // e.g. +crypto+noaes in -march/-mcpu should enable sha2, but not aes
// FIXME: this needs reimplementation after the TargetParser rewrite
- auto CryptoIt = llvm::find_if(llvm::reverse(Features), [](const StringRef F) {
- return F.contains("crypto");
- });
- if (CryptoIt != Features.rend()) {
- if (CryptoIt->take_front() == "+") {
- StringRef ArchSuffix = arm::getLLVMArchSuffixForARM(
- arm::getARMTargetCPU(CPUName, ArchName, Triple), ArchName, Triple);
- if (llvm::ARM::parseArchVersion(ArchSuffix) >= 8 &&
- llvm::ARM::parseArchProfile(ArchSuffix) ==
- llvm::ARM::ProfileKind::A) {
- if (ArchName.find_lower("+nosha2") == StringRef::npos &&
- CPUName.find_lower("+nosha2") == StringRef::npos)
- Features.push_back("+sha2");
- if (ArchName.find_lower("+noaes") == StringRef::npos &&
- CPUName.find_lower("+noaes") == StringRef::npos)
- Features.push_back("+aes");
- } else {
+ bool HasSHA2 = false;
+ bool HasAES = false;
+ const auto ItCrypto =
+ llvm::find_if(llvm::reverse(Features), [](const StringRef F) {
+ return F.contains("crypto");
+ });
+ const auto ItSHA2 =
+ llvm::find_if(llvm::reverse(Features), [](const StringRef F) {
+ return F.contains("crypto") || F.contains("sha2");
+ });
+ const auto ItAES =
+ llvm::find_if(llvm::reverse(Features), [](const StringRef F) {
+ return F.contains("crypto") || F.contains("aes");
+ });
+ const bool FoundSHA2 = ItSHA2 != Features.rend();
+ const bool FoundAES = ItAES != Features.rend();
+ if (FoundSHA2)
+ HasSHA2 = ItSHA2->take_front() == "+";
+ if (FoundAES)
+ HasAES = ItAES->take_front() == "+";
+ if (ItCrypto != Features.rend()) {
+ if (HasSHA2 && HasAES)
+ Features.push_back("+crypto");
+ else
+ Features.push_back("-crypto");
+ if (HasSHA2)
+ Features.push_back("+sha2");
+ else
+ Features.push_back("-sha2");
+ if (HasAES)
+ Features.push_back("+aes");
+ else
+ Features.push_back("-aes");
+ }
+
+ if (HasSHA2 || HasAES) {
+ StringRef ArchSuffix = arm::getLLVMArchSuffixForARM(
+ arm::getARMTargetCPU(CPUName, ArchName, Triple), ArchName, Triple);
+ llvm::ARM::ProfileKind ArchProfile =
+ llvm::ARM::parseArchProfile(ArchSuffix);
+ if (!((llvm::ARM::parseArchVersion(ArchSuffix) >= 8) &&
+ (ArchProfile == llvm::ARM::ProfileKind::A ||
+ ArchProfile == llvm::ARM::ProfileKind::R))) {
+ if (HasSHA2)
+ D.Diag(clang::diag::warn_target_unsupported_extension)
+ << "sha2"
+ << llvm::ARM::getArchName(llvm::ARM::parseArch(ArchSuffix));
+ if (HasAES)
D.Diag(clang::diag::warn_target_unsupported_extension)
- << "crypto"
+ << "aes"
<< llvm::ARM::getArchName(llvm::ARM::parseArch(ArchSuffix));
- // With -fno-integrated-as -mfpu=crypto-neon-fp-armv8 some assemblers such as the GNU assembler
- // will permit the use of crypto instructions as the fpu will override the architecture.
- // We keep the crypto feature in this case to preserve compatibility.
- // In all other cases we remove the crypto feature.
- if (!Args.hasArg(options::OPT_fno_integrated_as))
- Features.push_back("-crypto");
+ // With -fno-integrated-as -mfpu=crypto-neon-fp-armv8 some assemblers such
+ // as the GNU assembler will permit the use of crypto instructions as the
+ // fpu will override the architecture. We keep the crypto feature in this
+ // case to preserve compatibility. In all other cases we remove the crypto
+ // feature.
+ if (!Args.hasArg(options::OPT_fno_integrated_as)) {
+ Features.push_back("-sha2");
+ Features.push_back("-aes");
}
}
}
@@ -618,11 +796,17 @@ fp16_fml_fallthrough:
StringRef Scope = A->getValue();
bool EnableRetBr = false;
bool EnableBlr = false;
- if (Scope != "none" && Scope != "all") {
+ bool DisableComdat = false;
+ if (Scope != "none") {
SmallVector<StringRef, 4> Opts;
Scope.split(Opts, ",");
for (auto Opt : Opts) {
Opt = Opt.trim();
+ if (Opt == "all") {
+ EnableBlr = true;
+ EnableRetBr = true;
+ continue;
+ }
if (Opt == "retbr") {
EnableRetBr = true;
continue;
@@ -631,13 +815,18 @@ fp16_fml_fallthrough:
EnableBlr = true;
continue;
}
+ if (Opt == "comdat") {
+ DisableComdat = false;
+ continue;
+ }
+ if (Opt == "nocomdat") {
+ DisableComdat = true;
+ continue;
+ }
D.Diag(diag::err_invalid_sls_hardening)
<< Scope << A->getAsString(Args);
break;
}
- } else if (Scope == "all") {
- EnableRetBr = true;
- EnableBlr = true;
}
if (EnableRetBr || EnableBlr)
@@ -649,6 +838,9 @@ fp16_fml_fallthrough:
Features.push_back("+harden-sls-retbr");
if (EnableBlr)
Features.push_back("+harden-sls-blr");
+ if (DisableComdat) {
+ Features.push_back("+harden-sls-nocomdat");
+ }
}
}
diff --git a/clang/lib/Driver/ToolChains/Arch/ARM.h b/clang/lib/Driver/ToolChains/Arch/ARM.h
index 02d91cdaee13..8e7c10ecd5d6 100644
--- a/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -51,7 +51,11 @@ FloatABI getDefaultFloatABI(const llvm::Triple &Triple);
FloatABI getARMFloatABI(const ToolChain &TC, const llvm::opt::ArgList &Args);
FloatABI getARMFloatABI(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
+void setFloatABIInTriple(const Driver &D, const llvm::opt::ArgList &Args,
+ llvm::Triple &triple);
ReadTPMode getReadTPMode(const Driver &D, const llvm::opt::ArgList &Args);
+void setArchNameInTriple(const Driver &D, const llvm::opt::ArgList &Args,
+ types::ID InputType, llvm::Triple &Triple);
bool useAAPCSForMachO(const llvm::Triple &T);
void getARMArchCPUFromArgs(const llvm::opt::ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/Arch/M68k.cpp b/clang/lib/Driver/ToolChains/Arch/M68k.cpp
new file mode 100644
index 000000000000..119e24cedbab
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Arch/M68k.cpp
@@ -0,0 +1,125 @@
+//===--- M68k.cpp - M68k Helpers for Tools -------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "M68k.h"
+#include "ToolChains/CommonArgs.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Regex.h"
+#include <sstream>
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+/// getM68kTargetCPU - Get the (LLVM) name of the 68000 cpu we are targeting.
+std::string m68k::getM68kTargetCPU(const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
+    // The canonical CPU name is capitalized. However, we also allow
+    // names that start with a lower-case letter or consist of digits only.
+ StringRef CPUName = A->getValue();
+
+ if (CPUName == "native") {
+ std::string CPU = std::string(llvm::sys::getHostCPUName());
+ if (!CPU.empty() && CPU != "generic")
+ return CPU;
+ }
+
+ if (CPUName == "common")
+ return "generic";
+
+ return llvm::StringSwitch<std::string>(CPUName)
+ .Cases("m68000", "68000", "M68000")
+ .Cases("m68010", "68010", "M68010")
+ .Cases("m68020", "68020", "M68020")
+ .Cases("m68030", "68030", "M68030")
+ .Cases("m68040", "68040", "M68040")
+ .Cases("m68060", "68060", "M68060")
+ .Default(CPUName.str());
+ }
+ // FIXME: Throw error when multiple sub-architecture flag exist
+ if (Args.hasArg(clang::driver::options::OPT_m68000))
+ return "M68000";
+ if (Args.hasArg(clang::driver::options::OPT_m68010))
+ return "M68010";
+ if (Args.hasArg(clang::driver::options::OPT_m68020))
+ return "M68020";
+ if (Args.hasArg(clang::driver::options::OPT_m68030))
+ return "M68030";
+ if (Args.hasArg(clang::driver::options::OPT_m68040))
+ return "M68040";
+ if (Args.hasArg(clang::driver::options::OPT_m68060))
+ return "M68060";
+
+ return "";
+}
+
+void m68k::getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args,
+ std::vector<StringRef> &Features) {
+
+ m68k::FloatABI FloatABI = m68k::getM68kFloatABI(D, Args);
+ if (FloatABI == m68k::FloatABI::Soft)
+ Features.push_back("-hard-float");
+
+ // Handle '-ffixed-<register>' flags
+ if (Args.hasArg(options::OPT_ffixed_a0))
+ Features.push_back("+reserve-a0");
+ if (Args.hasArg(options::OPT_ffixed_a1))
+ Features.push_back("+reserve-a1");
+ if (Args.hasArg(options::OPT_ffixed_a2))
+ Features.push_back("+reserve-a2");
+ if (Args.hasArg(options::OPT_ffixed_a3))
+ Features.push_back("+reserve-a3");
+ if (Args.hasArg(options::OPT_ffixed_a4))
+ Features.push_back("+reserve-a4");
+ if (Args.hasArg(options::OPT_ffixed_a5))
+ Features.push_back("+reserve-a5");
+ if (Args.hasArg(options::OPT_ffixed_a6))
+ Features.push_back("+reserve-a6");
+ if (Args.hasArg(options::OPT_ffixed_d0))
+ Features.push_back("+reserve-d0");
+ if (Args.hasArg(options::OPT_ffixed_d1))
+ Features.push_back("+reserve-d1");
+ if (Args.hasArg(options::OPT_ffixed_d2))
+ Features.push_back("+reserve-d2");
+ if (Args.hasArg(options::OPT_ffixed_d3))
+ Features.push_back("+reserve-d3");
+ if (Args.hasArg(options::OPT_ffixed_d4))
+ Features.push_back("+reserve-d4");
+ if (Args.hasArg(options::OPT_ffixed_d5))
+ Features.push_back("+reserve-d5");
+ if (Args.hasArg(options::OPT_ffixed_d6))
+ Features.push_back("+reserve-d6");
+ if (Args.hasArg(options::OPT_ffixed_d7))
+ Features.push_back("+reserve-d7");
+}
+
+m68k::FloatABI m68k::getM68kFloatABI(const Driver &D, const ArgList &Args) {
+ m68k::FloatABI ABI = m68k::FloatABI::Invalid;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float)) {
+
+ if (A->getOption().matches(options::OPT_msoft_float))
+ ABI = m68k::FloatABI::Soft;
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ ABI = m68k::FloatABI::Hard;
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (ABI == m68k::FloatABI::Invalid)
+ ABI = m68k::FloatABI::Hard;
+
+ return ABI;
+}
diff --git a/clang/lib/Driver/ToolChains/Arch/M68k.h b/clang/lib/Driver/ToolChains/Arch/M68k.h
new file mode 100644
index 000000000000..41d53efb940b
--- /dev/null
+++ b/clang/lib/Driver/ToolChains/Arch/M68k.h
@@ -0,0 +1,42 @@
+//===--- M68k.h - M68k-specific Tool Helpers -----------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_M680X0_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_M680X0_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace m68k {
+
+enum class FloatABI {
+ Invalid,
+ Soft,
+ Hard,
+};
+
+FloatABI getM68kFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+
+std::string getM68kTargetCPU(const llvm::opt::ArgList &Args);
+
+void getM68kTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<llvm::StringRef> &Features);
+
+} // end namespace m68k
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_M680X0_H
diff --git a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index ffae47e5672e..ade93d6881a7 100644
--- a/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -63,7 +63,7 @@ isExperimentalExtension(StringRef Ext) {
Ext == "zbr" || Ext == "zbs" || Ext == "zbt" || Ext == "zbproposedc")
return RISCVExtensionVersion{"0", "93"};
if (Ext == "v" || Ext == "zvamo" || Ext == "zvlsseg")
- return RISCVExtensionVersion{"1", "0"};
+ return RISCVExtensionVersion{"0", "10"};
if (Ext == "zfh")
return RISCVExtensionVersion{"0", "1"};
return None;
@@ -258,10 +258,13 @@ static void getExtensionFeatures(const Driver &D,
<< MArch << Error << Ext;
return;
}
- if (Ext == "zvamo" || Ext == "zvlsseg") {
+ if (Ext == "zvlsseg") {
+ Features.push_back("+experimental-v");
+ Features.push_back("+experimental-zvlsseg");
+ } else if (Ext == "zvamo") {
Features.push_back("+experimental-v");
- Features.push_back("+experimental-zvamo");
Features.push_back("+experimental-zvlsseg");
+ Features.push_back("+experimental-zvamo");
} else if (isExperimentalExtension(Ext))
Features.push_back(Args.MakeArgString("+experimental-" + Ext));
else
@@ -429,7 +432,6 @@ static bool getArchFeatures(const Driver &D, StringRef MArch,
break;
case 'v':
Features.push_back("+experimental-v");
- Features.push_back("+experimental-zvamo");
Features.push_back("+experimental-zvlsseg");
break;
}
@@ -610,17 +612,19 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
// rv64* -> lp64
StringRef MArch = getRISCVArch(Args, Triple);
- if (MArch.startswith_lower("rv32")) {
+ if (MArch.startswith_insensitive("rv32")) {
// FIXME: parse `March` to find `D` extension properly
- if (MArch.substr(4).contains_lower("d") || MArch.startswith_lower("rv32g"))
+ if (MArch.substr(4).contains_insensitive("d") ||
+ MArch.startswith_insensitive("rv32g"))
return "ilp32d";
- else if (MArch.startswith_lower("rv32e"))
+ else if (MArch.startswith_insensitive("rv32e"))
return "ilp32e";
else
return "ilp32";
- } else if (MArch.startswith_lower("rv64")) {
+ } else if (MArch.startswith_insensitive("rv64")) {
// FIXME: parse `March` to find `D` extension properly
- if (MArch.substr(4).contains_lower("d") || MArch.startswith_lower("rv64g"))
+ if (MArch.substr(4).contains_insensitive("d") ||
+ MArch.startswith_insensitive("rv64g"))
return "lp64d";
else
return "lp64";
@@ -696,11 +700,11 @@ StringRef riscv::getRISCVArch(const llvm::opt::ArgList &Args,
if (const Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
StringRef MABI = A->getValue();
- if (MABI.equals_lower("ilp32e"))
+ if (MABI.equals_insensitive("ilp32e"))
return "rv32e";
- else if (MABI.startswith_lower("ilp32"))
+ else if (MABI.startswith_insensitive("ilp32"))
return "rv32imafdc";
- else if (MABI.startswith_lower("lp64"))
+ else if (MABI.startswith_insensitive("lp64"))
return "rv64imafdc";
}
diff --git a/clang/lib/Driver/ToolChains/Arch/X86.cpp b/clang/lib/Driver/ToolChains/Arch/X86.cpp
index 94a53f9d9e46..12749c7ec871 100644
--- a/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -213,5 +213,24 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
// Now add any that the user explicitly requested on the command line,
// which may override the defaults.
- handleTargetFeaturesGroup(Args, Features, options::OPT_m_x86_Features_Group);
+ for (const Arg *A : Args.filtered(options::OPT_m_x86_Features_Group,
+ options::OPT_mgeneral_regs_only)) {
+ StringRef Name = A->getOption().getName();
+ A->claim();
+
+ // Skip over "-m".
+ assert(Name.startswith("m") && "Invalid feature name.");
+ Name = Name.substr(1);
+
+ // Replace -mgeneral-regs-only with -x87, -mmx, -sse
+ if (A->getOption().getID() == options::OPT_mgeneral_regs_only) {
+ Features.insert(Features.end(), {"-x87", "-mmx", "-sse"});
+ continue;
+ }
+
+ bool IsNegative = Name.startswith("no-");
+ if (IsNegative)
+ Name = Name.substr(3);
+ Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ }
}
diff --git a/clang/lib/Driver/ToolChains/BareMetal.cpp b/clang/lib/Driver/ToolChains/BareMetal.cpp
index 7619dd30da5a..ce73e39d1456 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.cpp
+++ b/clang/lib/Driver/ToolChains/BareMetal.cpp
@@ -9,8 +9,8 @@
#include "BareMetal.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "Gnu.h"
+#include "clang/Driver/InputInfo.h"
#include "Arch/RISCV.h"
#include "clang/Driver/Compilation.h"
@@ -160,8 +160,9 @@ Tool *BareMetal::buildLinker() const {
std::string BareMetal::getCompilerRTPath() const { return getRuntimesDir(); }
-std::string BareMetal::getCompilerRTBasename(const llvm::opt::ArgList &,
- StringRef, FileType, bool) const {
+std::string BareMetal::buildCompilerRTBasename(const llvm::opt::ArgList &,
+ StringRef, FileType,
+ bool) const {
return ("libclang_rt.builtins-" + getTriple().getArchName() + ".a").str();
}
@@ -298,13 +299,14 @@ void baremetal::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-Bstatic");
- CmdArgs.push_back(Args.MakeArgString("-L" + TC.getRuntimesDir()));
-
- TC.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs, {options::OPT_L, options::OPT_T_Group,
options::OPT_e, options::OPT_s, options::OPT_t,
options::OPT_Z_Flag, options::OPT_r});
+ TC.AddFilePathLibArgs(Args, CmdArgs);
+
+ CmdArgs.push_back(Args.MakeArgString("-L" + TC.getRuntimesDir()));
+
if (TC.ShouldLinkCXXStdlib(Args))
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
diff --git a/clang/lib/Driver/ToolChains/BareMetal.h b/clang/lib/Driver/ToolChains/BareMetal.h
index a6d4922a380f..d68c43c64c97 100644
--- a/clang/lib/Driver/ToolChains/BareMetal.h
+++ b/clang/lib/Driver/ToolChains/BareMetal.h
@@ -33,6 +33,11 @@ public:
protected:
Tool *buildLinker() const override;
+ std::string buildCompilerRTBasename(const llvm::opt::ArgList &Args,
+ StringRef Component,
+ FileType Type = ToolChain::FT_Static,
+ bool AddArch = true) const override;
+
public:
bool useIntegratedAs() const override { return true; }
bool isCrossCompiling() const override { return true; }
@@ -44,10 +49,6 @@ public:
StringRef getOSLibName() const override { return "baremetal"; }
std::string getCompilerRTPath() const override;
- std::string getCompilerRTBasename(const llvm::opt::ArgList &Args,
- StringRef Component,
- FileType Type = ToolChain::FT_Static,
- bool AddArch = true) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override {
return ToolChain::RLT_CompilerRT;
diff --git a/clang/lib/Driver/ToolChains/Clang.cpp b/clang/lib/Driver/ToolChains/Clang.cpp
index fdb8a58cd1b3..a4b53a640ab5 100644
--- a/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/clang/lib/Driver/ToolChains/Clang.cpp
@@ -10,6 +10,7 @@
#include "AMDGPU.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
+#include "Arch/M68k.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
@@ -19,7 +20,6 @@
#include "Arch/X86.h"
#include "CommonArgs.h"
#include "Hexagon.h"
-#include "InputInfo.h"
#include "MSP430.h"
#include "PS4CPU.h"
#include "clang/Basic/CharInfo.h"
@@ -29,6 +29,7 @@
#include "clang/Basic/Version.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/XRayArgs.h"
@@ -51,8 +52,9 @@ using namespace clang;
using namespace llvm::opt;
static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
- if (Arg *A =
- Args.getLastArg(clang::driver::options::OPT_C, options::OPT_CC)) {
+ if (Arg *A = Args.getLastArg(clang::driver::options::OPT_C, options::OPT_CC,
+ options::OPT_fminimize_whitespace,
+ options::OPT_fno_minimize_whitespace)) {
if (!Args.hasArg(options::OPT_E) && !Args.hasArg(options::OPT__SLASH_P) &&
!Args.hasArg(options::OPT__SLASH_EP) && !D.CCCIsCPP()) {
D.Diag(clang::diag::err_drv_argument_only_allowed_with)
@@ -343,7 +345,7 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
- aarch64::getAArch64TargetFeatures(D, Triple, Args, Features);
+ aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, ForAS);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -365,11 +367,15 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::amdgcn:
amdgpu::getAMDGPUTargetFeatures(D, Triple, Args, Features);
break;
+ case llvm::Triple::m68k:
+ m68k::getM68kTargetFeatures(D, Triple, Args, Features);
+ break;
case llvm::Triple::msp430:
msp430::getMSP430TargetFeatures(D, Args, Features);
break;
case llvm::Triple::ve:
ve::getVETargetFeatures(D, Args, Features);
+ break;
}
for (auto Feature : unifyTargetFeatures(Features)) {
@@ -399,7 +405,7 @@ shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
/// master flag, -fexceptions and also language specific flags to enable/disable
/// C++ and Objective-C exceptions. This makes it possible to for example
/// disable C++ exceptions but enable Objective-C exceptions.
-static void addExceptionArgs(const ArgList &Args, types::ID InputType,
+static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
const ToolChain &TC, bool KernelOrKext,
const ObjCRuntime &objcRuntime,
ArgStringList &CmdArgs) {
@@ -414,13 +420,22 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
Args.ClaimAllArgs(options::OPT_fno_objc_exceptions);
Args.ClaimAllArgs(options::OPT_fcxx_exceptions);
Args.ClaimAllArgs(options::OPT_fno_cxx_exceptions);
- return;
+ Args.ClaimAllArgs(options::OPT_fasync_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_async_exceptions);
+ return false;
}
// See if the user explicitly enabled exceptions.
bool EH = Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions,
false);
+ bool EHa = Args.hasFlag(options::OPT_fasync_exceptions,
+ options::OPT_fno_async_exceptions, false);
+ if (EHa) {
+ CmdArgs.push_back("-fasync-exceptions");
+ EH = true;
+ }
+
// Obj-C exceptions are enabled by default, regardless of -fexceptions. This
// is not necessarily sensible, but follows GCC.
if (types::isObjC(InputType) &&
@@ -457,6 +472,7 @@ static void addExceptionArgs(const ArgList &Args, types::ID InputType,
if (EH)
CmdArgs.push_back("-fexceptions");
+ return EH;
}
static bool ShouldEnableAutolink(const ArgList &Args, const ToolChain &TC,
@@ -475,14 +491,6 @@ static bool ShouldEnableAutolink(const ArgList &Args, const ToolChain &TC,
Default);
}
-static bool ShouldDisableDwarfDirectory(const ArgList &Args,
- const ToolChain &TC) {
- bool UseDwarfDirectory =
- Args.hasFlag(options::OPT_fdwarf_directory_asm,
- options::OPT_fno_dwarf_directory_asm, TC.useIntegratedAs());
- return !UseDwarfDirectory;
-}
-
// Convert an arg of the form "-gN" or "-ggdbN" or one of their aliases
// to the corresponding DebugInfoKind.
static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
@@ -496,7 +504,7 @@ static codegenoptions::DebugInfoKind DebugLevelToInfoKind(const Arg &A) {
return codegenoptions::DebugLineTablesOnly;
if (A.getOption().matches(options::OPT_gline_directives_only))
return codegenoptions::DebugDirectivesOnly;
- return codegenoptions::LimitedDebugInfo;
+ return codegenoptions::DebugInfoConstructor;
}
static bool mustUseNonLeafFramePointerForTarget(const llvm::Triple &Triple) {
@@ -617,13 +625,16 @@ getFramePointerKind(const ArgList &Args, const llvm::Triple &Triple) {
/// Add a CC1 option to specify the debug compilation directory.
static void addDebugCompDirArg(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::vfs::FileSystem &VFS) {
- if (Arg *A = Args.getLastArg(options::OPT_fdebug_compilation_dir)) {
- CmdArgs.push_back("-fdebug-compilation-dir");
- CmdArgs.push_back(A->getValue());
+ if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
+ options::OPT_fdebug_compilation_dir_EQ)) {
+ if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
+ CmdArgs.push_back(Args.MakeArgString(Twine("-fdebug-compilation-dir=") +
+ A->getValue()));
+ else
+ A->render(Args, CmdArgs);
} else if (llvm::ErrorOr<std::string> CWD =
VFS.getCurrentWorkingDirectory()) {
- CmdArgs.push_back("-fdebug-compilation-dir");
- CmdArgs.push_back(Args.MakeArgString(*CWD));
+ CmdArgs.push_back(Args.MakeArgString("-fdebug-compilation-dir=" + *CWD));
}
}
@@ -657,16 +668,16 @@ static void addMacroPrefixMapArg(const Driver &D, const ArgList &Args,
}
/// Add a CC1 and CC1AS option to specify the coverage file path prefix map.
-static void addProfilePrefixMapArg(const Driver &D, const ArgList &Args,
+static void addCoveragePrefixMapArg(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs) {
for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
- options::OPT_fprofile_prefix_map_EQ)) {
+ options::OPT_fcoverage_prefix_map_EQ)) {
StringRef Map = A->getValue();
if (Map.find('=') == StringRef::npos)
D.Diag(diag::err_drv_invalid_argument_to_option)
<< Map << A->getOption().getName();
else
- CmdArgs.push_back(Args.MakeArgString("-fprofile-prefix-map=" + Map));
+ CmdArgs.push_back(Args.MakeArgString("-fcoverage-prefix-map=" + Map));
A->claim();
}
}
@@ -783,6 +794,20 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
PGOGenerateArg = nullptr;
}
+ if (TC.getTriple().isOSAIX()) {
+ if (PGOGenerateArg)
+ if (!D.isUsingLTO(false /*IsDeviceOffloadAction */) ||
+ D.getLTOMode() != LTOK_Full)
+ D.Diag(clang::diag::err_drv_argument_only_allowed_with)
+ << PGOGenerateArg->getSpelling() << "-flto";
+ if (ProfileGenerateArg)
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ProfileGenerateArg->getSpelling() << TC.getTriple().str();
+ if (Arg *ProfileSampleUseArg = getLastProfileSampleUseArg(Args))
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << ProfileSampleUseArg->getSpelling() << TC.getTriple().str();
+ }
+
if (ProfileGenerateArg) {
if (ProfileGenerateArg->getOption().matches(
options::OPT_fprofile_instr_generate_EQ))
@@ -860,6 +885,18 @@ static void addPGOAndCoverageFlags(const ToolChain &TC, Compilation &C,
CmdArgs.push_back("-fcoverage-mapping");
}
+ if (Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
+ options::OPT_fcoverage_compilation_dir_EQ)) {
+ if (A->getOption().matches(options::OPT_ffile_compilation_dir_EQ))
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-fcoverage-compilation-dir=") + A->getValue()));
+ else
+ A->render(Args, CmdArgs);
+ } else if (llvm::ErrorOr<std::string> CWD =
+ D.getVFS().getCurrentWorkingDirectory()) {
+ CmdArgs.push_back(Args.MakeArgString("-fcoverage-compilation-dir=" + *CWD));
+ }
+
if (Args.hasArg(options::OPT_fprofile_exclude_files_EQ)) {
auto *Arg = Args.getLastArg(options::OPT_fprofile_exclude_files_EQ);
if (!Args.hasArg(options::OPT_coverage))
@@ -983,6 +1020,14 @@ static unsigned DwarfVersionNum(StringRef ArgValue) {
.Default(0);
}
+// Find a DWARF format version option.
+// This function is a complementary for DwarfVersionNum().
+static const Arg *getDwarfNArg(const ArgList &Args) {
+ return Args.getLastArg(options::OPT_gdwarf_2, options::OPT_gdwarf_3,
+ options::OPT_gdwarf_4, options::OPT_gdwarf_5,
+ options::OPT_gdwarf);
+}
+
static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind DebugInfoKind,
unsigned DwarfVersion,
@@ -1022,6 +1067,9 @@ static void RenderDebugEnablingArgs(const ArgList &Args, ArgStringList &CmdArgs,
case llvm::DebuggerKind::SCE:
CmdArgs.push_back("-debugger-tuning=sce");
break;
+ case llvm::DebuggerKind::DBX:
+ CmdArgs.push_back("-debugger-tuning=dbx");
+ break;
default:
break;
}
@@ -1082,11 +1130,18 @@ static const char *RelocationModelName(llvm::Reloc::Model Model) {
static void handleAMDGPUCodeObjectVersionOptions(const Driver &D,
const ArgList &Args,
ArgStringList &CmdArgs) {
- unsigned CodeObjVer = getOrCheckAMDGPUCodeObjectVersion(D, Args);
- CmdArgs.insert(CmdArgs.begin() + 1,
- Args.MakeArgString(Twine("--amdhsa-code-object-version=") +
- Twine(CodeObjVer)));
- CmdArgs.insert(CmdArgs.begin() + 1, "-mllvm");
+ // If no version was requested by the user, use the default value from the
+ // back end. This is consistent with the value returned from
+ // getAMDGPUCodeObjectVersion. This lets clang emit IR for amdgpu without
+ // requiring the corresponding llvm to have the AMDGPU target enabled,
+ // provided the user (e.g. front end tests) can use the default.
+ if (haveAMDGPUCodeObjectVersionArgument(D, Args)) {
+ unsigned CodeObjVer = getAMDGPUCodeObjectVersion(D, Args);
+ CmdArgs.insert(CmdArgs.begin() + 1,
+ Args.MakeArgString(Twine("--amdhsa-code-object-version=") +
+ Twine(CodeObjVer)));
+ CmdArgs.insert(CmdArgs.begin() + 1, "-mllvm");
+ }
}
void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
@@ -1375,7 +1430,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
}
addMacroPrefixMapArg(D, Args, CmdArgs);
- addProfilePrefixMapArg(D, Args, CmdArgs);
+ addCoveragePrefixMapArg(D, Args, CmdArgs);
}
// FIXME: Move to target hook.
@@ -1522,6 +1577,15 @@ static void renderRemarksOptions(const ArgList &Args, ArgStringList &CmdArgs,
}
}
+void AddAAPCSVolatileBitfieldArgs(const ArgList &Args, ArgStringList &CmdArgs) {
+ if (!Args.hasFlag(options::OPT_faapcs_bitfield_width,
+ options::OPT_fno_aapcs_bitfield_width, true))
+ CmdArgs.push_back("-fno-aapcs-bitfield-width");
+
+ if (Args.getLastArg(options::OPT_ForceAAPCSBitfieldLoad))
+ CmdArgs.push_back("-faapcs-bitfield-load");
+}
+
namespace {
void RenderARMABI(const llvm::Triple &Triple, const ArgList &Args,
ArgStringList &CmdArgs) {
@@ -1580,6 +1644,8 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
if (Args.getLastArg(options::OPT_mcmse))
CmdArgs.push_back("-mcmse");
+
+ AddAAPCSVolatileBitfieldArgs(Args, CmdArgs);
}
void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
@@ -1768,6 +1834,8 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Val;
}
+
+ AddAAPCSVolatileBitfieldArgs(Args, CmdArgs);
}
void Clang::AddMIPSTargetArgs(const ArgList &Args,
@@ -1988,7 +2056,7 @@ static void SetRISCVSmallDataLimit(const ToolChain &TC, const ArgList &Args,
D.Diag(diag::warn_drv_unsupported_sdata);
}
} else if (Args.getLastArgValue(options::OPT_mcmodel_EQ)
- .equals_lower("large") &&
+ .equals_insensitive("large") &&
(Triple.getArch() == llvm::Triple::riscv64)) {
// Not support linker relaxation for RV64 with large code model.
SmallDataLimit = "0";
@@ -2219,8 +2287,8 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
if (!CompilationDatabase) {
std::error_code EC;
- auto File = std::make_unique<llvm::raw_fd_ostream>(Filename, EC,
- llvm::sys::fs::OF_Text);
+ auto File = std::make_unique<llvm::raw_fd_ostream>(
+ Filename, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
D.Diag(clang::diag::err_drv_compilationdatabase) << Filename
<< EC.message();
@@ -2294,7 +2362,8 @@ void Clang::DumpCompilationDatabaseFragmentToDir(
Twine(llvm::sys::path::filename(Input.getFilename())) + ".%%%%.json");
int FD;
SmallString<256> TempPath;
- Err = llvm::sys::fs::createUniqueFile(Path, FD, TempPath);
+ Err = llvm::sys::fs::createUniqueFile(Path, FD, TempPath,
+ llvm::sys::fs::OF_Text);
if (Err) {
Driver.Diag(diag::err_drv_compilationdatabase) << Path << Err.message();
return;
@@ -2304,6 +2373,17 @@ void Clang::DumpCompilationDatabaseFragmentToDir(
DumpCompilationDatabase(C, "", Target, Output, Input, Args);
}
+static bool CheckARMImplicitITArg(StringRef Value) {
+ return Value == "always" || Value == "never" || Value == "arm" ||
+ Value == "thumb";
+}
+
+static void AddARMImplicitITArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ StringRef Value) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-arm-implicit-it=" + Value));
+}
+
static void CollectArgsForIntegratedAssembler(Compilation &C,
const ArgList &Args,
ArgStringList &CmdArgs,
@@ -2320,27 +2400,6 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
DefaultIncrementalLinkerCompatible))
CmdArgs.push_back("-mincremental-linker-compatible");
- switch (C.getDefaultToolChain().getArch()) {
- case llvm::Triple::arm:
- case llvm::Triple::armeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
- if (Arg *A = Args.getLastArg(options::OPT_mimplicit_it_EQ)) {
- StringRef Value = A->getValue();
- if (Value == "always" || Value == "never" || Value == "arm" ||
- Value == "thumb") {
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-arm-implicit-it=" + Value));
- } else {
- D.Diag(diag::err_drv_unsupported_option_argument)
- << A->getOption().getName() << Value;
- }
- }
- break;
- default:
- break;
- }
-
// If you add more args here, also add them to the block below that
// starts with "// If CollectArgsForIntegratedAssembler() isn't called below".
@@ -2354,10 +2413,29 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
bool UseRelaxRelocations = C.getDefaultToolChain().useRelaxRelocations();
bool UseNoExecStack = C.getDefaultToolChain().isNoExecStackDefault();
const char *MipsTargetFeature = nullptr;
+ StringRef ImplicitIt;
for (const Arg *A :
- Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler)) {
+ Args.filtered(options::OPT_Wa_COMMA, options::OPT_Xassembler,
+ options::OPT_mimplicit_it_EQ)) {
A->claim();
+ if (A->getOption().getID() == options::OPT_mimplicit_it_EQ) {
+ switch (C.getDefaultToolChain().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::armeb:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ // Only store the value; the last value set takes effect.
+ ImplicitIt = A->getValue();
+ if (!CheckARMImplicitITArg(ImplicitIt))
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << ImplicitIt;
+ continue;
+ default:
+ break;
+ }
+ }
+
for (StringRef Value : A->getValues()) {
if (TakeNextArg) {
CmdArgs.push_back(Value.data());
@@ -2376,6 +2454,12 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
case llvm::Triple::thumbeb:
case llvm::Triple::arm:
case llvm::Triple::armeb:
+ if (Value.startswith("-mimplicit-it=")) {
+ // Only store the value; the last value set takes effect.
+ ImplicitIt = Value.split("=").second;
+ if (CheckARMImplicitITArg(ImplicitIt))
+ continue;
+ }
if (Value == "-mthumb")
// -mthumb has already been processed in ComputeLLVMTriple()
// recognize but skip over here.
@@ -2461,7 +2545,7 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
CmdArgs.push_back(Value.data());
} else {
RenderDebugEnablingArgs(Args, CmdArgs,
- codegenoptions::LimitedDebugInfo,
+ codegenoptions::DebugInfoConstructor,
DwarfVersion, llvm::DebuggerKind::Default);
}
} else if (Value.startswith("-mcpu") || Value.startswith("-mfpu") ||
@@ -2497,12 +2581,16 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
// -fdebug-compilation-dir (without '=') here.
CmdArgs.push_back("-fdebug-compilation-dir");
CmdArgs.push_back(Value.data());
+ } else if (Value == "--version") {
+ D.PrintVersion(C, llvm::outs());
} else {
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Value;
}
}
}
+ if (ImplicitIt.size())
+ AddARMImplicitITArgs(Args, CmdArgs, ImplicitIt);
if (UseRelaxRelocations)
CmdArgs.push_back("--mrelax-relocations");
if (UseNoExecStack)
@@ -2550,7 +2638,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
llvm::DenormalMode DenormalFPMath = DefaultDenormalFPMath;
llvm::DenormalMode DenormalFP32Math = DefaultDenormalFP32Math;
- StringRef FPContract = "";
+ StringRef FPContract = "on";
bool StrictFPModel = false;
@@ -2575,7 +2663,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
ReciprocalMath = false;
SignedZeros = true;
// -fno_fast_math restores default denormal and fpcontract handling
- FPContract = "";
+ FPContract = "on";
DenormalFPMath = llvm::DenormalMode::getIEEE();
// FIXME: The target may have picked a non-IEEE default mode here based on
@@ -2595,20 +2683,18 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// ffp-model= is a Driver option, it is entirely rewritten into more
// granular options before being passed into cc1.
// Use the gcc option in the switch below.
- if (!FPModel.empty() && !FPModel.equals(Val)) {
+ if (!FPModel.empty() && !FPModel.equals(Val))
D.Diag(clang::diag::warn_drv_overriding_flag_option)
<< Args.MakeArgString("-ffp-model=" + FPModel)
<< Args.MakeArgString("-ffp-model=" + Val);
- FPContract = "";
- }
if (Val.equals("fast")) {
optID = options::OPT_ffast_math;
FPModel = Val;
- FPContract = "fast";
+ FPContract = Val;
} else if (Val.equals("precise")) {
optID = options::OPT_ffp_contract;
FPModel = Val;
- FPContract = "fast";
+ FPContract = "on";
PreciseFPModel = true;
} else if (Val.equals("strict")) {
StrictFPModel = true;
@@ -2694,9 +2780,11 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_ffp_contract: {
StringRef Val = A->getValue();
if (PreciseFPModel) {
- // -ffp-model=precise enables ffp-contract=fast as a side effect
- // the FPContract value has already been set to a string literal
- // and the Val string isn't a pertinent value.
+ // When -ffp-model=precise is seen on the command line,
+ // the boolean PreciseFPModel is set to true which indicates
+ // "the current option is actually PreciseFPModel". The optID
+ // is changed to OPT_ffp_contract and FPContract is set to "on".
+ // the argument Val string is "precise": it shouldn't be checked.
;
} else if (Val.equals("fast") || Val.equals("on") || Val.equals("off"))
FPContract = Val;
@@ -2794,18 +2882,17 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
// -fno_fast_math restores default denormal and fpcontract handling
DenormalFPMath = DefaultDenormalFPMath;
DenormalFP32Math = llvm::DenormalMode::getIEEE();
- FPContract = "";
+ FPContract = "on";
break;
}
if (StrictFPModel) {
// If -ffp-model=strict has been specified on command line but
// subsequent options conflict then emit warning diagnostic.
- if (HonorINFs && HonorNaNs &&
- !AssociativeMath && !ReciprocalMath &&
- SignedZeros && TrappingMath && RoundingFPMath &&
- (FPContract.equals("off") || FPContract.empty()) &&
- DenormalFPMath == llvm::DenormalMode::getIEEE() &&
- DenormalFP32Math == llvm::DenormalMode::getIEEE())
+ if (HonorINFs && HonorNaNs && !AssociativeMath && !ReciprocalMath &&
+ SignedZeros && TrappingMath && RoundingFPMath &&
+ DenormalFPMath == llvm::DenormalMode::getIEEE() &&
+ DenormalFP32Math == llvm::DenormalMode::getIEEE() &&
+ FPContract.equals("off"))
// OK: Current Arg doesn't conflict with -ffp-model=strict
;
else {
@@ -3038,18 +3125,20 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
}
}
- // First support "tls" and "global" for X86 target.
- // TODO: Support "sysreg" for AArch64.
const std::string &TripleStr = EffectiveTriple.getTriple();
if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_EQ)) {
StringRef Value = A->getValue();
if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
- if (Value != "tls" && Value != "global") {
+ if (EffectiveTriple.isX86() && Value != "tls" && Value != "global") {
+ D.Diag(diag::err_drv_invalid_value_with_suggestion)
+ << A->getOption().getName() << Value << "tls global";
+ return;
+ }
+ if (EffectiveTriple.isAArch64() && Value != "sysreg" && Value != "global") {
D.Diag(diag::err_drv_invalid_value_with_suggestion)
- << A->getOption().getName() << Value
- << "valid arguments to '-mstack-protector-guard=' are:tls global";
+ << A->getOption().getName() << Value << "sysreg global";
return;
}
A->render(Args, CmdArgs);
@@ -3057,10 +3146,10 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_offset_EQ)) {
StringRef Value = A->getValue();
- if (!EffectiveTriple.isX86())
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
- unsigned Offset;
+ int Offset;
if (Value.getAsInteger(10, Offset)) {
D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Value;
return;
@@ -3070,13 +3159,16 @@ static void RenderSSPOptions(const Driver &D, const ToolChain &TC,
if (Arg *A = Args.getLastArg(options::OPT_mstack_protector_guard_reg_EQ)) {
StringRef Value = A->getValue();
- if (!EffectiveTriple.isX86())
+ if (!EffectiveTriple.isX86() && !EffectiveTriple.isAArch64())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
if (EffectiveTriple.isX86() && (Value != "fs" && Value != "gs")) {
D.Diag(diag::err_drv_invalid_value_with_suggestion)
- << A->getOption().getName() << Value
- << "for X86, valid arguments to '-mstack-protector-guard-reg=' are:fs gs";
+ << A->getOption().getName() << Value << "fs gs";
+ return;
+ }
+ if (EffectiveTriple.isAArch64() && Value != "sp_el0") {
+ D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Value;
return;
}
A->render(Args, CmdArgs);
@@ -3158,7 +3250,8 @@ static void RenderTrivialAutoVarInitOptions(const Driver &D,
}
}
-static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
+static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ types::ID InputType) {
// cl-denorms-are-zero is not forwarded. It is translated into a generic flag
// for denormal flushing handling based on the target.
const unsigned ForwardedArguments[] = {
@@ -3183,6 +3276,14 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs) {
for (const auto &Arg : ForwardedArguments)
if (const auto *A = Args.getLastArg(Arg))
CmdArgs.push_back(Args.MakeArgString(A->getOption().getPrefixedName()));
+
+ // Only add the default headers if we are compiling OpenCL sources.
+ if ((types::isOpenCL(InputType) ||
+ (Args.hasArg(options::OPT_cl_std_EQ) && types::isSrcFile(InputType))) &&
+ !Args.hasArg(options::OPT_cl_no_stdinc)) {
+ CmdArgs.push_back("-finclude-default-header");
+ CmdArgs.push_back("-fdeclare-opencl-builtins");
+ }
}
static void RenderARCMigrateToolOptions(const Driver &D, const ArgList &Args,
@@ -3600,6 +3701,9 @@ static void RenderObjCOptions(const ToolChain &TC, const Driver &D,
WeakArg->render(Args, CmdArgs);
}
}
+
+ if (Args.hasArg(options::OPT_fobjc_disable_direct_methods_for_testing))
+ CmdArgs.push_back("-fobjc-disable-direct-methods-for-testing");
}
static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
@@ -3740,12 +3844,41 @@ static DwarfFissionKind getDebugFissionKind(const Driver &D,
return DwarfFissionKind::None;
}
+static void renderDwarfFormat(const Driver &D, const llvm::Triple &T,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ unsigned DwarfVersion) {
+ auto *DwarfFormatArg =
+ Args.getLastArg(options::OPT_gdwarf64, options::OPT_gdwarf32);
+ if (!DwarfFormatArg)
+ return;
+
+ if (DwarfFormatArg->getOption().matches(options::OPT_gdwarf64)) {
+ if (DwarfVersion < 3)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << DwarfFormatArg->getAsString(Args) << "DWARFv3 or greater";
+ else if (!T.isArch64Bit())
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << DwarfFormatArg->getAsString(Args) << "64 bit architecture";
+ else if (!T.isOSBinFormatELF())
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << DwarfFormatArg->getAsString(Args) << "ELF platforms";
+ }
+
+ DwarfFormatArg->render(Args, CmdArgs);
+}
+
static void renderDebugOptions(const ToolChain &TC, const Driver &D,
const llvm::Triple &T, const ArgList &Args,
bool EmitCodeView, bool IRInput,
ArgStringList &CmdArgs,
codegenoptions::DebugInfoKind &DebugInfoKind,
DwarfFissionKind &DwarfFission) {
+ // These two forms of profiling info can't be used together.
+ if (const Arg *A1 = Args.getLastArg(options::OPT_fpseudo_probe_for_profiling))
+ if (const Arg *A2 = Args.getLastArg(options::OPT_fdebug_info_for_profiling))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A1->getAsString(Args) << A2->getAsString(Args);
+
if (Args.hasFlag(options::OPT_fdebug_info_for_profiling,
options::OPT_fno_debug_info_for_profiling, false) &&
checkDebugInfoOption(
@@ -3780,7 +3913,7 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
}
}
if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ DebugInfoKind = codegenoptions::DebugInfoConstructor;
// If the last option explicitly specified a debug-info level, use it.
if (checkDebugInfoOption(A, Args, D, TC) &&
@@ -3806,15 +3939,15 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
DebuggerTuning = llvm::DebuggerKind::LLDB;
else if (A->getOption().matches(options::OPT_gsce))
DebuggerTuning = llvm::DebuggerKind::SCE;
+ else if (A->getOption().matches(options::OPT_gdbx))
+ DebuggerTuning = llvm::DebuggerKind::DBX;
else
DebuggerTuning = llvm::DebuggerKind::GDB;
}
}
// If a -gdwarf argument appeared, remember it.
- const Arg *GDwarfN = Args.getLastArg(
- options::OPT_gdwarf_2, options::OPT_gdwarf_3, options::OPT_gdwarf_4,
- options::OPT_gdwarf_5, options::OPT_gdwarf);
+ const Arg *GDwarfN = getDwarfNArg(Args);
bool EmitDwarf = false;
if (GDwarfN) {
if (checkDebugInfoOption(GDwarfN, Args, D, TC))
@@ -3871,19 +4004,29 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
DebugInfoKind == codegenoptions::DebugDirectivesOnly)
DebugInfoKind = codegenoptions::NoDebugInfo;
- // We ignore flag -gstrict-dwarf for now.
+ // strict DWARF is set to false by default. But for DBX, we need it to be set
+ // as true by default.
+ if (const Arg *A = Args.getLastArg(options::OPT_gstrict_dwarf))
+ (void)checkDebugInfoOption(A, Args, D, TC);
+ if (Args.hasFlag(options::OPT_gstrict_dwarf, options::OPT_gno_strict_dwarf,
+ DebuggerTuning == llvm::DebuggerKind::DBX))
+ CmdArgs.push_back("-gstrict-dwarf");
+
// And we handle flag -grecord-gcc-switches later with DWARFDebugFlags.
Args.ClaimAllArgs(options::OPT_g_flags_Group);
// Column info is included by default for everything except SCE and
// CodeView. Clang doesn't track end columns, just starting columns, which,
// in theory, is fine for CodeView (and PDB). In practice, however, the
- // Microsoft debuggers don't handle missing end columns well, so it's better
- // not to include any column info.
+ // Microsoft debuggers don't handle missing end columns well, and the AIX
+ // debugger DBX also doesn't handle the columns well, so it's better not to
+ // include any column info.
if (const Arg *A = Args.getLastArg(options::OPT_gcolumn_info))
(void)checkDebugInfoOption(A, Args, D, TC);
if (!Args.hasFlag(options::OPT_gcolumn_info, options::OPT_gno_column_info,
- !EmitCodeView && DebuggerTuning != llvm::DebuggerKind::SCE))
+ !EmitCodeView &&
+ (DebuggerTuning != llvm::DebuggerKind::SCE &&
+ DebuggerTuning != llvm::DebuggerKind::DBX)))
CmdArgs.push_back("-gno-column-info");
// FIXME: Move backend command line options to the module.
@@ -3892,14 +4035,14 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (checkDebugInfoOption(A, Args, D, TC)) {
if (DebugInfoKind != codegenoptions::DebugLineTablesOnly &&
DebugInfoKind != codegenoptions::DebugDirectivesOnly) {
- DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ DebugInfoKind = codegenoptions::DebugInfoConstructor;
CmdArgs.push_back("-dwarf-ext-refs");
CmdArgs.push_back("-fmodule-format=obj");
}
}
- if (T.isOSBinFormatELF() && !SplitDWARFInlining)
- CmdArgs.push_back("-fno-split-dwarf-inlining");
+ if (T.isOSBinFormatELF() && SplitDWARFInlining)
+ CmdArgs.push_back("-fsplit-dwarf-inlining");
// After we've dealt with all combinations of things that could
// make DebugInfoKind be other than None or DebugLineTablesOnly,
@@ -3913,7 +4056,8 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (const Arg *A = Args.getLastArg(options::OPT_fstandalone_debug))
(void)checkDebugInfoOption(A, Args, D, TC);
- if (DebugInfoKind == codegenoptions::LimitedDebugInfo) {
+ if (DebugInfoKind == codegenoptions::LimitedDebugInfo ||
+ DebugInfoKind == codegenoptions::DebugInfoConstructor) {
if (Args.hasFlag(options::OPT_fno_eliminate_unused_debug_types,
options::OPT_feliminate_unused_debug_types, false))
DebugInfoKind = codegenoptions::UnusedTypeInfo;
@@ -4024,6 +4168,14 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
}
}
+ // To avoid join/split of directory+filename, the integrated assembler prefers
+ // the directory form of .file on all DWARF versions. GNU as doesn't allow the
+ // form before DWARF v5.
+ if (!Args.hasFlag(options::OPT_fdwarf_directory_asm,
+ options::OPT_fno_dwarf_directory_asm,
+ TC.useIntegratedAs() || EffectiveDWARFVersion >= 5))
+ CmdArgs.push_back("-fno-dwarf-directory-asm");
+
// Decide how to render forward declarations of template instantiations.
// SCE wants full descriptions, others just get them in the name.
if (DebuggerTuning == llvm::DebuggerKind::SCE)
@@ -4034,25 +4186,7 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (DebuggerTuning == llvm::DebuggerKind::SCE)
CmdArgs.push_back("-dwarf-explicit-import");
- auto *DwarfFormatArg =
- Args.getLastArg(options::OPT_gdwarf64, options::OPT_gdwarf32);
- if (DwarfFormatArg &&
- DwarfFormatArg->getOption().matches(options::OPT_gdwarf64)) {
- const llvm::Triple &RawTriple = TC.getTriple();
- if (EffectiveDWARFVersion < 3)
- D.Diag(diag::err_drv_argument_only_allowed_with)
- << DwarfFormatArg->getAsString(Args) << "DWARFv3 or greater";
- else if (!RawTriple.isArch64Bit())
- D.Diag(diag::err_drv_argument_only_allowed_with)
- << DwarfFormatArg->getAsString(Args) << "64 bit architecture";
- else if (!RawTriple.isOSBinFormatELF())
- D.Diag(diag::err_drv_argument_only_allowed_with)
- << DwarfFormatArg->getAsString(Args) << "ELF platforms";
- }
-
- if (DwarfFormatArg)
- DwarfFormatArg->render(Args, CmdArgs);
-
+ renderDwarfFormat(D, T, Args, CmdArgs, EffectiveDWARFVersion);
RenderDebugInfoCompressionArgs(Args, CmdArgs, D, TC);
}
@@ -4077,9 +4211,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// include as part of the module. All other jobs are expected to have exactly
// one input.
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
+ bool IsCudaDevice = JA.isDeviceOffloading(Action::OFK_Cuda);
bool IsHIP = JA.isOffloading(Action::OFK_HIP);
+ bool IsHIPDevice = JA.isDeviceOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
bool IsHeaderModulePrecompile = isa<HeaderModulePrecompileJobAction>(JA);
+ bool IsDeviceOffloadAction = !(JA.isDeviceOffloading(Action::OFK_None) ||
+ JA.isDeviceOffloading(Action::OFK_Host));
+ bool IsUsingLTO = D.isUsingLTO(IsDeviceOffloadAction);
+ auto LTOMode = D.getLTOMode(IsDeviceOffloadAction);
// A header module compilation doesn't have a main input file, so invent a
// fake one as a placeholder.
@@ -4183,14 +4323,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Args.hasFlag(options::OPT_fsycl, options::OPT_fno_sycl, false)) {
- CmdArgs.push_back("-fsycl");
CmdArgs.push_back("-fsycl-is-device");
if (Arg *A = Args.getLastArg(options::OPT_sycl_std_EQ)) {
A->render(Args, CmdArgs);
} else {
- // Ensure the default version in SYCL mode is 1.2.1 (aka 2017)
- CmdArgs.push_back("-sycl-std=2017");
+ // Ensure the default version in SYCL mode is 2020.
+ CmdArgs.push_back("-sycl-std=2020");
}
}
@@ -4220,6 +4359,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// are provided.
TC.addClangWarningOptions(CmdArgs);
+ // FIXME: Subclass ToolChain for SPIR and move this to addClangWarningOptions.
+ if (Triple.isSPIR())
+ CmdArgs.push_back("-Wspir-compat");
+
// Select the appropriate action.
RewriteKind rewriteKind = RK_None;
@@ -4295,13 +4438,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-emit-llvm");
} else if (JA.getType() == types::TY_LLVM_BC ||
JA.getType() == types::TY_LTO_BC) {
- CmdArgs.push_back("-emit-llvm-bc");
+ // Emit textual llvm IR for AMDGPU offloading for -emit-llvm -S
+ if (Triple.isAMDGCN() && IsOpenMPDevice && Args.hasArg(options::OPT_S) &&
+ Args.hasArg(options::OPT_emit_llvm)) {
+ CmdArgs.push_back("-emit-llvm");
+ } else {
+ CmdArgs.push_back("-emit-llvm-bc");
+ }
} else if (JA.getType() == types::TY_IFS ||
JA.getType() == types::TY_IFS_CPP) {
StringRef ArgStr =
Args.hasArg(options::OPT_interface_stub_version_EQ)
? Args.getLastArgValue(options::OPT_interface_stub_version_EQ)
- : "experimental-ifs-v2";
+ : "ifs-v1";
CmdArgs.push_back("-emit-interface-stubs");
CmdArgs.push_back(
Args.MakeArgString(Twine("-interface-stub-version=") + ArgStr.str()));
@@ -4328,13 +4477,30 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_LLVM_BC)
CmdArgs.push_back("-emit-llvm-uselists");
- // Device-side jobs do not support LTO.
- bool isDeviceOffloadAction = !(JA.isDeviceOffloading(Action::OFK_None) ||
- JA.isDeviceOffloading(Action::OFK_Host));
-
- if (D.isUsingLTO() && !isDeviceOffloadAction) {
- Args.AddLastArg(CmdArgs, options::OPT_flto, options::OPT_flto_EQ);
- CmdArgs.push_back("-flto-unit");
+ if (IsUsingLTO) {
+ if (!IsDeviceOffloadAction) {
+ if (Args.hasArg(options::OPT_flto))
+ CmdArgs.push_back("-flto");
+ else {
+ if (D.getLTOMode() == LTOK_Thin)
+ CmdArgs.push_back("-flto=thin");
+ else
+ CmdArgs.push_back("-flto=full");
+ }
+ CmdArgs.push_back("-flto-unit");
+ } else if (Triple.isAMDGPU()) {
+ // Only AMDGPU supports device-side LTO
+ assert(LTOMode == LTOK_Full || LTOMode == LTOK_Thin);
+ CmdArgs.push_back(Args.MakeArgString(
+ Twine("-flto=") + (LTOMode == LTOK_Thin ? "thin" : "full")));
+ CmdArgs.push_back("-flto-unit");
+ } else {
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << Args.getLastArg(options::OPT_foffload_lto,
+ options::OPT_foffload_lto_EQ)
+ ->getAsString(Args)
+ << Triple.getTriple();
+ }
}
}
@@ -4359,7 +4525,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Embed-bitcode option.
// Only white-listed flags below are allowed to be embedded.
- if (C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO() &&
+ if (C.getDriver().embedBitcodeInObject() && !IsUsingLTO &&
(isa<BackendJobAction>(JA) || isa<AssembleJobAction>(JA))) {
// Add flags implied by -fembed-bitcode.
Args.AddLastArg(CmdArgs, options::OPT_fembed_bitcode_EQ);
@@ -4476,7 +4642,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
return;
}
- if (C.getDriver().embedBitcodeMarkerOnly() && !C.getDriver().isUsingLTO())
+ if (C.getDriver().embedBitcodeMarkerOnly() && !IsUsingLTO)
CmdArgs.push_back("-fembed-bitcode=marker");
// We normally speed up the clang process a bit by skipping destructors at
@@ -4617,7 +4783,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_semantic_interposition);
if (RelocationModel != llvm::Reloc::Static && !IsPIE) {
// The supported targets need to call AsmPrinter::getSymbolPreferLocal.
- bool SupportsLocalAlias = Triple.isX86();
+ bool SupportsLocalAlias =
+ Triple.isAArch64() || Triple.isRISCV() || Triple.isX86();
if (!A)
CmdArgs.push_back("-fhalf-no-semantic-interposition");
else if (A->getOption().matches(options::OPT_fsemantic_interposition))
@@ -4654,26 +4821,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// LLVM Code Generator Options.
- if (Args.hasArg(options::OPT_frewrite_map_file) ||
- Args.hasArg(options::OPT_frewrite_map_file_EQ)) {
- for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file,
- options::OPT_frewrite_map_file_EQ)) {
- StringRef Map = A->getValue();
- if (!llvm::sys::fs::exists(Map)) {
- D.Diag(diag::err_drv_no_such_file) << Map;
- } else {
- CmdArgs.push_back("-frewrite-map-file");
- CmdArgs.push_back(A->getValue());
- A->claim();
- }
- }
- }
-
- if (Triple.isOSAIX() && Args.hasArg(options::OPT_maltivec)) {
- if (Args.getLastArg(options::OPT_mabi_EQ_vec_extabi)) {
- CmdArgs.push_back("-mabi=vec-extabi");
+ for (const Arg *A : Args.filtered(options::OPT_frewrite_map_file_EQ)) {
+ StringRef Map = A->getValue();
+ if (!llvm::sys::fs::exists(Map)) {
+ D.Diag(diag::err_drv_no_such_file) << Map;
} else {
- D.Diag(diag::err_aix_default_altivec_abi);
+ A->render(Args, CmdArgs);
+ A->claim();
}
}
@@ -4682,14 +4836,28 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Triple.isOSAIX())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getSpelling() << RawTriple.str();
- if (A->getOption().getID() == options::OPT_mabi_EQ_vec_default)
- D.Diag(diag::err_aix_default_altivec_abi);
+ if (A->getOption().getID() == options::OPT_mabi_EQ_vec_extabi)
+ CmdArgs.push_back("-mabi=vec-extabi");
+ else
+ CmdArgs.push_back("-mabi=vec-default");
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_mlong_double_128)) {
+ // Emit an unsupported option error until Clang's library integration
+ // support for 128-bit long double is available for AIX.
+ if (Triple.isOSAIX())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getSpelling() << RawTriple.str();
}
if (Arg *A = Args.getLastArg(options::OPT_Wframe_larger_than_EQ)) {
StringRef v = A->getValue();
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back(Args.MakeArgString("-warn-stack-size=" + v));
+ // FIXME: Validate the argument here so we don't produce meaningless errors
+ // about -fwarn-stack-size=.
+ if (v.empty())
+ D.Diag(diag::err_drv_missing_argument) << A->getSpelling() << 1;
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fwarn-stack-size=" + v));
A->claim();
}
@@ -4808,12 +4976,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_ffine_grained_bitfield_accesses,
options::OPT_fno_fine_grained_bitfield_accesses);
+ Args.AddLastArg(CmdArgs, options::OPT_fexperimental_relative_cxx_abi_vtables,
+ options::OPT_fno_experimental_relative_cxx_abi_vtables);
+
// Handle segmented stacks.
- if (Args.hasArg(options::OPT_fsplit_stack))
- CmdArgs.push_back("-split-stacks");
+ if (Args.hasFlag(options::OPT_fsplit_stack, options::OPT_fno_split_stack,
+ false))
+ CmdArgs.push_back("-fsplit-stack");
+
+ // -fprotect-parens=0 is default.
+ if (Args.hasFlag(options::OPT_fprotect_parens,
+ options::OPT_fno_protect_parens, false))
+ CmdArgs.push_back("-fprotect-parens");
RenderFloatingPointOptions(TC, D, OFastEnabled, Args, CmdArgs, JA);
+ if (Arg *A = Args.getLastArg(options::OPT_fextend_args_EQ)) {
+ const llvm::Triple::ArchType Arch = TC.getArch();
+ if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
+ StringRef V = A->getValue();
+ if (V == "64")
+ CmdArgs.push_back("-fextend-arguments=64");
+ else if (V != "32")
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << A->getValue() << A->getOption().getName();
+ } else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getOption().getName() << TripleStr;
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_mdouble_EQ)) {
if (TC.getArch() == llvm::Triple::avr)
A->render(Args, CmdArgs);
@@ -4856,7 +5047,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getValue() << A->getOption().getName();
}
- if (!TC.useIntegratedAs())
+ // If the toolchain chooses to use MCAsmParser for inline asm, don't pass the
+ // option to disable the integrated assembler explicitly.
+ if (!TC.useIntegratedAs() && !TC.parseInlineAsmUsingAsmParser())
CmdArgs.push_back("-no-integrated-as");
if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
@@ -4869,9 +5062,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Enable -mconstructor-aliases except on darwin, where we have to work around
- // a linker bug (see <rdar://problem/7651567>), and CUDA device code, where
- // aliases aren't supported. Similarly, aliases aren't yet supported for AIX.
- if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX() && !RawTriple.isOSAIX())
+ // a linker bug (see <rdar://problem/7651567>), and CUDA/AMDGPU device code,
+ // where aliases aren't supported.
+ if (!RawTriple.isOSDarwin() && !RawTriple.isNVPTX() && !RawTriple.isAMDGPU())
CmdArgs.push_back("-mconstructor-aliases");
// Darwin's kernel doesn't support guard variables; just die if we
@@ -4909,21 +5102,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// This is a coarse approximation of what llvm-gcc actually does, both
// -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
// complicated ways.
- bool AsynchronousUnwindTables =
+ bool UnwindTables =
Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
options::OPT_fno_asynchronous_unwind_tables,
(TC.IsUnwindTablesDefault(Args) ||
TC.getSanitizerArgs().needsUnwindTables()) &&
!Freestanding);
- if (Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
- AsynchronousUnwindTables))
+ UnwindTables = Args.hasFlag(options::OPT_funwind_tables,
+ options::OPT_fno_unwind_tables, UnwindTables);
+ if (UnwindTables)
CmdArgs.push_back("-munwind-tables");
// Prepare `-aux-target-cpu` and `-aux-target-feature` unless
// `--gpu-use-aux-triple-only` is specified.
if (!Args.getLastArg(options::OPT_gpu_use_aux_triple_only) &&
- ((IsCuda && JA.isDeviceOffloading(Action::OFK_Cuda)) ||
- (IsHIP && JA.isDeviceOffloading(Action::OFK_HIP)))) {
+ (IsCudaDevice || IsHIPDevice)) {
const ArgList &HostArgs =
C.getArgsForToolChain(nullptr, StringRef(), Action::OFK_None);
std::string HostCPU =
@@ -4944,11 +5137,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
StringRef CM = A->getValue();
if (CM == "small" || CM == "kernel" || CM == "medium" || CM == "large" ||
- CM == "tiny")
- A->render(Args, CmdArgs);
- else
+ CM == "tiny") {
+ if (Triple.isOSAIX() && CM == "medium")
+ CmdArgs.push_back("-mcmodel=large");
+ else
+ A->render(Args, CmdArgs);
+ } else {
D.Diag(diag::err_drv_invalid_argument_to_option)
<< CM << A->getOption().getName();
+ }
}
if (Arg *A = Args.getLastArg(options::OPT_mtls_size_EQ)) {
@@ -4973,6 +5170,21 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderTargetOptions(Triple, Args, KernelOrKext, CmdArgs);
+ // FIXME: For now we want to demote any errors to warnings, when they have
+ // been raised for asking the wrong question of scalable vectors, such as
+ // asking for the fixed number of elements. This may happen because code that
+ // is not yet ported to work for scalable vectors uses the wrong interfaces,
+ // whereas the behaviour is actually correct. Emitting a warning helps bring
+ // up scalable vector support in an incremental way. When scalable vector
+ // support is stable enough, all uses of wrong interfaces should be considered
+ // as errors, but until then, we can live with a warning being emitted by the
+ // compiler. This way, Clang can be used to compile code with scalable vectors
+ // and identify possible issues.
+ if (isa<BackendJobAction>(JA)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-treat-scalable-fixed-error-as-warning");
+ }
+
// These two are potentially updated by AddClangCLArgs.
codegenoptions::DebugInfoKind DebugInfoKind = codegenoptions::NoDebugInfo;
bool EmitCodeView = false;
@@ -5037,11 +5249,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-H");
CmdArgs.push_back("-sys-header-deps");
}
+ Args.AddAllArgs(CmdArgs, options::OPT_fshow_skipped_includes);
if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
CmdArgs.push_back("-header-include-file");
- CmdArgs.push_back(D.CCPrintHeadersFilename ? D.CCPrintHeadersFilename
- : "-");
+ CmdArgs.push_back(!D.CCPrintHeadersFilename.empty()
+ ? D.CCPrintHeadersFilename.c_str()
+ : "-");
CmdArgs.push_back("-sys-header-deps");
}
Args.AddLastArg(CmdArgs, options::OPT_P);
@@ -5049,8 +5263,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (D.CCLogDiagnostics && !D.CCGenDiagnostics) {
CmdArgs.push_back("-diagnostic-log-file");
- CmdArgs.push_back(D.CCLogDiagnosticsFilename ? D.CCLogDiagnosticsFilename
- : "-");
+ CmdArgs.push_back(!D.CCLogDiagnosticsFilename.empty()
+ ? D.CCLogDiagnosticsFilename.c_str()
+ : "-");
}
// Give the gen diagnostics more chances to succeed, by avoiding intentional
@@ -5058,6 +5273,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (D.CCGenDiagnostics)
CmdArgs.push_back("-disable-pragma-debug-crash");
+ // Allow backend to put its diagnostic files in the same place as frontend
+ // crash diagnostics files.
+ if (Args.hasArg(options::OPT_fcrash_diagnostics_dir)) {
+ StringRef Dir = Args.getLastArgValue(options::OPT_fcrash_diagnostics_dir);
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString("-crash-diagnostics-dir=" + Dir));
+ }
+
bool UseSeparateSections = isUseSeparateSections(Triple);
if (Args.hasFlag(options::OPT_ffunction_sections,
@@ -5066,15 +5289,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Arg *A = Args.getLastArg(options::OPT_fbasic_block_sections_EQ)) {
+ StringRef Val = A->getValue();
if (Triple.isX86() && Triple.isOSBinFormatELF()) {
- StringRef Val = A->getValue();
if (Val != "all" && Val != "labels" && Val != "none" &&
!Val.startswith("list="))
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
else
A->render(Args, CmdArgs);
- } else {
+ } else if (Triple.isNVPTX()) {
+ // Do not pass the option to the GPU compilation. We still want it enabled
+ // for the host-side compilation, so seeing it here is not an error.
+ } else if (Val != "none") {
+ // =none is allowed everywhere. It's useful for overriding the option
+ // and is the same as not specifying the option.
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
}
@@ -5199,11 +5427,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
/*Default=*/false))
Args.AddLastArg(CmdArgs, options::OPT_ffixed_point);
+ if (Arg *A = Args.getLastArg(options::OPT_fcxx_abi_EQ))
+ A->render(Args, CmdArgs);
+
+ Args.AddLastArg(CmdArgs, options::OPT_fexperimental_relative_cxx_abi_vtables,
+ options::OPT_fno_experimental_relative_cxx_abi_vtables);
+
// Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
// (-ansi is equivalent to -std=c89 or -std=c++98).
//
// If a std is supplied, only add -trigraphs if it follows the
// option.
+ bool ImplyVCPPCVer = false;
bool ImplyVCPPCXXVer = false;
const Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi);
if (Std) {
@@ -5228,9 +5463,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// doesn't match. For the time being just ignore this for C++ inputs;
// eventually we want to do all the standard defaulting here instead of
// splitting it between the driver and clang -cc1.
- if (!types::isCXX(InputType))
- Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ, "-std=",
- /*Joined=*/true);
+ if (!types::isCXX(InputType)) {
+ if (!Args.hasArg(options::OPT__SLASH_std)) {
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ, "-std=",
+ /*Joined=*/true);
+ } else
+ ImplyVCPPCVer = true;
+ }
else if (IsWindowsMSVC)
ImplyVCPPCXXVer = true;
@@ -5280,9 +5519,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-gnu-keywords");
}
- if (ShouldDisableDwarfDirectory(Args, TC))
- CmdArgs.push_back("-fno-dwarf-directory-asm");
-
if (!ShouldEnableAutolink(Args, TC, JA))
CmdArgs.push_back("-fno-autolink");
@@ -5357,6 +5593,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_stack_size_section, RawTriple.isPS4()))
CmdArgs.push_back("-fstack-size-section");
+ if (Args.hasArg(options::OPT_fstack_usage)) {
+ CmdArgs.push_back("-stack-usage-file");
+
+ if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
+ SmallString<128> OutputFilename(OutputOpt->getValue());
+ llvm::sys::path::replace_extension(OutputFilename, "su");
+ CmdArgs.push_back(Args.MakeArgString(OutputFilename));
+ } else
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine(getBaseInputStem(Args, Inputs)) + ".su"));
+ }
+
CmdArgs.push_back("-ferror-limit");
if (Arg *A = Args.getLastArg(options::OPT_ferror_limit_EQ))
CmdArgs.push_back(A->getValue());
@@ -5437,18 +5685,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args) << TripleStr;
}
- Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+
+ if (Args.hasFlag(options::OPT_fvisibility_inlines_hidden,
+ options::OPT_fno_visibility_inlines_hidden, false))
+ CmdArgs.push_back("-fvisibility-inlines-hidden");
+
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden_static_local_var,
options::OPT_fno_visibility_inlines_hidden_static_local_var);
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_global_new_delete_hidden);
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
+ if (Args.hasFlag(options::OPT_fno_operator_names,
+ options::OPT_foperator_names, false))
+ CmdArgs.push_back("-fno-operator-names");
+
// Forward -f (flag) options which we can pass directly.
Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
Args.AddLastArg(CmdArgs, options::OPT_fdigraphs, options::OPT_fno_digraphs);
- Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
options::OPT_fno_emulated_tls);
@@ -5496,13 +5751,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_cuda_mode, /*Default=*/false))
CmdArgs.push_back("-fopenmp-cuda-mode");
- // When in OpenMP offloading mode with NVPTX target, forward
- // cuda-parallel-target-regions flag
- if (Args.hasFlag(options::OPT_fopenmp_cuda_parallel_target_regions,
- options::OPT_fno_openmp_cuda_parallel_target_regions,
- /*Default=*/true))
- CmdArgs.push_back("-fopenmp-cuda-parallel-target-regions");
-
// When in OpenMP offloading mode with NVPTX target, check if full runtime
// is required.
if (Args.hasFlag(options::OPT_fopenmp_cuda_force_full_runtime,
@@ -5542,8 +5790,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_fpatchable_function_entry_EQ)) {
StringRef S0 = A->getValue(), S = S0;
unsigned Size, Offset = 0;
- if (!Triple.isAArch64() && Triple.getArch() != llvm::Triple::x86 &&
- Triple.getArch() != llvm::Triple::x86_64)
+ if (!Triple.isAArch64() && !Triple.isRISCV() && !Triple.isX86())
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< A->getAsString(Args) << TripleStr;
else if (S.consumeInteger(10, Size) ||
@@ -5591,6 +5838,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
(Args.hasArg(options::OPT_mkernel) && types::isCXX(InputType)))
CmdArgs.push_back("-fapple-kext");
+ Args.AddLastArg(CmdArgs, options::OPT_altivec_src_compat);
Args.AddLastArg(CmdArgs, options::OPT_flax_vector_conversions_EQ);
Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
@@ -5626,6 +5874,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (A->getOption().matches(options::OPT_freroll_loops))
CmdArgs.push_back("-freroll-loops");
+ Args.AddLastArg(CmdArgs, options::OPT_ffinite_loops,
+ options::OPT_fno_finite_loops);
+
Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
Args.AddLastArg(CmdArgs, options::OPT_funroll_loops,
options::OPT_fno_unroll_loops);
@@ -5681,7 +5932,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// Forward -cl options to -cc1
- RenderOpenCLOptions(Args, CmdArgs);
+ RenderOpenCLOptions(Args, CmdArgs, InputType);
if (IsHIP) {
if (Args.hasFlag(options::OPT_fhip_new_launch_api,
@@ -5693,6 +5944,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (IsCuda || IsHIP) {
+ if (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
+ CmdArgs.push_back("-fgpu-rdc");
if (Args.hasFlag(options::OPT_fgpu_defer_diag,
options::OPT_fno_gpu_defer_diag, false))
CmdArgs.push_back("-fgpu-defer-diag");
@@ -5709,29 +5962,38 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(Twine("-fcf-protection=") + A->getValue()));
}
- // Forward -f options with positive and negative forms; we translate
- // these by hand.
- if (Arg *A = getLastProfileSampleUseArg(Args)) {
- auto *PGOArg = Args.getLastArg(
- options::OPT_fprofile_generate, options::OPT_fprofile_generate_EQ,
- options::OPT_fcs_profile_generate, options::OPT_fcs_profile_generate_EQ,
- options::OPT_fprofile_use, options::OPT_fprofile_use_EQ);
- if (PGOArg)
- D.Diag(diag::err_drv_argument_not_allowed_with)
- << "SampleUse with PGO options";
+ // Forward -f options with positive and negative forms; we translate these by
+ // hand. Do not propagate PGO options to the GPU-side compilations as the
+ // profile info is for the host-side compilation only.
+ if (!(IsCudaDevice || IsHIPDevice)) {
+ if (Arg *A = getLastProfileSampleUseArg(Args)) {
+ auto *PGOArg = Args.getLastArg(
+ options::OPT_fprofile_generate, options::OPT_fprofile_generate_EQ,
+ options::OPT_fcs_profile_generate,
+ options::OPT_fcs_profile_generate_EQ, options::OPT_fprofile_use,
+ options::OPT_fprofile_use_EQ);
+ if (PGOArg)
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "SampleUse with PGO options";
+
+ StringRef fname = A->getValue();
+ if (!llvm::sys::fs::exists(fname))
+ D.Diag(diag::err_drv_no_such_file) << fname;
+ else
+ A->render(Args, CmdArgs);
+ }
+ Args.AddLastArg(CmdArgs, options::OPT_fprofile_remapping_file_EQ);
- StringRef fname = A->getValue();
- if (!llvm::sys::fs::exists(fname))
- D.Diag(diag::err_drv_no_such_file) << fname;
- else
- A->render(Args, CmdArgs);
+ if (Args.hasFlag(options::OPT_fpseudo_probe_for_profiling,
+ options::OPT_fno_pseudo_probe_for_profiling, false)) {
+ CmdArgs.push_back("-fpseudo-probe-for-profiling");
+ // Enforce -funique-internal-linkage-names if it's not explicitly turned
+ // off.
+ if (Args.hasFlag(options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names, true))
+ CmdArgs.push_back("-funique-internal-linkage-names");
+ }
}
- Args.AddLastArg(CmdArgs, options::OPT_fprofile_remapping_file_EQ);
-
- if (Args.hasFlag(options::OPT_fpseudo_probe_for_profiling,
- options::OPT_fno_pseudo_probe_for_profiling, false))
- CmdArgs.push_back("-fpseudo-probe-for-profiling");
-
RenderBuiltinOptions(TC, RawTriple, Args, CmdArgs);
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
@@ -5806,6 +6068,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_use_line_directives, false))
CmdArgs.push_back("-fuse-line-directives");
+ // -fno-minimize-whitespace is default.
+ if (Args.hasFlag(options::OPT_fminimize_whitespace,
+ options::OPT_fno_minimize_whitespace, false)) {
+ types::ID InputType = Inputs[0].getType();
+ if (!isDerivedFromC(InputType))
+ D.Diag(diag::err_drv_minws_unsupported_input_type)
+ << types::getTypeName(InputType);
+ CmdArgs.push_back("-fminimize-whitespace");
+ }
+
// -fms-extensions=0 is default.
if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC))
@@ -5848,6 +6120,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString("-fms-compatibility-version=" + MSVT.getAsString()));
bool IsMSVC2015Compatible = MSVT.getMajor() >= 19;
+ if (ImplyVCPPCVer) {
+ StringRef LanguageStandard;
+ if (const Arg *StdArg = Args.getLastArg(options::OPT__SLASH_std)) {
+ Std = StdArg;
+ LanguageStandard = llvm::StringSwitch<StringRef>(StdArg->getValue())
+ .Case("c11", "-std=c11")
+ .Case("c17", "-std=c17")
+ .Default("");
+ if (LanguageStandard.empty())
+ D.Diag(clang::diag::warn_drv_unused_argument)
+ << StdArg->getAsString(Args);
+ }
+ CmdArgs.push_back(LanguageStandard.data());
+ }
if (ImplyVCPPCXXVer) {
StringRef LanguageStandard;
if (const Arg *StdArg = Args.getLastArg(options::OPT__SLASH_std)) {
@@ -5855,7 +6141,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
LanguageStandard = llvm::StringSwitch<StringRef>(StdArg->getValue())
.Case("c++14", "-std=c++14")
.Case("c++17", "-std=c++17")
- .Case("c++latest", "-std=c++20")
+ .Case("c++20", "-std=c++20")
+ .Case("c++latest", "-std=c++2b")
.Default("");
if (LanguageStandard.empty())
D.Diag(clang::diag::warn_drv_unused_argument)
@@ -5942,13 +6229,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
Input, CmdArgs);
+ if (types::isObjC(Input.getType()) &&
+ Args.hasFlag(options::OPT_fobjc_encode_cxx_class_template_spec,
+ options::OPT_fno_objc_encode_cxx_class_template_spec,
+ !Runtime.isNeXTFamily()))
+ CmdArgs.push_back("-fobjc-encode-cxx-class-template-spec");
+
if (Args.hasFlag(options::OPT_fapplication_extension,
options::OPT_fno_application_extension, false))
CmdArgs.push_back("-fapplication-extension");
// Handle GCC-style exception args.
+ bool EH = false;
if (!C.getDriver().IsCLMode())
- addExceptionArgs(Args, InputType, TC, KernelOrKext, Runtime, CmdArgs);
+ EH = addExceptionArgs(Args, InputType, TC, KernelOrKext, Runtime, CmdArgs);
// Handle exception personalities
Arg *A = Args.getLastArg(
@@ -6077,7 +6371,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -finput_charset=UTF-8 is default. Reject others
if (Arg *inputCharset = Args.getLastArg(options::OPT_finput_charset_EQ)) {
StringRef value = inputCharset->getValue();
- if (!value.equals_lower("utf-8"))
+ if (!value.equals_insensitive("utf-8"))
D.Diag(diag::err_drv_invalid_value) << inputCharset->getAsString(Args)
<< value;
}
@@ -6085,7 +6379,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fexec_charset=UTF-8 is default. Reject others
if (Arg *execCharset = Args.getLastArg(options::OPT_fexec_charset_EQ)) {
StringRef value = execCharset->getValue();
- if (!value.equals_lower("utf-8"))
+ if (!value.equals_insensitive("utf-8"))
D.Diag(diag::err_drv_invalid_value) << execCharset->getAsString(Args)
<< value;
}
@@ -6255,7 +6549,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// be added so both IR can be captured.
if ((C.getDriver().isSaveTempsEnabled() ||
JA.isHostOffloading(Action::OFK_OpenMP)) &&
- !(C.getDriver().embedBitcodeInObject() && !C.getDriver().isUsingLTO()) &&
+ !(C.getDriver().embedBitcodeInObject() && !IsUsingLTO) &&
isa<CompileJobAction>(JA))
CmdArgs.push_back("-disable-llvm-passes");
@@ -6317,9 +6611,31 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fcuda-short-ptr");
}
+ if (IsCuda || IsHIP) {
+ // Determine the original source input.
+ const Action *SourceAction = &JA;
+ while (SourceAction->getKind() != Action::InputClass) {
+ assert(!SourceAction->getInputs().empty() && "unexpected root action!");
+ SourceAction = SourceAction->getInputs()[0];
+ }
+ auto CUID = cast<InputAction>(SourceAction)->getId();
+ if (!CUID.empty())
+ CmdArgs.push_back(Args.MakeArgString(Twine("-cuid=") + Twine(CUID)));
+ }
+
if (IsHIP)
CmdArgs.push_back("-fcuda-allow-variadic-functions");
+ if (IsCudaDevice || IsHIPDevice) {
+ StringRef InlineThresh =
+ Args.getLastArgValue(options::OPT_fgpu_inline_threshold_EQ);
+ if (!InlineThresh.empty()) {
+ std::string ArgStr =
+ std::string("-inline-threshold=") + InlineThresh.str();
+ CmdArgs.append({"-mllvm", Args.MakeArgStringRef(ArgStr)});
+ }
+ }
+
// OpenMP offloading device jobs take the argument -fopenmp-host-ir-file-path
// to specify the result of the compile phase on the host, so the meaningful
// device declarations can be identified. Also, -fopenmp-is-device is passed
@@ -6337,7 +6653,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
if (Args.hasFlag(options::OPT_munsafe_fp_atomics,
- options::OPT_mno_unsafe_fp_atomics))
+ options::OPT_mno_unsafe_fp_atomics, /*Default=*/false))
CmdArgs.push_back("-munsafe-fp-atomics");
}
@@ -6366,7 +6682,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (VirtualFunctionElimination) {
// VFE requires full LTO (currently, this might be relaxed to allow ThinLTO
// in the future).
- if (D.getLTOMode() != LTOK_Full)
+ if (LTOMode != LTOK_Full)
D.Diag(diag::err_drv_argument_only_allowed_with)
<< "-fvirtual-function-elimination"
<< "-flto=full";
@@ -6385,16 +6701,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
if (WholeProgramVTables) {
- if (!D.isUsingLTO())
+ // Propagate -fwhole-program-vtables if this is an LTO compile.
+ if (IsUsingLTO)
+ CmdArgs.push_back("-fwhole-program-vtables");
+ // Check if we passed LTO options but they were suppressed because this is a
+ // device offloading action, or we passed device offload LTO options which
+ // were suppressed because this is not the device offload action.
+ // Otherwise, issue an error.
+ else if (!D.isUsingLTO(!IsDeviceOffloadAction))
D.Diag(diag::err_drv_argument_only_allowed_with)
<< "-fwhole-program-vtables"
<< "-flto";
- CmdArgs.push_back("-fwhole-program-vtables");
}
bool DefaultsSplitLTOUnit =
(WholeProgramVTables || Sanitize.needsLTO()) &&
- (D.getLTOMode() == LTOK_Full || TC.canSplitThinLTOUnit());
+ (LTOMode == LTOK_Full || TC.canSplitThinLTOUnit());
bool SplitLTOUnit =
Args.hasFlag(options::OPT_fsplit_lto_unit,
options::OPT_fno_split_lto_unit, DefaultsSplitLTOUnit);
@@ -6440,7 +6762,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Enable order file instrumentation when ThinLTO is not on. When ThinLTO is
// on, we need to pass these flags as linker flags and that will be handled
// outside of the compiler.
- if (!D.isUsingLTO()) {
+ if (!IsUsingLTO) {
CmdArgs.push_back("-mllvm");
CmdArgs.push_back("-enable-order-file-instrumentation");
}
@@ -6481,6 +6803,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-target-feature");
CmdArgs.push_back("-outline-atomics");
}
+ } else if (Triple.isAArch64() &&
+ getToolChain().IsAArch64OutlineAtomicsDefault(Args)) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+outline-atomics");
}
if (Args.hasFlag(options::OPT_faddrsig, options::OPT_fno_addrsig,
@@ -6492,6 +6818,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
!TC.getTriple().isAndroid() && TC.useIntegratedAs()))
CmdArgs.push_back("-faddrsig");
+ if ((Triple.isOSBinFormatELF() || Triple.isOSBinFormatMachO()) &&
+ (EH || UnwindTables || DebugInfoKind != codegenoptions::NoDebugInfo))
+ CmdArgs.push_back("-D__GCC_HAVE_DWARF2_CFI_ASM=1");
+
if (Arg *A = Args.getLastArg(options::OPT_fsymbol_partition_EQ)) {
std::string Str = A->getAsString(Args);
if (!TC.getTriple().isOSBinFormatELF())
@@ -6534,23 +6864,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Input.getInputArg().renderAsInput(Args, CmdArgs);
}
- // Finally add the compile command to the compilation.
- if (Args.hasArg(options::OPT__SLASH_fallback) &&
- Output.getType() == types::TY_Object &&
- (InputType == types::TY_C || InputType == types::TY_CXX)) {
- auto CLCommand =
- getCLFallback()->GetCommand(C, JA, Output, Inputs, Args, LinkingOutput);
- C.addCommand(std::make_unique<FallbackCommand>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
- Output, std::move(CLCommand)));
- } else if (Args.hasArg(options::OPT__SLASH_fallback) &&
- isa<PrecompileJobAction>(JA)) {
- // In /fallback builds, run the main compilation even if the pch generation
- // fails, so that the main compilation's fallback to cl.exe runs.
- C.addCommand(std::make_unique<ForceSuccessCommand>(
- JA, *this, ResponseFileSupport::AtFileUTF8(), Exec, CmdArgs, Inputs,
- Output));
- } else if (D.CC1Main && !D.CCGenDiagnostics) {
+ if (D.CC1Main && !D.CCGenDiagnostics) {
// Invoke the CC1 directly in this process
C.addCommand(std::make_unique<CC1Command>(JA, *this,
ResponseFileSupport::AtFileUTF8(),
@@ -6878,7 +7192,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
options::OPT_gline_tables_only)) {
*EmitCodeView = true;
if (DebugInfoArg->getOption().matches(options::OPT__SLASH_Z7))
- *DebugInfoKind = codegenoptions::LimitedDebugInfo;
+ *DebugInfoKind = codegenoptions::DebugInfoConstructor;
else
*DebugInfoKind = codegenoptions::DebugLineTablesOnly;
} else {
@@ -6916,11 +7230,7 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
if (Args.hasFlag(options::OPT__SLASH_Zc_dllexportInlines_,
options::OPT__SLASH_Zc_dllexportInlines,
false)) {
- if (Args.hasArg(options::OPT__SLASH_fallback)) {
- D.Diag(clang::diag::err_drv_dllexport_inlines_and_fallback);
- } else {
- CmdArgs.push_back("-fno-dllexport-inlines");
- }
+ CmdArgs.push_back("-fno-dllexport-inlines");
}
Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
@@ -6989,22 +7299,24 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
if (!Args.hasArg(options::OPT_fdiagnostics_format_EQ)) {
CmdArgs.push_back("-fdiagnostics-format");
- if (Args.hasArg(options::OPT__SLASH_fallback))
- CmdArgs.push_back("msvc-fallback");
- else
- CmdArgs.push_back("msvc");
+ CmdArgs.push_back("msvc");
}
if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
StringRef GuardArgs = A->getValue();
- // The only valid options are "cf", "cf,nochecks", and "cf-".
- if (GuardArgs.equals_lower("cf")) {
+ // The only valid options are "cf", "cf,nochecks", "cf-", "ehcont" and
+ // "ehcont-".
+ if (GuardArgs.equals_insensitive("cf")) {
// Emit CFG instrumentation and the table of address-taken functions.
CmdArgs.push_back("-cfguard");
- } else if (GuardArgs.equals_lower("cf,nochecks")) {
+ } else if (GuardArgs.equals_insensitive("cf,nochecks")) {
// Emit only the table of address-taken functions.
CmdArgs.push_back("-cfguard-no-checks");
- } else if (GuardArgs.equals_lower("cf-")) {
+ } else if (GuardArgs.equals_insensitive("ehcont")) {
+ // Emit EH continuation table.
+ CmdArgs.push_back("-ehcontguard");
+ } else if (GuardArgs.equals_insensitive("cf-") ||
+ GuardArgs.equals_insensitive("ehcont-")) {
// Do nothing, but we might want to emit a security warning in future.
} else {
D.Diag(diag::err_drv_invalid_value) << A->getSpelling() << GuardArgs;
@@ -7012,13 +7324,6 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
}
}
-visualstudio::Compiler *Clang::getCLFallback() const {
- if (!CLFallback)
- CLFallback.reset(new visualstudio::Compiler(getToolChain()));
- return CLFallback.get();
-}
-
-
const char *Clang::getBaseInputName(const ArgList &Args,
const InputInfo &Input) {
return Args.MakeArgString(llvm::sys::path::filename(Input.getBaseInput()));
@@ -7151,18 +7456,14 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// Forward -g and handle debug info related flags, assuming we are dealing
// with an actual assembly file.
bool WantDebug = false;
- unsigned DwarfVersion = 0;
Args.ClaimAllArgs(options::OPT_g_Group);
- if (Arg *A = Args.getLastArg(options::OPT_g_Group)) {
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group))
WantDebug = !A->getOption().matches(options::OPT_g0) &&
!A->getOption().matches(options::OPT_ggdb0);
- if (WantDebug)
- DwarfVersion = DwarfVersionNum(A->getSpelling());
- }
- unsigned DefaultDwarfVersion = ParseDebugDefaultVersion(getToolChain(), Args);
- if (DwarfVersion == 0)
- DwarfVersion = DefaultDwarfVersion;
+ unsigned DwarfVersion = ParseDebugDefaultVersion(getToolChain(), Args);
+ if (const Arg *GDwarfN = getDwarfNArg(Args))
+ DwarfVersion = DwarfVersionNum(GDwarfN->getSpelling());
if (DwarfVersion == 0)
DwarfVersion = getToolChain().GetDefaultDwarfVersion();
@@ -7175,7 +7476,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// the guard for source type, however there is a test which asserts
// that some assembler invocation receives no -debug-info-kind,
// and it's not clear whether that test is just overly restrictive.
- DebugInfoKind = (WantDebug ? codegenoptions::LimitedDebugInfo
+ DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
: codegenoptions::NoDebugInfo);
// Add the -fdebug-compilation-dir flag if needed.
addDebugCompDirArg(Args, CmdArgs, C.getDriver().getVFS());
@@ -7192,6 +7493,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
RenderDebugEnablingArgs(Args, CmdArgs, DebugInfoKind, DwarfVersion,
llvm::DebuggerKind::Default);
+ renderDwarfFormat(D, Triple, Args, CmdArgs, DwarfVersion);
RenderDebugInfoCompressionArgs(Args, CmdArgs, D, getToolChain());
@@ -7365,10 +7667,16 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
});
}
Triples += Action::GetOffloadKindName(CurKind);
- Triples += '-';
- Triples += CurTC->getTriple().normalize();
- if (CurKind == Action::OFK_HIP && CurDep->getOffloadingArch()) {
- Triples += '-';
+ Triples += "-";
+ std::string NormalizedTriple = CurTC->getTriple().normalize();
+ Triples += NormalizedTriple;
+
+ if (CurDep->getOffloadingArch() != nullptr) {
+ // If OffloadArch is present it can only appear as the 6th hypen
+ // sepearated field of Bundle Entry ID. So, pad required number of
+ // hyphens in Triple.
+ for (int i = 4 - StringRef(NormalizedTriple).count("-"); i > 0; i--)
+ Triples += "-";
Triples += CurDep->getOffloadingArch();
}
}
@@ -7438,11 +7746,17 @@ void OffloadBundler::ConstructJobMultipleOutputs(
auto &Dep = DepInfo[I];
Triples += Action::GetOffloadKindName(Dep.DependentOffloadKind);
- Triples += '-';
- Triples += Dep.DependentToolChain->getTriple().normalize();
- if (Dep.DependentOffloadKind == Action::OFK_HIP &&
- !Dep.DependentBoundArch.empty()) {
- Triples += '-';
+ Triples += "-";
+ std::string NormalizedTriple =
+ Dep.DependentToolChain->getTriple().normalize();
+ Triples += NormalizedTriple;
+
+ if (!Dep.DependentBoundArch.empty()) {
+ // If OffloadArch is present it can only appear as the 6th hypen
+ // sepearated field of Bundle Entry ID. So, pad required number of
+ // hyphens in Triple.
+ for (int i = 4 - StringRef(NormalizedTriple).count("-"); i > 0; i--)
+ Triples += "-";
Triples += Dep.DependentBoundArch;
}
}
diff --git a/clang/lib/Driver/ToolChains/Clang.h b/clang/lib/Driver/ToolChains/Clang.h
index a607e3c27de9..d4b4988b4a8c 100644
--- a/clang/lib/Driver/ToolChains/Clang.h
+++ b/clang/lib/Driver/ToolChains/Clang.h
@@ -88,10 +88,6 @@ private:
codegenoptions::DebugInfoKind *DebugInfoKind,
bool *EmitCodeView) const;
- visualstudio::Compiler *getCLFallback() const;
-
- mutable std::unique_ptr<visualstudio::Compiler> CLFallback;
-
mutable std::unique_ptr<llvm::raw_fd_ostream> CompilationDatabase = nullptr;
void DumpCompilationDatabase(Compilation &C, StringRef Filename,
StringRef Target,
diff --git a/clang/lib/Driver/ToolChains/CloudABI.cpp b/clang/lib/Driver/ToolChains/CloudABI.cpp
index 3efca8776260..9ee46ac857f0 100644
--- a/clang/lib/Driver/ToolChains/CloudABI.cpp
+++ b/clang/lib/Driver/ToolChains/CloudABI.cpp
@@ -7,10 +7,10 @@
//===----------------------------------------------------------------------===//
#include "CloudABI.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Option/ArgList.h"
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.cpp b/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 6a95aa5ec628..83cab3ac00cb 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -9,6 +9,7 @@
#include "CommonArgs.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
+#include "Arch/M68k.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/SystemZ.h"
@@ -16,7 +17,6 @@
#include "Arch/X86.h"
#include "HIP.h"
#include "Hexagon.h"
-#include "InputInfo.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
@@ -26,6 +26,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
@@ -372,6 +373,9 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
return A->getValue();
return "";
+ case llvm::Triple::m68k:
+ return m68k::getM68kTargetCPU(Args);
+
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -399,9 +403,14 @@ std::string tools::getCPUName(const ArgList &Args, const llvm::Triple &T,
if (!TargetCPUName.empty())
return TargetCPUName;
- if (T.isOSAIX())
- TargetCPUName = "pwr4";
- else if (T.getArch() == llvm::Triple::ppc64le)
+ if (T.isOSAIX()) {
+ unsigned major, minor, unused_micro;
+ T.getOSVersion(major, minor, unused_micro);
+ // The minimal arch level moved from pwr4 for AIX7.1 to
+ // pwr7 for AIX7.2.
+ TargetCPUName =
+ (major < 7 || (major == 7 && minor < 2)) ? "pwr4" : "pwr7";
+ } else if (T.getArch() == llvm::Triple::ppc64le)
TargetCPUName = "ppc64le";
else if (T.getArch() == llvm::Triple::ppc64)
TargetCPUName = "ppc64";
@@ -541,6 +550,8 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back("-plugin-opt=-debugger-tune=lldb");
else if (A->getOption().matches(options::OPT_gsce))
CmdArgs.push_back("-plugin-opt=-debugger-tune=sce");
+ else if (A->getOption().matches(options::OPT_gdbx))
+ CmdArgs.push_back("-plugin-opt=-debugger-tune=dbx");
else
CmdArgs.push_back("-plugin-opt=-debugger-tune=gdb");
}
@@ -605,6 +616,11 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back("-plugin-opt=new-pass-manager");
}
+ // Pass an option to enable pseudo probe emission.
+ if (Args.hasFlag(options::OPT_fpseudo_probe_for_profiling,
+ options::OPT_fno_pseudo_probe_for_profiling, false))
+ CmdArgs.push_back("-plugin-opt=pseudo-probe-for-profiling");
+
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty())
@@ -713,11 +729,6 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
// the option, so don't try to pass it.
if (TC.getTriple().getOS() == llvm::Triple::Solaris)
return true;
- // Myriad is static linking only. Furthermore, some versions of its
- // linker have the bug where --export-dynamic overrides -static, so
- // don't use --export-dynamic on that platform.
- if (TC.getTriple().getVendor() == llvm::Triple::Myriad)
- return true;
SmallString<128> SanRT(TC.getCompilerRT(Args, Sanitizer));
if (llvm::sys::fs::exists(SanRT + ".syms")) {
CmdArgs.push_back(Args.MakeArgString("--dynamic-list=" + SanRT + ".syms"));
@@ -727,6 +738,9 @@ static bool addSanitizerDynamicList(const ToolChain &TC, const ArgList &Args,
}
static const char *getAsNeededOption(const ToolChain &TC, bool as_needed) {
+ assert(!TC.getTriple().isOSAIX() &&
+ "AIX linker does not support any form of --as-needed option yet.");
+
// While the Solaris 11.2 ld added --as-needed/--no-as-needed as aliases
// for the native forms -z ignore/-z record, they are missing in Illumos,
// so always use the native form.
@@ -755,10 +769,9 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
}
CmdArgs.push_back("-lm");
// There's no libdl on all OSes.
- if (!TC.getTriple().isOSFreeBSD() &&
- !TC.getTriple().isOSNetBSD() &&
+ if (!TC.getTriple().isOSFreeBSD() && !TC.getTriple().isOSNetBSD() &&
!TC.getTriple().isOSOpenBSD() &&
- TC.getTriple().getOS() != llvm::Triple::RTEMS)
+ TC.getTriple().getOS() != llvm::Triple::RTEMS)
CmdArgs.push_back("-ldl");
// Required for backtrace on some OSes
if (TC.getTriple().isOSFreeBSD() ||
@@ -800,8 +813,12 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
}
if (SanArgs.needsTsanRt() && SanArgs.linkRuntimes())
SharedRuntimes.push_back("tsan");
- if (SanArgs.needsHwasanRt() && SanArgs.linkRuntimes())
- SharedRuntimes.push_back("hwasan");
+ if (SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
+ if (SanArgs.needsHwasanAliasesRt())
+ SharedRuntimes.push_back("hwasan_aliases");
+ else
+ SharedRuntimes.push_back("hwasan");
+ }
}
// The stats_client library is also statically linked into DSOs.
@@ -831,9 +848,15 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
}
if (!SanArgs.needsSharedRt() && SanArgs.needsHwasanRt() && SanArgs.linkRuntimes()) {
- StaticRuntimes.push_back("hwasan");
- if (SanArgs.linkCXXRuntimes())
- StaticRuntimes.push_back("hwasan_cxx");
+ if (SanArgs.needsHwasanAliasesRt()) {
+ StaticRuntimes.push_back("hwasan_aliases");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("hwasan_aliases_cxx");
+ } else {
+ StaticRuntimes.push_back("hwasan");
+ if (SanArgs.linkCXXRuntimes())
+ StaticRuntimes.push_back("hwasan_cxx");
+ }
}
if (SanArgs.needsDfsanRt() && SanArgs.linkRuntimes())
StaticRuntimes.push_back("dfsan");
@@ -1005,12 +1028,13 @@ const char *tools::SplitDebugName(const JobAction &JA, const ArgList &Args,
return Args.MakeArgString(T);
} else {
// Use the compilation dir.
- SmallString<128> T(
- Args.getLastArgValue(options::OPT_fdebug_compilation_dir));
+ Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
+ options::OPT_fdebug_compilation_dir_EQ);
+ SmallString<128> T(A ? A->getValue() : "");
SmallString<128> F(llvm::sys::path::stem(Input.getBaseInput()));
AddPostfix(F);
T += F;
- return Args.MakeArgString(F);
+ return Args.MakeArgString(T);
}
}
@@ -1361,11 +1385,19 @@ bool tools::isObjCAutoRefCount(const ArgList &Args) {
enum class LibGccType { UnspecifiedLibGcc, StaticLibGcc, SharedLibGcc };
-static LibGccType getLibGccType(const Driver &D, const ArgList &Args) {
+static LibGccType getLibGccType(const ToolChain &TC, const Driver &D,
+ const ArgList &Args) {
if (Args.hasArg(options::OPT_static_libgcc) ||
Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_pie))
return LibGccType::StaticLibGcc;
- if (Args.hasArg(options::OPT_shared_libgcc) || D.CCCIsCXX())
+ if (Args.hasArg(options::OPT_shared_libgcc))
+ return LibGccType::SharedLibGcc;
+ // The Android NDK only provides libunwind.a, not libunwind.so.
+ if (TC.getTriple().isAndroid())
+ return LibGccType::StaticLibGcc;
+ // For MinGW, don't imply a shared libgcc here, we only want to return
+ // SharedLibGcc if that was explicitly requested.
+ if (D.CCCIsCXX() && !TC.getTriple().isOSCygMing())
return LibGccType::SharedLibGcc;
return LibGccType::UnspecifiedLibGcc;
}
@@ -1387,14 +1419,15 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
ToolChain::UnwindLibType UNW = TC.GetUnwindLibType(Args);
// Targets that don't use unwind libraries.
- if (TC.getTriple().isAndroid() || TC.getTriple().isOSIAMCU() ||
- TC.getTriple().isOSBinFormatWasm() ||
+ if ((TC.getTriple().isAndroid() && UNW == ToolChain::UNW_Libgcc) ||
+ TC.getTriple().isOSIAMCU() || TC.getTriple().isOSBinFormatWasm() ||
UNW == ToolChain::UNW_None)
return;
- LibGccType LGT = getLibGccType(D, Args);
+ LibGccType LGT = getLibGccType(TC, D, Args);
bool AsNeeded = LGT == LibGccType::UnspecifiedLibGcc &&
- !TC.getTriple().isAndroid() && !TC.getTriple().isOSCygMing();
+ !TC.getTriple().isAndroid() &&
+ !TC.getTriple().isOSCygMing() && !TC.getTriple().isOSAIX();
if (AsNeeded)
CmdArgs.push_back(getAsNeededOption(TC, true));
@@ -1409,17 +1442,23 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
break;
}
case ToolChain::UNW_CompilerRT:
- if (LGT == LibGccType::StaticLibGcc)
+ if (TC.getTriple().isOSAIX()) {
+ // AIX only has libunwind as a shared library. So do not pass
+ // anything in if -static is specified.
+ if (LGT != LibGccType::StaticLibGcc)
+ CmdArgs.push_back("-lunwind");
+ } else if (LGT == LibGccType::StaticLibGcc) {
CmdArgs.push_back("-l:libunwind.a");
- else if (TC.getTriple().isOSCygMing()) {
+ } else if (TC.getTriple().isOSCygMing()) {
if (LGT == LibGccType::SharedLibGcc)
CmdArgs.push_back("-l:libunwind.dll.a");
else
// Let the linker choose between libunwind.dll.a and libunwind.a
// depending on what's available, and depending on the -static flag
CmdArgs.push_back("-lunwind");
- } else
+ } else {
CmdArgs.push_back("-l:libunwind.so");
+ }
break;
}
@@ -1429,20 +1468,12 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
static void AddLibgcc(const ToolChain &TC, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
- LibGccType LGT = getLibGccType(D, Args);
+ LibGccType LGT = getLibGccType(TC, D, Args);
if (LGT != LibGccType::SharedLibGcc)
CmdArgs.push_back("-lgcc");
AddUnwindLibrary(TC, D, CmdArgs, Args);
if (LGT == LibGccType::SharedLibGcc)
CmdArgs.push_back("-lgcc");
-
- // According to Android ABI, we have to link with libdl if we are
- // linking with non-static libgcc.
- //
- // NOTE: This fixes a link error on Android MIPS as well. The non-static
- // libgcc for MIPS relies on _Unwind_Find_FDE and dl_iterate_phdr from libdl.
- if (TC.getTriple().isAndroid() && LGT != LibGccType::StaticLibGcc)
- CmdArgs.push_back("-ldl");
}
void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
@@ -1468,6 +1499,13 @@ void tools::AddRunTimeLibs(const ToolChain &TC, const Driver &D,
AddLibgcc(TC, D, CmdArgs, Args);
break;
}
+
+ // On Android, the unwinder uses dl_iterate_phdr (or one of
+ // dl_unwind_find_exidx/__gnu_Unwind_Find_exidx on arm32) from libdl.so. For
+ // statically-linked executables, these functions come from libc.a instead.
+ if (TC.getTriple().isAndroid() && !Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_static_pie))
+ CmdArgs.push_back("-ldl");
}
SmallString<128> tools::getStatsFileName(const llvm::opt::ArgList &Args,
@@ -1549,29 +1587,46 @@ void tools::addX86AlignBranchArgs(const Driver &D, const ArgList &Args,
}
}
-unsigned tools::getOrCheckAMDGPUCodeObjectVersion(
- const Driver &D, const llvm::opt::ArgList &Args, bool Diagnose) {
+static llvm::opt::Arg *
+getAMDGPUCodeObjectArgument(const Driver &D, const llvm::opt::ArgList &Args) {
+ // The last of -mcode-object-v3, -mno-code-object-v3 and
+ // -mcode-object-version=<version> wins.
+ return Args.getLastArg(options::OPT_mcode_object_v3_legacy,
+ options::OPT_mno_code_object_v3_legacy,
+ options::OPT_mcode_object_version_EQ);
+}
+
+void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
+ const llvm::opt::ArgList &Args) {
const unsigned MinCodeObjVer = 2;
const unsigned MaxCodeObjVer = 4;
- unsigned CodeObjVer = 3;
// Emit warnings for legacy options even if they are overridden.
- if (Diagnose) {
- if (Args.hasArg(options::OPT_mno_code_object_v3_legacy))
- D.Diag(diag::warn_drv_deprecated_arg) << "-mno-code-object-v3"
- << "-mcode-object-version=2";
+ if (Args.hasArg(options::OPT_mno_code_object_v3_legacy))
+ D.Diag(diag::warn_drv_deprecated_arg) << "-mno-code-object-v3"
+ << "-mcode-object-version=2";
- if (Args.hasArg(options::OPT_mcode_object_v3_legacy))
- D.Diag(diag::warn_drv_deprecated_arg) << "-mcode-object-v3"
- << "-mcode-object-version=3";
+ if (Args.hasArg(options::OPT_mcode_object_v3_legacy))
+ D.Diag(diag::warn_drv_deprecated_arg) << "-mcode-object-v3"
+ << "-mcode-object-version=3";
+
+ if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
+ if (CodeObjArg->getOption().getID() ==
+ options::OPT_mcode_object_version_EQ) {
+ unsigned CodeObjVer = MaxCodeObjVer;
+ auto Remnant =
+ StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
+ if (Remnant || CodeObjVer < MinCodeObjVer || CodeObjVer > MaxCodeObjVer)
+ D.Diag(diag::err_drv_invalid_int_value)
+ << CodeObjArg->getAsString(Args) << CodeObjArg->getValue();
+ }
}
+}
- // The last of -mcode-object-v3, -mno-code-object-v3 and
- // -mcode-object-version=<version> wins.
- if (auto *CodeObjArg =
- Args.getLastArg(options::OPT_mcode_object_v3_legacy,
- options::OPT_mno_code_object_v3_legacy,
- options::OPT_mcode_object_version_EQ)) {
+unsigned tools::getAMDGPUCodeObjectVersion(const Driver &D,
+ const llvm::opt::ArgList &Args) {
+ unsigned CodeObjVer = 4; // default
+ if (auto *CodeObjArg = getAMDGPUCodeObjectArgument(D, Args)) {
if (CodeObjArg->getOption().getID() ==
options::OPT_mno_code_object_v3_legacy) {
CodeObjVer = 2;
@@ -1579,17 +1634,17 @@ unsigned tools::getOrCheckAMDGPUCodeObjectVersion(
options::OPT_mcode_object_v3_legacy) {
CodeObjVer = 3;
} else {
- auto Remnant =
- StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
- if (Diagnose &&
- (Remnant || CodeObjVer < MinCodeObjVer || CodeObjVer > MaxCodeObjVer))
- D.Diag(diag::err_drv_invalid_int_value)
- << CodeObjArg->getAsString(Args) << CodeObjArg->getValue();
+ StringRef(CodeObjArg->getValue()).getAsInteger(0, CodeObjVer);
}
}
return CodeObjVer;
}
+bool tools::haveAMDGPUCodeObjectVersionArgument(
+ const Driver &D, const llvm::opt::ArgList &Args) {
+ return getAMDGPUCodeObjectArgument(D, Args) != nullptr;
+}
+
void tools::addMachineOutlinerArgs(const Driver &D,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
@@ -1622,3 +1677,63 @@ void tools::addMachineOutlinerArgs(const Driver &D,
}
}
}
+
+void tools::addOpenMPDeviceRTL(const Driver &D,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef BitcodeSuffix,
+ const llvm::Triple &Triple) {
+ SmallVector<StringRef, 8> LibraryPaths;
+ // Add user defined library paths from LIBRARY_PATH.
+ llvm::Optional<std::string> LibPath =
+ llvm::sys::Process::GetEnv("LIBRARY_PATH");
+ if (LibPath) {
+ SmallVector<StringRef, 8> Frags;
+ const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
+ llvm::SplitString(*LibPath, Frags, EnvPathSeparatorStr);
+ for (StringRef Path : Frags)
+ LibraryPaths.emplace_back(Path.trim());
+ }
+
+ // Add path to lib / lib64 folder.
+ SmallString<256> DefaultLibPath = llvm::sys::path::parent_path(D.Dir);
+ llvm::sys::path::append(DefaultLibPath, Twine("lib") + CLANG_LIBDIR_SUFFIX);
+ LibraryPaths.emplace_back(DefaultLibPath.c_str());
+
+ OptSpecifier LibomptargetBCPathOpt =
+ Triple.isAMDGCN() ? options::OPT_libomptarget_amdgcn_bc_path_EQ
+ : options::OPT_libomptarget_nvptx_bc_path_EQ;
+
+ StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgcn" : "nvptx";
+ // First check whether user specifies bc library
+ if (const Arg *A = DriverArgs.getLastArg(LibomptargetBCPathOpt)) {
+ std::string LibOmpTargetName(A->getValue());
+ if (llvm::sys::fs::exists(LibOmpTargetName)) {
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetName));
+ } else {
+ D.Diag(diag::err_drv_omp_offload_target_bcruntime_not_found)
+ << LibOmpTargetName;
+ }
+ } else {
+ bool FoundBCLibrary = false;
+
+ std::string LibOmpTargetName =
+ "libomptarget-" + BitcodeSuffix.str() + ".bc";
+
+ for (StringRef LibraryPath : LibraryPaths) {
+ SmallString<128> LibOmpTargetFile(LibraryPath);
+ llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
+ if (llvm::sys::fs::exists(LibOmpTargetFile)) {
+ CC1Args.push_back("-mlink-builtin-bitcode");
+ CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
+ FoundBCLibrary = true;
+ break;
+ }
+ }
+
+ if (!FoundBCLibrary)
+ D.Diag(diag::err_drv_omp_offload_target_missingbcruntime)
+ << LibOmpTargetName << ArchPrefix;
+ }
+}
diff --git a/clang/lib/Driver/ToolChains/CommonArgs.h b/clang/lib/Driver/ToolChains/CommonArgs.h
index 187c340d1c3c..c94c15864661 100644
--- a/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -9,8 +9,8 @@
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_COMMONARGS_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_COMMONARGS_H
-#include "InputInfo.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -138,13 +138,22 @@ void addMultilibFlag(bool Enabled, const char *const Flag,
void addX86AlignBranchArgs(const Driver &D, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs, bool IsLTO);
-unsigned getOrCheckAMDGPUCodeObjectVersion(const Driver &D,
- const llvm::opt::ArgList &Args,
- bool Diagnose = false);
+void checkAMDGPUCodeObjectVersion(const Driver &D,
+ const llvm::opt::ArgList &Args);
+
+unsigned getAMDGPUCodeObjectVersion(const Driver &D,
+ const llvm::opt::ArgList &Args);
+
+bool haveAMDGPUCodeObjectVersionArgument(const Driver &D,
+ const llvm::opt::ArgList &Args);
void addMachineOutlinerArgs(const Driver &D, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
const llvm::Triple &Triple, bool IsLTO);
+
+void addOpenMPDeviceRTL(const Driver &D, const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef BitcodeSuffix, const llvm::Triple &Triple);
} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/clang/lib/Driver/ToolChains/Cuda.cpp b/clang/lib/Driver/ToolChains/Cuda.cpp
index d14776c5f5ba..769eae14df51 100644
--- a/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -8,13 +8,13 @@
#include "Cuda.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Option/ArgList.h"
@@ -75,6 +75,8 @@ CudaVersion getCudaVersion(uint32_t raw_version) {
return CudaVersion::CUDA_102;
if (raw_version < 11010)
return CudaVersion::CUDA_110;
+ if (raw_version < 11020)
+ return CudaVersion::CUDA_111;
return CudaVersion::LATEST;
}
@@ -689,22 +691,17 @@ void CudaToolChain::addClangTargetOptions(
if (DriverArgs.hasFlag(options::OPT_fcuda_approx_transcendentals,
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
-
- if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
- false))
- CC1Args.push_back("-fgpu-rdc");
}
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
- std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(GpuArch);
+ if (DeviceOffloadingKind == Action::OFK_OpenMP &&
+ DriverArgs.hasArg(options::OPT_S))
+ return;
+ std::string LibDeviceFile = CudaInstallation.getLibDeviceFile(GpuArch);
if (LibDeviceFile.empty()) {
- if (DeviceOffloadingKind == Action::OFK_OpenMP &&
- DriverArgs.hasArg(options::OPT_S))
- return;
-
getDriver().Diag(diag::err_drv_no_cuda_libdevice) << GpuArch;
return;
}
@@ -712,18 +709,19 @@ void CudaToolChain::addClangTargetOptions(
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(LibDeviceFile));
- std::string CudaVersionStr;
+ clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();
// New CUDA versions often introduce new instructions that are only supported
// by new PTX version, so we need to raise PTX level to enable them in NVPTX
// back-end.
const char *PtxFeature = nullptr;
- switch (CudaInstallation.version()) {
+ switch (CudaInstallationVersion) {
#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER) \
case CudaVersion::CUDA_##CUDA_VER: \
- CudaVersionStr = #CUDA_VER; \
PtxFeature = "+ptx" #PTX_VER; \
break;
+ CASE_CUDA_VERSION(112, 72);
+ CASE_CUDA_VERSION(111, 71);
CASE_CUDA_VERSION(110, 70);
CASE_CUDA_VERSION(102, 65);
CASE_CUDA_VERSION(101, 64);
@@ -733,9 +731,6 @@ void CudaToolChain::addClangTargetOptions(
CASE_CUDA_VERSION(90, 60);
#undef CASE_CUDA_VERSION
default:
- // If unknown CUDA version, we take it as CUDA 8.0. Same assumption is also
- // made in libomptarget/deviceRTLs.
- CudaVersionStr = "80";
PtxFeature = "+ptx42";
}
CC1Args.append({"-target-feature", PtxFeature});
@@ -743,62 +738,28 @@ void CudaToolChain::addClangTargetOptions(
options::OPT_fno_cuda_short_ptr, false))
CC1Args.append({"-mllvm", "--nvptx-short-ptr"});
- if (CudaInstallation.version() >= CudaVersion::UNKNOWN)
- CC1Args.push_back(DriverArgs.MakeArgString(
- Twine("-target-sdk-version=") +
- CudaVersionToString(CudaInstallation.version())));
+ if (CudaInstallationVersion >= CudaVersion::UNKNOWN)
+ CC1Args.push_back(
+ DriverArgs.MakeArgString(Twine("-target-sdk-version=") +
+ CudaVersionToString(CudaInstallationVersion)));
if (DeviceOffloadingKind == Action::OFK_OpenMP) {
- SmallVector<StringRef, 8> LibraryPaths;
- // Add user defined library paths from LIBRARY_PATH.
- llvm::Optional<std::string> LibPath =
- llvm::sys::Process::GetEnv("LIBRARY_PATH");
- if (LibPath) {
- SmallVector<StringRef, 8> Frags;
- const char EnvPathSeparatorStr[] = {llvm::sys::EnvPathSeparator, '\0'};
- llvm::SplitString(*LibPath, Frags, EnvPathSeparatorStr);
- for (StringRef Path : Frags)
- LibraryPaths.emplace_back(Path.trim());
+ if (CudaInstallationVersion < CudaVersion::CUDA_92) {
+ getDriver().Diag(
+ diag::err_drv_omp_offload_target_cuda_version_not_support)
+ << CudaVersionToString(CudaInstallationVersion);
+ return;
}
- // Add path to lib / lib64 folder.
- SmallString<256> DefaultLibPath =
- llvm::sys::path::parent_path(getDriver().Dir);
- llvm::sys::path::append(DefaultLibPath, Twine("lib") + CLANG_LIBDIR_SUFFIX);
- LibraryPaths.emplace_back(DefaultLibPath.c_str());
-
- // First check whether user specifies bc library
- if (const Arg *A =
- DriverArgs.getLastArg(options::OPT_libomptarget_nvptx_bc_path_EQ)) {
- std::string LibOmpTargetName(A->getValue());
- if (llvm::sys::fs::exists(LibOmpTargetName)) {
- CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetName));
- } else {
- getDriver().Diag(diag::err_drv_omp_offload_target_bcruntime_not_found)
- << LibOmpTargetName;
- }
- } else {
- bool FoundBCLibrary = false;
-
- std::string LibOmpTargetName = "libomptarget-nvptx-cuda_" +
- CudaVersionStr + "-" + GpuArch.str() +
- ".bc";
-
- for (StringRef LibraryPath : LibraryPaths) {
- SmallString<128> LibOmpTargetFile(LibraryPath);
- llvm::sys::path::append(LibOmpTargetFile, LibOmpTargetName);
- if (llvm::sys::fs::exists(LibOmpTargetFile)) {
- CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(LibOmpTargetFile));
- FoundBCLibrary = true;
- break;
- }
- }
- if (!FoundBCLibrary)
- getDriver().Diag(diag::err_drv_omp_offload_target_missingbcruntime)
- << LibOmpTargetName;
- }
+ std::string BitcodeSuffix;
+ if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
+ options::OPT_fno_openmp_target_new_runtime, false))
+ BitcodeSuffix = "new-nvptx-" + GpuArch.str();
+ else
+ BitcodeSuffix = "nvptx-" + GpuArch.str();
+
+ addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
+ getTriple());
}
}
@@ -807,9 +768,8 @@ llvm::DenormalMode CudaToolChain::getDefaultDenormalModeForType(
const llvm::fltSemantics *FPType) const {
if (JA.getOffloadingDeviceKind() == Action::OFK_Cuda) {
if (FPType && FPType == &llvm::APFloat::IEEEsingle() &&
- DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero,
- false))
+ DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero,
+ options::OPT_fno_gpu_flush_denormals_to_zero, false))
return llvm::DenormalMode::getPreserveSign();
}
diff --git a/clang/lib/Driver/ToolChains/Darwin.cpp b/clang/lib/Driver/ToolChains/Darwin.cpp
index eb7bd4aec898..261f522f6c49 100644
--- a/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -34,6 +34,10 @@ using namespace clang::driver::toolchains;
using namespace clang;
using namespace llvm::opt;
+static const VersionTuple minimumMacCatalystDeploymentTarget() {
+ return VersionTuple(13, 1);
+}
+
llvm::Triple::ArchType darwin::getArchTypeForMachOArchName(StringRef Str) {
// See arch(3) and llvm-gcc's driver-driver.c. We don't implement support for
// archs which Darwin doesn't use.
@@ -74,12 +78,12 @@ void darwin::setTripleTypeForMachOArchName(llvm::Triple &T, StringRef Str) {
const llvm::Triple::ArchType Arch = getArchTypeForMachOArchName(Str);
llvm::ARM::ArchKind ArchKind = llvm::ARM::parseArch(Str);
T.setArch(Arch);
-
- if (Str == "x86_64h" || Str == "arm64e")
+ if (Arch != llvm::Triple::UnknownArch)
T.setArchName(Str);
- else if (ArchKind == llvm::ARM::ArchKind::ARMV6M ||
- ArchKind == llvm::ARM::ArchKind::ARMV7M ||
- ArchKind == llvm::ARM::ArchKind::ARMV7EM) {
+
+ if (ArchKind == llvm::ARM::ArchKind::ARMV6M ||
+ ArchKind == llvm::ARM::ArchKind::ARMV7M ||
+ ArchKind == llvm::ARM::ArchKind::ARMV7EM) {
T.setOS(llvm::Triple::UnknownOS);
T.setObjectFormat(llvm::Triple::MachO);
}
@@ -373,6 +377,18 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
D.Diag(diag::err_drv_bitcode_unsupported_on_toolchain);
}
+ // If GlobalISel is enabled, pass it through to LLVM.
+ if (Arg *A = Args.getLastArg(options::OPT_fglobal_isel,
+ options::OPT_fno_global_isel)) {
+ if (A->getOption().matches(options::OPT_fglobal_isel)) {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-global-isel");
+ // Disable abort and fall back to SDAG silently.
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-global-isel-abort=0");
+ }
+ }
+
Args.AddLastArg(CmdArgs, options::OPT_prebind);
Args.AddLastArg(CmdArgs, options::OPT_noprebind);
Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding);
@@ -417,7 +433,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_sectalign);
Args.AddAllArgs(CmdArgs, options::OPT_sectobjectsymbols);
Args.AddAllArgs(CmdArgs, options::OPT_segcreate);
- Args.AddLastArg(CmdArgs, options::OPT_whyload);
+ Args.AddLastArg(CmdArgs, options::OPT_why_load);
Args.AddLastArg(CmdArgs, options::OPT_whatsloaded);
Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name);
Args.AddLastArg(CmdArgs, options::OPT_dylinker);
@@ -699,10 +715,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
ResponseFileSupport ResponseSupport;
- if (LinkerIsLLDDarwinNew) {
- // Xcode12's ld64 added support for @response files, but it's crashy:
- // https://openradar.appspot.com/radar?id=4933317065441280
- // FIXME: Pass this for ld64 once it no longer crashes.
+ if (Version[0] >= 705 || LinkerIsLLDDarwinNew) {
ResponseSupport = ResponseFileSupport::AtFileUTF8();
} else {
// For older versions of the linker, use the legacy filelist method instead.
@@ -811,9 +824,9 @@ bool MachO::HasNativeLLVMSupport() const { return true; }
ToolChain::CXXStdlibType Darwin::GetDefaultCXXStdlibType() const {
// Default to use libc++ on OS X 10.9+ and iOS 7+.
- if ((isTargetMacOS() && !isMacosxVersionLT(10, 9)) ||
- (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0)) ||
- isTargetWatchOSBased())
+ if ((isTargetMacOSBased() && !isMacosxVersionLT(10, 9)) ||
+ (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0)) ||
+ isTargetWatchOSBased())
return ToolChain::CST_Libcxx;
return ToolChain::CST_Libstdcxx;
@@ -837,7 +850,7 @@ bool Darwin::hasBlocksRuntime() const {
else if (isTargetIOSBased())
return !isIPhoneOSVersionLT(3, 2);
else {
- assert(isTargetMacOS() && "unexpected darwin target");
+ assert(isTargetMacOSBased() && "unexpected darwin target");
return !isMacosxVersionLT(10, 6);
}
}
@@ -938,11 +951,11 @@ std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
Str += "watchos";
else if (isTargetTvOSBased())
Str += "tvos";
- else if (isTargetIOSBased())
+ else if (isTargetIOSBased() || isTargetMacCatalyst())
Str += "ios";
else
Str += "macosx";
- Str += getTargetVersion().getAsString();
+ Str += getTripleTargetVersion().getAsString();
Triple.setOSName(Str);
return Triple.getTriple();
@@ -1011,7 +1024,7 @@ static StringRef getXcodeDeveloperPath(StringRef PathIntoXcode) {
void DarwinClang::AddLinkARCArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
// Avoid linking compatibility stubs on i386 mac.
- if (isTargetMacOS() && getArch() == llvm::Triple::x86)
+ if (isTargetMacOSBased() && getArch() == llvm::Triple::x86)
return;
if (isTargetAppleSiliconMac())
return;
@@ -1069,7 +1082,7 @@ void DarwinClang::AddLinkARCArgs(const ArgList &Args,
unsigned DarwinClang::GetDefaultDwarfVersion() const {
// Default to use DWARF 2 on OS X 10.10 / iOS 8 and lower.
- if ((isTargetMacOS() && isMacosxVersionLT(10, 11)) ||
+ if ((isTargetMacOSBased() && isMacosxVersionLT(10, 11)) ||
(isTargetIOSBased() && isIPhoneOSVersionLT(9)))
return 2;
return 4;
@@ -1100,10 +1113,7 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
// we explicitly force linking with this library).
if ((Opts & RLO_AlwaysLink) || getVFS().exists(P)) {
const char *LibArg = Args.MakeArgString(P);
- if (Opts & RLO_FirstLink)
- CmdArgs.insert(CmdArgs.begin(), LibArg);
- else
- CmdArgs.push_back(LibArg);
+ CmdArgs.push_back(LibArg);
}
// Adding the rpaths might negatively interact when other rpaths are involved,
@@ -1130,6 +1140,8 @@ StringRef Darwin::getPlatformFamily() const {
case DarwinPlatformKind::MacOS:
return "MacOSX";
case DarwinPlatformKind::IPhoneOS:
+ if (TargetEnvironment == MacCatalyst)
+ return "MacOSX";
return "iPhone";
case DarwinPlatformKind::TvOS:
return "AppleTV";
@@ -1156,6 +1168,8 @@ StringRef Darwin::getOSLibraryNameSuffix(bool IgnoreSim) const {
case DarwinPlatformKind::MacOS:
return "osx";
case DarwinPlatformKind::IPhoneOS:
+ if (TargetEnvironment == MacCatalyst)
+ return "osx";
return TargetEnvironment == NativeEnvironment || IgnoreSim ? "ios"
: "iossim";
case DarwinPlatformKind::TvOS:
@@ -1208,7 +1222,7 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
return;
AddLinkRuntimeLib(Args, CmdArgs, "profile",
- RuntimeLinkOptions(RLO_AlwaysLink | RLO_FirstLink));
+ RuntimeLinkOptions(RLO_AlwaysLink));
bool ForGCOV = needsGCovInstrumentation(Args);
@@ -1400,6 +1414,12 @@ struct DarwinPlatform {
bool hasOSVersion() const { return HasOSVersion; }
+ VersionTuple getNativeTargetVersion() const {
+ assert(Environment == DarwinEnvironmentKind::MacCatalyst &&
+ "native target version is specified only for Mac Catalyst");
+ return NativeTargetVersion;
+ }
+
/// Returns true if the target OS was explicitly specified.
bool isExplicitlySpecified() const { return Kind <= DeploymentTargetEnv; }
@@ -1446,21 +1466,40 @@ struct DarwinPlatform {
llvm_unreachable("Unsupported Darwin Source Kind");
}
- static DarwinPlatform createFromTarget(const llvm::Triple &TT,
- StringRef OSVersion, Arg *A) {
+ static DarwinPlatform
+ createFromTarget(const llvm::Triple &TT, StringRef OSVersion, Arg *A,
+ const Optional<DarwinSDKInfo> &SDKInfo) {
DarwinPlatform Result(TargetArg, getPlatformFromOS(TT.getOS()), OSVersion,
A);
+ unsigned Major, Minor, Micro;
+ TT.getOSVersion(Major, Minor, Micro);
+ if (Major == 0)
+ Result.HasOSVersion = false;
+
switch (TT.getEnvironment()) {
case llvm::Triple::Simulator:
Result.Environment = DarwinEnvironmentKind::Simulator;
break;
+ case llvm::Triple::MacABI: {
+ // The minimum native macOS target for MacCatalyst is macOS 10.15.
+ auto NativeTargetVersion = VersionTuple(10, 15);
+ if (Result.HasOSVersion && SDKInfo) {
+ if (const auto *MacCatalystToMacOSMapping = SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macCatalystToMacOSPair())) {
+ if (auto MacOSVersion = MacCatalystToMacOSMapping->map(
+ VersionTuple(Major, Minor, Micro), NativeTargetVersion,
+ None)) {
+ NativeTargetVersion = *MacOSVersion;
+ }
+ }
+ }
+ Result.Environment = DarwinEnvironmentKind::MacCatalyst;
+ Result.NativeTargetVersion = NativeTargetVersion;
+ break;
+ }
default:
break;
}
- unsigned Major, Minor, Micro;
- TT.getOSVersion(Major, Minor, Micro);
- if (Major == 0)
- Result.HasOSVersion = false;
return Result;
}
static DarwinPlatform createOSVersionArg(DarwinPlatformKind Platform,
@@ -1497,7 +1536,9 @@ struct DarwinPlatform {
bool IsValid = !Version.tryParse(OSVersion);
(void)IsValid;
assert(IsValid && "invalid SDK version");
- return DarwinSDKInfo(Version);
+ return DarwinSDKInfo(
+ Version,
+ /*MaximumDeploymentTarget=*/VersionTuple(Version.getMajor(), 0, 99));
}
private:
@@ -1526,6 +1567,7 @@ private:
SourceKind Kind;
DarwinPlatformKind Platform;
DarwinEnvironmentKind Environment = DarwinEnvironmentKind::NativeEnvironment;
+ VersionTuple NativeTargetVersion;
std::string OSVersion;
bool HasOSVersion = true, InferSimulatorFromArch = true;
Arg *Argument;
@@ -1628,6 +1670,15 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
return None;
}
+/// Returns the SDK name with its optional '<prefix>.' component dropped, or an
+/// empty string if the name contains no such prefix.
+static StringRef dropSDKNamePrefix(StringRef SDKName) {
+ size_t PrefixPos = SDKName.find('.');
+ if (PrefixPos == StringRef::npos)
+ return "";
+ return SDKName.substr(PrefixPos + 1);
+}
+
/// Tries to infer the deployment target from the SDK specified by -isysroot
/// (or SDKROOT). Uses the version specified in the SDKSettings.json file if
/// it's available.
@@ -1657,22 +1708,29 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
if (Version.empty())
return None;
- if (SDK.startswith("iPhoneOS") || SDK.startswith("iPhoneSimulator"))
- return DarwinPlatform::createFromSDK(
- Darwin::IPhoneOS, Version,
- /*IsSimulator=*/SDK.startswith("iPhoneSimulator"));
- else if (SDK.startswith("MacOSX"))
- return DarwinPlatform::createFromSDK(Darwin::MacOS,
- getSystemOrSDKMacOSVersion(Version));
- else if (SDK.startswith("WatchOS") || SDK.startswith("WatchSimulator"))
- return DarwinPlatform::createFromSDK(
- Darwin::WatchOS, Version,
- /*IsSimulator=*/SDK.startswith("WatchSimulator"));
- else if (SDK.startswith("AppleTVOS") || SDK.startswith("AppleTVSimulator"))
- return DarwinPlatform::createFromSDK(
- Darwin::TvOS, Version,
- /*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
- return None;
+ auto CreatePlatformFromSDKName =
+ [&](StringRef SDK) -> Optional<DarwinPlatform> {
+ if (SDK.startswith("iPhoneOS") || SDK.startswith("iPhoneSimulator"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::IPhoneOS, Version,
+ /*IsSimulator=*/SDK.startswith("iPhoneSimulator"));
+ else if (SDK.startswith("MacOSX"))
+ return DarwinPlatform::createFromSDK(Darwin::MacOS,
+ getSystemOrSDKMacOSVersion(Version));
+ else if (SDK.startswith("WatchOS") || SDK.startswith("WatchSimulator"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::WatchOS, Version,
+ /*IsSimulator=*/SDK.startswith("WatchSimulator"));
+ else if (SDK.startswith("AppleTVOS") || SDK.startswith("AppleTVSimulator"))
+ return DarwinPlatform::createFromSDK(
+ Darwin::TvOS, Version,
+ /*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
+ return None;
+ };
+ if (auto Result = CreatePlatformFromSDKName(SDK))
+ return Result;
+ // The SDK can be an SDK variant with a name like `<prefix>.<platform>`.
+ return CreatePlatformFromSDKName(dropSDKNamePrefix(SDK));
}
std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
@@ -1743,15 +1801,16 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
/// Returns the deployment target that's specified using the -target option.
Optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
- DerivedArgList &Args, const llvm::Triple &Triple, const Driver &TheDriver) {
+ DerivedArgList &Args, const llvm::Triple &Triple, const Driver &TheDriver,
+ const Optional<DarwinSDKInfo> &SDKInfo) {
if (!Args.hasArg(options::OPT_target))
return None;
if (Triple.getOS() == llvm::Triple::Darwin ||
Triple.getOS() == llvm::Triple::UnknownOS)
return None;
std::string OSVersion = getOSVersion(Triple.getOS(), Triple, TheDriver);
- return DarwinPlatform::createFromTarget(Triple, OSVersion,
- Args.getLastArg(options::OPT_target));
+ return DarwinPlatform::createFromTarget(
+ Triple, OSVersion, Args.getLastArg(options::OPT_target), SDKInfo);
}
Optional<DarwinSDKInfo> parseSDKSettings(llvm::vfs::FileSystem &VFS,
@@ -1761,7 +1820,7 @@ Optional<DarwinSDKInfo> parseSDKSettings(llvm::vfs::FileSystem &VFS,
if (!A)
return None;
StringRef isysroot = A->getValue();
- auto SDKInfoOrErr = driver::parseDarwinSDKInfo(VFS, isysroot);
+ auto SDKInfoOrErr = parseDarwinSDKInfo(VFS, isysroot);
if (!SDKInfoOrErr) {
llvm::consumeError(SDKInfoOrErr.takeError());
TheDriver.Diag(diag::warn_drv_darwin_sdk_invalid_settings);
@@ -1800,7 +1859,7 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
// The OS and the version can be specified using the -target argument.
Optional<DarwinPlatform> OSTarget =
- getDeploymentTargetFromTargetArg(Args, getTriple(), getDriver());
+ getDeploymentTargetFromTargetArg(Args, getTriple(), getDriver(), SDKInfo);
if (OSTarget) {
Optional<DarwinPlatform> OSVersionArgTarget =
getDeploymentTargetFromOSVersionArg(Args, getDriver());
@@ -1886,13 +1945,24 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
;
+ if (OSTarget->getEnvironment() == MacCatalyst &&
+ (Major < 13 || (Major == 13 && Minor < 1))) {
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << OSTarget->getAsString(Args, Opts);
+ Major = 13;
+ Minor = 1;
+ Micro = 0;
+ }
// For 32-bit targets, the deployment target for iOS has to be earlier than
// iOS 11.
if (getTriple().isArch32Bit() && Major >= 11) {
// If the deployment target is explicitly specified, print a diagnostic.
if (OSTarget->isExplicitlySpecified()) {
- getDriver().Diag(diag::warn_invalid_ios_deployment_target)
- << OSTarget->getAsString(Args, Opts);
+ if (OSTarget->getEnvironment() == MacCatalyst)
+ getDriver().Diag(diag::err_invalid_macos_32bit_deployment_target);
+ else
+ getDriver().Diag(diag::warn_invalid_ios_deployment_target)
+ << OSTarget->getAsString(Args, Opts);
// Otherwise, set it to 10.99.99.
} else {
Major = 10;
@@ -1921,14 +1991,18 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
OSTarget->canInferSimulatorFromArch() && getTriple().isX86())
Environment = Simulator;
- setTarget(Platform, Environment, Major, Minor, Micro);
+ VersionTuple NativeTargetVersion;
+ if (Environment == MacCatalyst)
+ NativeTargetVersion = OSTarget->getNativeTargetVersion();
+ setTarget(Platform, Environment, Major, Minor, Micro, NativeTargetVersion);
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
StringRef SDK = getSDKName(A->getValue());
if (SDK.size() > 0) {
size_t StartVer = SDK.find_first_of("0123456789");
StringRef SDKName = SDK.slice(0, StartVer);
- if (!SDKName.startswith(getPlatformFamily()))
+ if (!SDKName.startswith(getPlatformFamily()) &&
+ !dropSDKNamePrefix(SDKName).startswith(getPlatformFamily()))
getDriver().Diag(diag::warn_incompatible_sysroot)
<< SDKName << getPlatformFamily();
}
@@ -2410,6 +2484,8 @@ void MachO::AddLinkRuntimeLibArgs(const ArgList &Args,
bool Darwin::isAlignedAllocationUnavailable() const {
llvm::Triple::OSType OS;
+ if (isTargetMacCatalyst())
+ return TargetVersion < alignedAllocMinVersion(llvm::Triple::MacOSX);
switch (TargetPlatform) {
case MacOS: // Earlier than 10.13.
OS = llvm::Triple::MacOSX;
@@ -2441,10 +2517,24 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
if (SDKInfo) {
/// Pass the SDK version to the compiler when the SDK information is
/// available.
- std::string Arg;
- llvm::raw_string_ostream OS(Arg);
- OS << "-target-sdk-version=" << SDKInfo->getVersion();
- CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
+ auto EmitTargetSDKVersionArg = [&](const VersionTuple &V) {
+ std::string Arg;
+ llvm::raw_string_ostream OS(Arg);
+ OS << "-target-sdk-version=" << V;
+ CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
+ };
+
+ if (isTargetMacCatalyst()) {
+ if (const auto *MacOStoMacCatalystMapping = SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ Optional<VersionTuple> SDKVersion = MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion(), minimumMacCatalystDeploymentTarget(), None);
+ EmitTargetSDKVersionArg(
+ SDKVersion ? *SDKVersion : minimumMacCatalystDeploymentTarget());
+ }
+ } else {
+ EmitTargetSDKVersionArg(SDKInfo->getVersion());
+ }
}
// Enable compatibility mode for NSItemProviderCompletionHandler in
@@ -2580,7 +2670,7 @@ bool MachO::SupportsProfiling() const {
void Darwin::addMinVersionArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
- VersionTuple TargetVersion = getTargetVersion();
+ VersionTuple TargetVersion = getTripleTargetVersion();
if (isTargetWatchOS())
CmdArgs.push_back("-watchos_version_min");
@@ -2594,6 +2684,8 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
CmdArgs.push_back("-ios_simulator_version_min");
else if (isTargetIOSBased())
CmdArgs.push_back("-iphoneos_version_min");
+ else if (isTargetMacCatalyst())
+ CmdArgs.push_back("-maccatalyst_version_min");
else {
assert(isTargetMacOS() && "unexpected target");
CmdArgs.push_back("-macosx_version_min");
@@ -2611,11 +2703,9 @@ static const char *getPlatformName(Darwin::DarwinPlatformKind Platform,
case Darwin::MacOS:
return "macos";
case Darwin::IPhoneOS:
- if (Environment == Darwin::NativeEnvironment ||
- Environment == Darwin::Simulator)
- return "ios";
- // FIXME: Add macCatalyst support here ("\"mac catalyst\"").
- llvm_unreachable("macCatalyst isn't yet supported");
+ if (Environment == Darwin::MacCatalyst)
+ return "mac catalyst";
+ return "ios";
case Darwin::TvOS:
return "tvos";
case Darwin::WatchOS:
@@ -2633,17 +2723,44 @@ void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
if (TargetEnvironment == Darwin::Simulator)
PlatformName += "-simulator";
CmdArgs.push_back(Args.MakeArgString(PlatformName));
- VersionTuple TargetVersion = getTargetVersion().withoutBuild();
+ VersionTuple TargetVersion = getTripleTargetVersion().withoutBuild();
VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+
+ if (isTargetMacCatalyst()) {
+ // Mac Catalyst programs must use the appropriate iOS SDK version
+ // that corresponds to the macOS SDK version used for the compilation.
+ Optional<VersionTuple> iOSSDKVersion;
+ if (SDKInfo) {
+ if (const auto *MacOStoMacCatalystMapping = SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ iOSSDKVersion = MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion().withoutBuild(),
+ minimumMacCatalystDeploymentTarget(), None);
+ }
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ (iOSSDKVersion ? *iOSSDKVersion : minimumMacCatalystDeploymentTarget())
+ .getAsString()));
+ return;
+ }
+
if (SDKInfo) {
VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
CmdArgs.push_back(Args.MakeArgString(SDKVersion.getAsString()));
} else {
- // Use a blank SDK version if it's not present.
- CmdArgs.push_back("0.0.0");
+ // Use an SDK version that's matching the deployment target if the SDK
+ // version is missing. This is preferred over an empty SDK version (0.0.0)
+ // as the system's runtime might expect the linked binary to contain a
+ // valid SDK version in order for the binary to work correctly. It's
+ // reasonable to use the deployment target version as a proxy for the
+ // SDK version because older SDKs don't guarantee support for deployment
+ // targets newer than the SDK versions, so that rules out using some
+ // predetermined older SDK version, which leaves the deployment target
+ // version as the only reasonable choice.
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
}
}
@@ -2697,7 +2814,7 @@ static void addPgProfilingLinkArgs(const Darwin &D, const ArgList &Args,
CmdArgs.push_back("-no_new_main");
} else {
D.getDriver().Diag(diag::err_drv_clang_unsupported_opt_pg_darwin)
- << D.isTargetMacOS();
+ << D.isTargetMacOSBased();
}
}
@@ -2750,7 +2867,7 @@ void Darwin::addStartObjectFileArgs(const ArgList &Args,
void Darwin::CheckObjCARC() const {
if (isTargetIOSBased() || isTargetWatchOSBased() ||
- (isTargetMacOS() && !isMacosxVersionLT(10, 6)))
+ (isTargetMacOSBased() && !isMacosxVersionLT(10, 6)))
return;
getDriver().Diag(diag::err_arc_unsupported_on_toolchain);
}
@@ -2771,11 +2888,11 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
// Prior to 10.9, macOS shipped a version of the C++ standard library without
// C++11 support. The same is true of iOS prior to version 5. These OS'es are
// incompatible with -fsanitize=vptr.
- if (!(isTargetMacOS() && isMacosxVersionLT(10, 9))
- && !(isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0)))
+ if (!(isTargetMacOSBased() && isMacosxVersionLT(10, 9)) &&
+ !(isTargetIPhoneOS() && isIPhoneOSVersionLT(5, 0)))
Res |= SanitizerKind::Vptr;
- if ((IsX86_64 || IsAArch64) && isTargetMacOS()) {
+ if ((IsX86_64 || IsAArch64) && isTargetMacOSBased()) {
Res |= SanitizerKind::Thread;
} else if (isTargetIOSSimulator() || isTargetTvOSSimulator()) {
if (IsX86_64)
diff --git a/clang/lib/Driver/ToolChains/Darwin.h b/clang/lib/Driver/ToolChains/Darwin.h
index 25c63fed922a..4de122c8d513 100644
--- a/clang/lib/Driver/ToolChains/Darwin.h
+++ b/clang/lib/Driver/ToolChains/Darwin.h
@@ -11,8 +11,8 @@
#include "Cuda.h"
#include "ROCm.h"
+#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/LangOptions.h"
-#include "clang/Driver/DarwinSDKInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/XRayArgs.h"
@@ -184,9 +184,6 @@ public:
/// Emit rpaths for @executable_path as well as the resource directory.
RLO_AddRPath = 1 << 2,
-
- /// Link the library in before any others.
- RLO_FirstLink = 1 << 3,
};
/// Add a runtime library to the list of items to link.
@@ -284,13 +281,16 @@ public:
enum DarwinEnvironmentKind {
NativeEnvironment,
Simulator,
+ MacCatalyst,
};
mutable DarwinPlatformKind TargetPlatform;
mutable DarwinEnvironmentKind TargetEnvironment;
- /// The OS version we are targeting.
+ /// The native OS version we are targeting.
mutable VersionTuple TargetVersion;
+ /// The OS version we are targeting as specified in the triple.
+ mutable VersionTuple OSTargetVersion;
/// The information about the darwin SDK that was used.
mutable Optional<DarwinSDKInfo> SDKInfo;
@@ -337,12 +337,14 @@ protected:
// FIXME: Eliminate these ...Target functions and derive separate tool chains
// for these targets and put version in constructor.
void setTarget(DarwinPlatformKind Platform, DarwinEnvironmentKind Environment,
- unsigned Major, unsigned Minor, unsigned Micro) const {
+ unsigned Major, unsigned Minor, unsigned Micro,
+ VersionTuple NativeTargetVersion) const {
// FIXME: For now, allow reinitialization as long as values don't
// change. This will go away when we move away from argument translation.
if (TargetInitialized && TargetPlatform == Platform &&
TargetEnvironment == Environment &&
- TargetVersion == VersionTuple(Major, Minor, Micro))
+ (Environment == MacCatalyst ? OSTargetVersion : TargetVersion) ==
+ VersionTuple(Major, Minor, Micro))
return;
assert(!TargetInitialized && "Target already initialized!");
@@ -352,6 +354,11 @@ protected:
TargetVersion = VersionTuple(Major, Minor, Micro);
if (Environment == Simulator)
const_cast<Darwin *>(this)->setTripleEnvironment(llvm::Triple::Simulator);
+ else if (Environment == MacCatalyst) {
+ const_cast<Darwin *>(this)->setTripleEnvironment(llvm::Triple::MacABI);
+ TargetVersion = NativeTargetVersion;
+ OSTargetVersion = VersionTuple(Major, Minor, Micro);
+ }
}
public:
@@ -402,6 +409,10 @@ public:
return TargetPlatform == WatchOS;
}
+ bool isTargetMacCatalyst() const {
+ return TargetPlatform == IPhoneOS && TargetEnvironment == MacCatalyst;
+ }
+
bool isTargetMacOS() const {
assert(TargetInitialized && "Target not initialized!");
return TargetPlatform == MacOS;
@@ -409,8 +420,7 @@ public:
bool isTargetMacOSBased() const {
assert(TargetInitialized && "Target not initialized!");
- // FIXME (Alex L): Add remaining MacCatalyst suppport.
- return TargetPlatform == MacOS;
+ return TargetPlatform == MacOS || isTargetMacCatalyst();
}
bool isTargetAppleSiliconMac() const {
@@ -420,9 +430,13 @@ public:
bool isTargetInitialized() const { return TargetInitialized; }
- VersionTuple getTargetVersion() const {
+ /// The OS version that corresponds to the OS named in the target triple.
+ /// It might differ from the version of the actual OS on which the
+ /// program will run, e.g. Mac Catalyst code runs on a macOS host, but its
+ /// target triple is iOS.
+ VersionTuple getTripleTargetVersion() const {
assert(TargetInitialized && "Target not initialized!");
- return TargetVersion;
+ return isTargetMacCatalyst() ? OSTargetVersion : TargetVersion;
}
bool isIPhoneOSVersionLT(unsigned V0, unsigned V1 = 0,
@@ -436,7 +450,8 @@ public:
/// supported macOS version, the deployment target version is compared to the
/// specifed version instead.
bool isMacosxVersionLT(unsigned V0, unsigned V1 = 0, unsigned V2 = 0) const {
- assert(isTargetMacOS() && getTriple().isMacOSX() &&
+ assert(isTargetMacOSBased() &&
+ (getTriple().isMacOSX() || getTriple().isMacCatalystEnvironment()) &&
"Unexpected call for non OS X target!");
// The effective triple might not be initialized yet, so construct a
// pseudo-effective triple to get the minimum supported OS version.
@@ -490,7 +505,7 @@ public:
// This is only used with the non-fragile ABI and non-legacy dispatch.
// Mixed dispatch is used everywhere except OS X before 10.6.
- return !(isTargetMacOS() && isMacosxVersionLT(10, 6));
+ return !(isTargetMacOSBased() && isMacosxVersionLT(10, 6));
}
LangOptions::StackProtectorMode
@@ -499,9 +514,9 @@ public:
// and for everything in 10.6 and beyond
if (isTargetIOSBased() || isTargetWatchOSBased())
return LangOptions::SSPOn;
- else if (isTargetMacOS() && !isMacosxVersionLT(10, 6))
+ else if (isTargetMacOSBased() && !isMacosxVersionLT(10, 6))
return LangOptions::SSPOn;
- else if (isTargetMacOS() && !isMacosxVersionLT(10, 5) && !KernelOrKext)
+ else if (isTargetMacOSBased() && !isMacosxVersionLT(10, 5) && !KernelOrKext)
return LangOptions::SSPOn;
return LangOptions::SSPOff;
diff --git a/clang/lib/Driver/ToolChains/Flang.cpp b/clang/lib/Driver/ToolChains/Flang.cpp
index 669d911de18a..1bfad6115d51 100644
--- a/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/clang/lib/Driver/ToolChains/Flang.cpp
@@ -19,9 +19,33 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+void Flang::AddFortranDialectOptions(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ Args.AddAllArgs(
+ CmdArgs, {options::OPT_ffixed_form, options::OPT_ffree_form,
+ options::OPT_ffixed_line_length_EQ, options::OPT_fopenmp,
+ options::OPT_fopenacc, options::OPT_finput_charset_EQ,
+ options::OPT_fimplicit_none, options::OPT_fno_implicit_none,
+ options::OPT_fbackslash, options::OPT_fno_backslash,
+ options::OPT_flogical_abbreviations,
+ options::OPT_fno_logical_abbreviations,
+ options::OPT_fxor_operator, options::OPT_fno_xor_operator,
+ options::OPT_falternative_parameter_statement,
+ options::OPT_fdefault_real_8, options::OPT_fdefault_integer_8,
+ options::OPT_fdefault_double_8, options::OPT_flarge_sizes});
+}
+
void Flang::AddPreprocessingOptions(const ArgList &Args,
ArgStringList &CmdArgs) const {
- Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I});
+ Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I,
+ options::OPT_cpp, options::OPT_nocpp});
+}
+
+void Flang::AddOtherOptions(const ArgList &Args, ArgStringList &CmdArgs) const {
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_module_dir, options::OPT_fdebug_module_writer,
+ options::OPT_fintrinsic_modules_path, options::OPT_pedantic,
+ options::OPT_std_EQ, options::OPT_W_Joined});
}
void Flang::ConstructJob(Compilation &C, const JobAction &JA,
@@ -45,9 +69,6 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
// CmdArgs.push_back(Args.MakeArgString(TripleStr));
if (isa<PreprocessJobAction>(JA)) {
- if (C.getArgs().hasArg(options::OPT_test_io))
- CmdArgs.push_back("-test-io");
- else
CmdArgs.push_back("-E");
} else if (isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) {
if (JA.getType() == types::TY_Nothing) {
@@ -79,6 +100,14 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
if (types::getPreprocessedType(InputType) != types::TY_INVALID)
AddPreprocessingOptions(Args, CmdArgs);
+ AddFortranDialectOptions(Args, CmdArgs);
+
+ // Add other compile options
+ AddOtherOptions(Args, CmdArgs);
+
+ // Forward -Xflang arguments to -fc1
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xflang);
+
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
diff --git a/clang/lib/Driver/ToolChains/Flang.h b/clang/lib/Driver/ToolChains/Flang.h
index 83b79505e0a9..efbdbe854e24 100644
--- a/clang/lib/Driver/ToolChains/Flang.h
+++ b/clang/lib/Driver/ToolChains/Flang.h
@@ -24,6 +24,14 @@ namespace tools {
/// Flang compiler tool.
class LLVM_LIBRARY_VISIBILITY Flang : public Tool {
private:
+ /// Extract fortran dialect options from the driver arguments and add them to
+ /// the list of arguments for the generated command/job.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddFortranDialectOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
+
/// Extract preprocessing options from the driver arguments and add them to
/// the preprocessor command arguments.
///
@@ -31,6 +39,13 @@ private:
/// \param [out] CmdArgs The list of output command arguments
void AddPreprocessingOptions(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
+ /// Extract other compilation options from the driver arguments and add them
+ /// to the command arguments.
+ ///
+ /// \param [in] Args The list of input driver arguments
+ /// \param [out] CmdArgs The list of output command arguments
+ void AddOtherOptions(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const;
public:
Flang(const ToolChain &TC);
diff --git a/clang/lib/Driver/ToolChains/FreeBSD.cpp b/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 4524d9b8a85c..5dcf74dabf4f 100644
--- a/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -290,8 +290,11 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
+ addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+ bool Profiling = Args.hasArg(options::OPT_pg) &&
+ ToolChain.getTriple().getOSMajorVersion() < 14;
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
// Use the static OpenMP runtime with -static-openmp
bool StaticOpenMP = Args.hasArg(options::OPT_static_openmp) &&
@@ -301,7 +304,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (D.CCCIsCXX()) {
if (ToolChain.ShouldLinkCXXStdlib(Args))
ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
- if (Args.hasArg(options::OPT_pg))
+ if (Profiling)
CmdArgs.push_back("-lm_p");
else
CmdArgs.push_back("-lm");
@@ -312,13 +315,13 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
linkXRayRuntimeDeps(ToolChain, CmdArgs);
// FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
// the default system libraries. Just mimic this for now.
- if (Args.hasArg(options::OPT_pg))
+ if (Profiling)
CmdArgs.push_back("-lgcc_p");
else
CmdArgs.push_back("-lgcc");
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-lgcc_eh");
- } else if (Args.hasArg(options::OPT_pg)) {
+ } else if (Profiling) {
CmdArgs.push_back("-lgcc_eh_p");
} else {
CmdArgs.push_back("--as-needed");
@@ -327,13 +330,13 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Args.hasArg(options::OPT_pthread)) {
- if (Args.hasArg(options::OPT_pg))
+ if (Profiling)
CmdArgs.push_back("-lpthread_p");
else
CmdArgs.push_back("-lpthread");
}
- if (Args.hasArg(options::OPT_pg)) {
+ if (Profiling) {
if (Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-lc");
else
@@ -346,7 +349,7 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-lgcc_eh");
- } else if (Args.hasArg(options::OPT_pg)) {
+ } else if (Profiling) {
CmdArgs.push_back("-lgcc_eh_p");
} else {
CmdArgs.push_back("--as-needed");
@@ -408,14 +411,15 @@ void FreeBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void FreeBSD::addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(getDriver().SysRoot, "/usr/include/c++/4.2", "", "",
- "", "", DriverArgs, CC1Args);
+ addLibStdCXXIncludePaths(getDriver().SysRoot + "/usr/include/c++/4.2", "", "",
+ DriverArgs, CC1Args);
}
void FreeBSD::AddCXXStdlibLibArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
CXXStdlibType Type = GetCXXStdlibType(Args);
- bool Profiling = Args.hasArg(options::OPT_pg);
+ bool Profiling =
+ Args.hasArg(options::OPT_pg) && getTriple().getOSMajorVersion() < 14;
switch (Type) {
case ToolChain::CST_Libcxx:
@@ -466,6 +470,7 @@ bool FreeBSD::IsUnwindTablesDefault(const ArgList &Args) const { return true; }
bool FreeBSD::isPIEDefault() const { return getSanitizerArgs().requiresPIE(); }
SanitizerMask FreeBSD::getSupportedSanitizers() const {
+ const bool IsAArch64 = getTriple().getArch() == llvm::Triple::aarch64;
const bool IsX86 = getTriple().getArch() == llvm::Triple::x86;
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
const bool IsMIPS64 = getTriple().isMIPS64();
@@ -484,8 +489,13 @@ SanitizerMask FreeBSD::getSupportedSanitizers() const {
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::FuzzerNoLink;
}
- if (IsX86_64)
+ if (IsAArch64 || IsX86_64) {
+ Res |= SanitizerKind::KernelAddress;
+ Res |= SanitizerKind::KernelMemory;
+ }
+ if (IsX86_64) {
Res |= SanitizerKind::Memory;
+ }
return Res;
}
diff --git a/clang/lib/Driver/ToolChains/Fuchsia.cpp b/clang/lib/Driver/ToolChains/Fuchsia.cpp
index 8e086010a984..fd9804a7f353 100644
--- a/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -54,8 +54,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("now");
const char *Exec = Args.MakeArgString(ToolChain.GetLinkerPath());
- if (llvm::sys::path::filename(Exec).equals_lower("ld.lld") ||
- llvm::sys::path::stem(Exec).equals_lower("ld.lld")) {
+ if (llvm::sys::path::filename(Exec).equals_insensitive("ld.lld") ||
+ llvm::sys::path::stem(Exec).equals_insensitive("ld.lld")) {
CmdArgs.push_back("-z");
CmdArgs.push_back("rodynamic");
CmdArgs.push_back("-z");
@@ -95,6 +95,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
std::string Dyld = D.DyldPrefix;
if (SanArgs.needsAsanRt() && SanArgs.needsSharedRt())
Dyld += "asan/";
+ if (SanArgs.needsHwasanRt() && SanArgs.needsSharedRt())
+ Dyld += "hwasan/";
if (SanArgs.needsTsanRt() && SanArgs.needsSharedRt())
Dyld += "tsan/";
Dyld += "ld.so.1";
@@ -187,13 +189,9 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
auto FilePaths = [&](const Multilib &M) -> std::vector<std::string> {
std::vector<std::string> FP;
- if (D.CCCIsCXX()) {
- if (auto CXXStdlibPath = getCXXStdlibPath()) {
- SmallString<128> P(*CXXStdlibPath);
- llvm::sys::path::append(P, M.gccSuffix());
- FP.push_back(std::string(P.str()));
- }
- }
+ SmallString<128> P(getStdlibPath());
+ llvm::sys::path::append(P, M.gccSuffix());
+ FP.push_back(std::string(P.str()));
return FP;
};
@@ -210,23 +208,43 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
.flag("+fsanitize=address")
.flag("-fexceptions")
.flag("+fno-exceptions"));
+ // HWASan has higher priority because we always want the instrumentated
+ // version.
+ Multilibs.push_back(
+ Multilib("hwasan", {}, {}, 4).flag("+fsanitize=hwaddress"));
+ // Use the hwasan+noexcept variant with HWASan and -fno-exceptions.
+ Multilibs.push_back(Multilib("hwasan+noexcept", {}, {}, 5)
+ .flag("+fsanitize=hwaddress")
+ .flag("-fexceptions")
+ .flag("+fno-exceptions"));
// Use the relative vtables ABI.
// TODO: Remove these multilibs once relative vtables are enabled by default
// for Fuchsia.
- Multilibs.push_back(Multilib("relative-vtables", {}, {}, 4)
+ Multilibs.push_back(Multilib("relative-vtables", {}, {}, 6)
.flag("+fexperimental-relative-c++-abi-vtables"));
- Multilibs.push_back(Multilib("relative-vtables+noexcept", {}, {}, 5)
+ Multilibs.push_back(Multilib("relative-vtables+noexcept", {}, {}, 7)
.flag("+fexperimental-relative-c++-abi-vtables")
.flag("-fexceptions")
.flag("+fno-exceptions"));
- Multilibs.push_back(Multilib("relative-vtables+asan", {}, {}, 6)
+ Multilibs.push_back(Multilib("relative-vtables+asan", {}, {}, 8)
.flag("+fexperimental-relative-c++-abi-vtables")
.flag("+fsanitize=address"));
- Multilibs.push_back(Multilib("relative-vtables+asan+noexcept", {}, {}, 7)
+ Multilibs.push_back(Multilib("relative-vtables+asan+noexcept", {}, {}, 9)
.flag("+fexperimental-relative-c++-abi-vtables")
.flag("+fsanitize=address")
.flag("-fexceptions")
.flag("+fno-exceptions"));
+ Multilibs.push_back(Multilib("relative-vtables+hwasan", {}, {}, 10)
+ .flag("+fexperimental-relative-c++-abi-vtables")
+ .flag("+fsanitize=hwaddress"));
+ Multilibs.push_back(Multilib("relative-vtables+hwasan+noexcept", {}, {}, 11)
+ .flag("+fexperimental-relative-c++-abi-vtables")
+ .flag("+fsanitize=hwaddress")
+ .flag("-fexceptions")
+ .flag("+fno-exceptions"));
+ // Use Itanium C++ ABI for the compat multilib.
+ Multilibs.push_back(Multilib("compat", {}, {}, 12).flag("+fc++-abi=itanium"));
+
Multilibs.FilterOut([&](const Multilib &M) {
std::vector<std::string> RD = FilePaths(M);
return std::all_of(RD.begin(), RD.end(), [&](std::string P) {
@@ -239,12 +257,16 @@ Fuchsia::Fuchsia(const Driver &D, const llvm::Triple &Triple,
Args.hasFlag(options::OPT_fexceptions, options::OPT_fno_exceptions, true),
"fexceptions", Flags);
addMultilibFlag(getSanitizerArgs().needsAsanRt(), "fsanitize=address", Flags);
+ addMultilibFlag(getSanitizerArgs().needsHwasanRt(), "fsanitize=hwaddress",
+ Flags);
addMultilibFlag(
Args.hasFlag(options::OPT_fexperimental_relative_cxx_abi_vtables,
options::OPT_fno_experimental_relative_cxx_abi_vtables,
/*default=*/false),
"fexperimental-relative-c++-abi-vtables", Flags);
+ addMultilibFlag(Args.getLastArgValue(options::OPT_fcxx_abi_EQ) == "itanium",
+ "fc++-abi=itanium", Flags);
Multilibs.setFilePathsCallback(FilePaths);
@@ -340,11 +362,31 @@ void Fuchsia::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
DriverArgs.hasArg(options::OPT_nostdincxx))
return;
+ const Driver &D = getDriver();
+ std::string Target = getTripleString();
+
+ auto AddCXXIncludePath = [&](StringRef Path) {
+ std::string Version = detectLibcxxVersion(Path);
+ if (Version.empty())
+ return;
+
+ // First add the per-target include path.
+ SmallString<128> TargetDir(Path);
+ llvm::sys::path::append(TargetDir, Target, "c++", Version);
+ if (getVFS().exists(TargetDir))
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+
+ // Second add the generic one.
+ SmallString<128> Dir(Path);
+ llvm::sys::path::append(Dir, "c++", Version);
+ addSystemInclude(DriverArgs, CC1Args, Dir);
+ };
+
switch (GetCXXStdlibType(DriverArgs)) {
case ToolChain::CST_Libcxx: {
- SmallString<128> P(getDriver().Dir);
- llvm::sys::path::append(P, "..", "include", "c++", "v1");
- addSystemInclude(DriverArgs, CC1Args, P.str());
+ SmallString<128> P(D.Dir);
+ llvm::sys::path::append(P, "..", "include");
+ AddCXXIncludePath(P);
break;
}
@@ -368,6 +410,7 @@ void Fuchsia::AddCXXStdlibLibArgs(const ArgList &Args,
SanitizerMask Fuchsia::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
+ Res |= SanitizerKind::HWAddress;
Res |= SanitizerKind::PointerCompare;
Res |= SanitizerKind::PointerSubtract;
Res |= SanitizerKind::Fuzzer;
diff --git a/clang/lib/Driver/ToolChains/Gnu.cpp b/clang/lib/Driver/ToolChains/Gnu.cpp
index 1d8a3cdce92a..da39f29e4619 100644
--- a/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -51,9 +51,9 @@ static void normalizeCPUNamesForAssembler(const ArgList &Args,
ArgStringList &CmdArgs) {
if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
StringRef CPUArg(A->getValue());
- if (CPUArg.equals_lower("krait"))
+ if (CPUArg.equals_insensitive("krait"))
CmdArgs.push_back("-mcpu=cortex-a15");
- else if(CPUArg.equals_lower("kryo"))
+ else if (CPUArg.equals_insensitive("kryo"))
CmdArgs.push_back("-mcpu=cortex-a57");
else
Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
@@ -254,6 +254,8 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
return isArmBigEndian(T, Args) ? "armelfb_linux_eabi" : "armelf_linux_eabi";
+ case llvm::Triple::m68k:
+ return "m68kelf";
case llvm::Triple::ppc:
if (T.isOSLinux())
return "elf32ppclinux";
@@ -292,7 +294,7 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
case llvm::Triple::systemz:
return "elf64_s390";
case llvm::Triple::x86_64:
- if (T.getEnvironment() == llvm::Triple::GNUX32)
+ if (T.isX32())
return "elf32_x86_64";
return "elf_x86_64";
case llvm::Triple::ve:
@@ -723,7 +725,7 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
CmdArgs.push_back("--32");
break;
case llvm::Triple::x86_64:
- if (getToolChain().getTriple().getEnvironment() == llvm::Triple::GNUX32)
+ if (getToolChain().getTriple().isX32())
CmdArgs.push_back("--x32");
else
CmdArgs.push_back("--64");
@@ -1731,7 +1733,7 @@ static bool findBiarchMultilibs(const Driver &D,
// Determine default multilib from: 32, 64, x32
// Also handle cases such as 64 on 32, 32 on 64, etc.
enum { UNKNOWN, WANT32, WANT64, WANTX32 } Want = UNKNOWN;
- const bool IsX32 = TargetTriple.getEnvironment() == llvm::Triple::GNUX32;
+ const bool IsX32 = TargetTriple.isX32();
if (TargetTriple.isArch32Bit() && !NonExistent(Alt32))
Want = WANT64;
else if (TargetTriple.isArch64Bit() && IsX32 && !NonExistent(Altx32))
@@ -1907,9 +1909,7 @@ void Generic_GCC::GCCInstallationDetector::init(
CandidateBiarchTripleAliases);
// Compute the set of prefixes for our search.
- SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
- D.PrefixDirs.end());
-
+ SmallVector<std::string, 8> Prefixes;
StringRef GCCToolchainDir = getGCCToolchainDir(Args, D.SysRoot);
if (GCCToolchainDir != "") {
if (GCCToolchainDir.back() == '/')
@@ -1932,13 +1932,10 @@ void Generic_GCC::GCCInstallationDetector::init(
// Typically /usr.
AddDefaultGCCPrefixes(TargetTriple, Prefixes, D.SysRoot);
}
- }
- // Try to respect gcc-config on Gentoo. However, do that only
- // if --gcc-toolchain is not provided or equal to the Gentoo install
- // in /usr. This avoids accidentally enforcing the system GCC version
- // when using a custom toolchain.
- if (GCCToolchainDir == "" || GCCToolchainDir == D.SysRoot + "/usr") {
+ // Try to respect gcc-config on Gentoo if --gcc-toolchain is not provided.
+ // This avoids accidentally enforcing the system GCC version when using a
+ // custom toolchain.
SmallVector<StringRef, 16> GentooTestTriples;
// Try to match an exact triple as target triple first.
// e.g. crossdev -S x86_64-gentoo-linux-gnu will install gcc libs for
@@ -1958,7 +1955,8 @@ void Generic_GCC::GCCInstallationDetector::init(
// Loop over the various components which exist and select the best GCC
// installation available. GCC installs are ranked by version number.
- Version = GCCVersion::Parse("0.0.0");
+ const GCCVersion VersionZero = GCCVersion::Parse("0.0.0");
+ Version = VersionZero;
for (const std::string &Prefix : Prefixes) {
auto &VFS = D.getVFS();
if (!VFS.exists(Prefix))
@@ -1991,6 +1989,10 @@ void Generic_GCC::GCCInstallationDetector::init(
ScanLibDirForGCCTriple(TargetTriple, Args, LibDir, Candidate, true,
GCCDirExists, GCCCrossDirExists);
}
+
+ // Skip other prefixes once a GCC installation is found.
+ if (Version > VersionZero)
+ break;
}
}
@@ -2104,15 +2106,21 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
"x86_64-slackware-linux", "x86_64-unknown-linux",
"x86_64-amazon-linux", "x86_64-linux-android"};
- static const char *const X32LibDirs[] = {"/libx32"};
+ static const char *const X32Triples[] = {"x86_64-linux-gnux32",
+ "x86_64-pc-linux-gnux32"};
+ static const char *const X32LibDirs[] = {"/libx32", "/lib"};
static const char *const X86LibDirs[] = {"/lib32", "/lib"};
static const char *const X86Triples[] = {
- "i686-linux-gnu", "i686-pc-linux-gnu", "i486-linux-gnu",
- "i386-linux-gnu", "i386-redhat-linux6E", "i686-redhat-linux",
- "i586-redhat-linux", "i386-redhat-linux", "i586-suse-linux",
- "i486-slackware-linux", "i686-montavista-linux", "i586-linux-gnu",
- "i686-linux-android", "i386-gnu", "i486-gnu",
- "i586-gnu", "i686-gnu"};
+ "i586-linux-gnu", "i686-linux-gnu",
+ "i686-pc-linux-gnu", "i386-redhat-linux6E",
+ "i686-redhat-linux", "i386-redhat-linux",
+ "i586-suse-linux", "i686-montavista-linux",
+ "i686-linux-android", "i686-gnu",
+ };
+
+ static const char *const M68kLibDirs[] = {"/lib"};
+ static const char *const M68kTriples[] = {
+ "m68k-linux-gnu", "m68k-unknown-linux-gnu", "m68k-suse-linux"};
static const char *const MIPSLibDirs[] = {"/lib"};
static const char *const MIPSTriples[] = {
@@ -2331,17 +2339,19 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(AVRTriples), end(AVRTriples));
break;
case llvm::Triple::x86_64:
- LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
- TripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
- // x32 is always available when x86_64 is available, so adding it as
- // secondary arch with x86_64 triples
- if (TargetTriple.getEnvironment() == llvm::Triple::GNUX32) {
- BiarchLibDirs.append(begin(X32LibDirs), end(X32LibDirs));
+ if (TargetTriple.isX32()) {
+ LibDirs.append(begin(X32LibDirs), end(X32LibDirs));
+ TripleAliases.append(begin(X32Triples), end(X32Triples));
+ BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
} else {
- BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
- BiarchTripleAliases.append(begin(X86Triples), end(X86Triples));
+ LibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
+ TripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
+ BiarchLibDirs.append(begin(X32LibDirs), end(X32LibDirs));
+ BiarchTripleAliases.append(begin(X32Triples), end(X32Triples));
}
+ BiarchLibDirs.append(begin(X86LibDirs), end(X86LibDirs));
+ BiarchTripleAliases.append(begin(X86Triples), end(X86Triples));
break;
case llvm::Triple::x86:
LibDirs.append(begin(X86LibDirs), end(X86LibDirs));
@@ -2351,8 +2361,14 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
TripleAliases.append(begin(X86Triples), end(X86Triples));
BiarchLibDirs.append(begin(X86_64LibDirs), end(X86_64LibDirs));
BiarchTripleAliases.append(begin(X86_64Triples), end(X86_64Triples));
+ BiarchLibDirs.append(begin(X32LibDirs), end(X32LibDirs));
+ BiarchTripleAliases.append(begin(X32Triples), end(X32Triples));
}
break;
+ case llvm::Triple::m68k:
+ LibDirs.append(begin(M68kLibDirs), end(M68kLibDirs));
+ TripleAliases.append(begin(M68kTriples), end(M68kTriples));
+ break;
case llvm::Triple::mips:
LibDirs.append(begin(MIPSLibDirs), end(MIPSLibDirs));
TripleAliases.append(begin(MIPSTriples), end(MIPSTriples));
@@ -2496,7 +2512,6 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
const llvm::Triple &TargetTriple, const ArgList &Args,
const std::string &LibDir, StringRef CandidateTriple,
bool NeedsBiarchSuffix, bool GCCDirExists, bool GCCCrossDirExists) {
- llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
// Locations relative to the system lib directory where GCC's triple-specific
// directories might reside.
struct GCCLibSuffix {
@@ -2520,24 +2535,7 @@ void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
// files in that location, not just GCC installation data.
{CandidateTriple.str(), "..",
TargetTriple.getVendor() == llvm::Triple::Freescale ||
- TargetTriple.getVendor() == llvm::Triple::OpenEmbedded},
-
- // Natively multiarch systems sometimes put the GCC triple-specific
- // directory within their multiarch lib directory, resulting in the
- // triple appearing twice.
- {CandidateTriple.str() + "/gcc/" + CandidateTriple.str(), "../../..",
- TargetTriple.getOS() != llvm::Triple::Solaris},
-
- // Deal with cases (on Ubuntu) where the system architecture could be i386
- // but the GCC target architecture could be (say) i686.
- // FIXME: It may be worthwhile to generalize this and look for a second
- // triple.
- {"i386-linux-gnu/gcc/" + CandidateTriple.str(), "../../..",
- (TargetArch == llvm::Triple::x86 &&
- TargetTriple.getOS() != llvm::Triple::Solaris)},
- {"i386-gnu/gcc/" + CandidateTriple.str(), "../../..",
- (TargetArch == llvm::Triple::x86 &&
- TargetTriple.getOS() != llvm::Triple::Solaris)}};
+ TargetTriple.getVendor() == llvm::Triple::OpenEmbedded}};
for (auto &Suffix : Suffixes) {
if (!Suffix.Active)
@@ -2760,6 +2758,7 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::mips64:
case llvm::Triple::mips64el:
case llvm::Triple::msp430:
+ case llvm::Triple::m68k:
return true;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
@@ -2773,15 +2772,6 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
}
}
-static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
- const Multilib &Multilib,
- StringRef InstallPath,
- ToolChain::path_list &Paths) {
- if (const auto &PathsCallback = Multilibs.filePathsCallback())
- for (const auto &Path : PathsCallback(Multilib))
- addPathIfExists(D, InstallPath + Path, Paths);
-}
-
void Generic_GCC::PushPPaths(ToolChain::path_list &PPaths) {
// Cross-compiling binutils and GCC installations (vanilla and openSUSE at
// least) put various tools in a triple-prefixed directory off of the parent
@@ -2808,12 +2798,13 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
const std::string &LibPath =
std::string(GCCInstallation.getParentLibPath());
- // Add toolchain / multilib specific file paths.
- addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
- GCCInstallation.getInstallPath(), Paths);
-
// Sourcery CodeBench MIPS toolchain holds some libraries under
// a biarch-like suffix of the GCC installation.
+ if (const auto &PathsCallback = Multilibs.filePathsCallback())
+ for (const auto &Path : PathsCallback(SelectedMultilib))
+ addPathIfExists(D, GCCInstallation.getInstallPath() + Path, Paths);
+
+ // Add lib/gcc/$triple/$version, with an optional /multilib suffix.
addPathIfExists(
D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
Paths);
@@ -2850,10 +2841,8 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
// the cross. Note that GCC does include some of these directories in some
// configurations but this seems somewhere between questionable and simply
// a bug.
- if (StringRef(LibPath).startswith(SysRoot)) {
- addPathIfExists(D, LibPath + "/" + MultiarchTriple, Paths);
+ if (StringRef(LibPath).startswith(SysRoot))
addPathIfExists(D, LibPath + "/../" + OSLibDir, Paths);
- }
}
}
@@ -2861,24 +2850,7 @@ void Generic_GCC::AddMultiarchPaths(const Driver &D,
const std::string &SysRoot,
const std::string &OSLibDir,
path_list &Paths) {
- // Try walking via the GCC triple path in case of biarch or multiarch GCC
- // installations with strange symlinks.
if (GCCInstallation.isValid()) {
- addPathIfExists(D,
- SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
- "/../../" + OSLibDir,
- Paths);
-
- // Add the 'other' biarch variant path
- Multilib BiarchSibling;
- if (GCCInstallation.getBiarchSibling(BiarchSibling)) {
- addPathIfExists(
- D, GCCInstallation.getInstallPath() + BiarchSibling.gccSuffix(),
- Paths);
- }
-
- // See comments above on the multilib variant for details of why this is
- // included even from outside the sysroot.
const std::string &LibPath =
std::string(GCCInstallation.getParentLibPath());
const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
@@ -2886,31 +2858,32 @@ void Generic_GCC::AddMultiarchPaths(const Driver &D,
addPathIfExists(
D, LibPath + "/../" + GCCTriple.str() + "/lib" + Multilib.osSuffix(),
Paths);
-
- // See comments above on the multilib variant for details of why this is
- // only included from within the sysroot.
- if (StringRef(LibPath).startswith(SysRoot))
- addPathIfExists(D, LibPath, Paths);
}
}
void Generic_GCC::AddMultilibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
// Add include directories specific to the selected multilib set and multilib.
- if (GCCInstallation.isValid()) {
- const auto &Callback = Multilibs.includeDirsCallback();
- if (Callback) {
- for (const auto &Path : Callback(GCCInstallation.getMultilib()))
- addExternCSystemIncludeIfExists(
- DriverArgs, CC1Args, GCCInstallation.getInstallPath() + Path);
- }
+ if (!GCCInstallation.isValid())
+ return;
+ // gcc TOOL_INCLUDE_DIR.
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ std::string LibPath(GCCInstallation.getParentLibPath());
+ addSystemInclude(DriverArgs, CC1Args,
+ Twine(LibPath) + "/../" + GCCTriple.str() + "/include");
+
+ const auto &Callback = Multilibs.includeDirsCallback();
+ if (Callback) {
+ for (const auto &Path : Callback(GCCInstallation.getMultilib()))
+ addExternCSystemIncludeIfExists(DriverArgs, CC1Args,
+ GCCInstallation.getInstallPath() + Path);
}
}
void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
- DriverArgs.hasArg(options::OPT_nostdincxx))
+ if (DriverArgs.hasArg(options::OPT_nostdinc, options::OPT_nostdincxx,
+ options::OPT_nostdlibinc))
return;
switch (GetCXXStdlibType(DriverArgs)) {
@@ -2924,90 +2897,80 @@ void Generic_GCC::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
}
}
-static std::string DetectLibcxxIncludePath(llvm::vfs::FileSystem &vfs,
- StringRef base) {
- std::error_code EC;
- int MaxVersion = 0;
- std::string MaxVersionString;
- for (llvm::vfs::directory_iterator LI = vfs.dir_begin(base, EC), LE;
- !EC && LI != LE; LI = LI.increment(EC)) {
- StringRef VersionText = llvm::sys::path::filename(LI->path());
- int Version;
- if (VersionText[0] == 'v' &&
- !VersionText.slice(1, StringRef::npos).getAsInteger(10, Version)) {
- if (Version > MaxVersion) {
- MaxVersion = Version;
- MaxVersionString = std::string(VersionText);
- }
- }
- }
- return MaxVersion ? (base + "/" + MaxVersionString).str() : "";
-}
-
void
Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+ std::string Target = getTripleString();
+
auto AddIncludePath = [&](std::string Path) {
- std::string IncludePath = DetectLibcxxIncludePath(getVFS(), Path);
- if (IncludePath.empty() || !getVFS().exists(IncludePath))
+ std::string Version = detectLibcxxVersion(Path);
+ if (Version.empty())
return false;
- addSystemInclude(DriverArgs, CC1Args, IncludePath);
+
+ // First add the per-target include path if it exists.
+ std::string TargetDir = Path + "/" + Target + "/c++/" + Version;
+ if (D.getVFS().exists(TargetDir))
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+
+ // Second add the generic one.
+ addSystemInclude(DriverArgs, CC1Args, Path + "/c++/" + Version);
return true;
};
+
// Android never uses the libc++ headers installed alongside the toolchain,
// which are generally incompatible with the NDK libraries anyway.
if (!getTriple().isAndroid())
- if (AddIncludePath(getDriver().Dir + "/../include/c++"))
+ if (AddIncludePath(getDriver().Dir + "/../include"))
return;
// If this is a development, non-installed, clang, libcxx will
// not be found at ../include/c++ but it likely to be found at
// one of the following two locations:
- std::string SysRoot = computeSysRoot();
- if (AddIncludePath(SysRoot + "/usr/local/include/c++"))
+ if (AddIncludePath(SysRoot + "/usr/local/include"))
return;
- if (AddIncludePath(SysRoot + "/usr/include/c++"))
+ if (AddIncludePath(SysRoot + "/usr/include"))
return;
}
-/// Helper to add the variant paths of a libstdc++ installation.
-bool Generic_GCC::addLibStdCXXIncludePaths(
- Twine Base, Twine Suffix, StringRef GCCTriple, StringRef GCCMultiarchTriple,
- StringRef TargetMultiarchTriple, Twine IncludeSuffix,
- const ArgList &DriverArgs, ArgStringList &CC1Args) const {
- if (!getVFS().exists(Base + Suffix))
+bool Generic_GCC::addLibStdCXXIncludePaths(Twine IncludeDir, StringRef Triple,
+ Twine IncludeSuffix,
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ bool DetectDebian) const {
+ if (!getVFS().exists(IncludeDir))
return false;
- addSystemInclude(DriverArgs, CC1Args, Base + Suffix);
+ // Debian native gcc uses g++-multiarch-incdir.diff which uses
+ // include/x86_64-linux-gnu/c++/10$IncludeSuffix instead of
+ // include/c++/10/x86_64-linux-gnu$IncludeSuffix.
+ std::string Dir = IncludeDir.str();
+ StringRef Include =
+ llvm::sys::path::parent_path(llvm::sys::path::parent_path(Dir));
+ std::string Path =
+ (Include + "/" + Triple + Dir.substr(Include.size()) + IncludeSuffix)
+ .str();
+ if (DetectDebian && !getVFS().exists(Path))
+ return false;
- // The vanilla GCC layout of libstdc++ headers uses a triple subdirectory. If
- // that path exists or we have neither a GCC nor target multiarch triple, use
- // this vanilla search path.
- if ((GCCMultiarchTriple.empty() && TargetMultiarchTriple.empty()) ||
- getVFS().exists(Base + Suffix + "/" + GCCTriple + IncludeSuffix)) {
+ // GPLUSPLUS_INCLUDE_DIR
+ addSystemInclude(DriverArgs, CC1Args, IncludeDir);
+ // GPLUSPLUS_TOOL_INCLUDE_DIR. If Triple is not empty, add a target-dependent
+ // include directory.
+ if (DetectDebian)
+ addSystemInclude(DriverArgs, CC1Args, Path);
+ else if (!Triple.empty())
addSystemInclude(DriverArgs, CC1Args,
- Base + Suffix + "/" + GCCTriple + IncludeSuffix);
- } else {
- // Otherwise try to use multiarch naming schemes which have normalized the
- // triples and put the triple before the suffix.
- //
- // GCC surprisingly uses *both* the GCC triple with a multilib suffix and
- // the target triple, so we support that here.
- addSystemInclude(DriverArgs, CC1Args,
- Base + "/" + GCCMultiarchTriple + Suffix + IncludeSuffix);
- addSystemInclude(DriverArgs, CC1Args,
- Base + "/" + TargetMultiarchTriple + Suffix);
- }
-
- addSystemInclude(DriverArgs, CC1Args, Base + Suffix + "/backward");
+ IncludeDir + "/" + Triple + IncludeSuffix);
+ // GPLUSPLUS_BACKWARD_INCLUDE_DIR
+ addSystemInclude(DriverArgs, CC1Args, IncludeDir + "/backward");
return true;
}
-bool
-Generic_GCC::addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const {
- // Use GCCInstallation to know where libstdc++ headers are installed.
- if (!GCCInstallation.isValid())
- return false;
+bool Generic_GCC::addGCCLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ StringRef DebianMultiarch) const {
+ assert(GCCInstallation.isValid());
// By default, look for the C++ headers in an include directory adjacent to
// the lib directory of the GCC installation. Note that this is expect to be
@@ -3016,17 +2979,24 @@ Generic_GCC::addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
StringRef InstallDir = GCCInstallation.getInstallPath();
StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
- const std::string GCCMultiarchTriple = getMultiarchTriple(
- getDriver(), GCCInstallation.getTriple(), getDriver().SysRoot);
- const std::string TargetMultiarchTriple =
- getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
const GCCVersion &Version = GCCInstallation.getVersion();
- // The primary search for libstdc++ supports multiarch variants.
- if (addLibStdCXXIncludePaths(LibDir.str() + "/../include",
- "/c++/" + Version.Text, TripleStr,
- GCCMultiarchTriple, TargetMultiarchTriple,
- Multilib.includeSuffix(), DriverArgs, CC1Args))
+ // Try /../$triple/include/c++/$version (gcc --print-multiarch is not empty).
+ if (addLibStdCXXIncludePaths(
+ LibDir.str() + "/../" + TripleStr + "/include/c++/" + Version.Text,
+ TripleStr, Multilib.includeSuffix(), DriverArgs, CC1Args))
+ return true;
+
+ // Detect Debian g++-multiarch-incdir.diff.
+ if (addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
+ DebianMultiarch, Multilib.includeSuffix(),
+ DriverArgs, CC1Args, /*Debian=*/true))
+ return true;
+
+ // Try /../include/c++/$version (gcc --print-multiarch is empty).
+ if (addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args))
return true;
// Otherwise, fall back on a bunch of options which don't use multiarch
@@ -3041,9 +3011,7 @@ Generic_GCC::addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
};
for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
- if (addLibStdCXXIncludePaths(IncludePath, /*Suffix*/ "", TripleStr,
- /*GCCMultiarchTriple*/ "",
- /*TargetMultiarchTriple*/ "",
+ if (addLibStdCXXIncludePaths(IncludePath, TripleStr,
Multilib.includeSuffix(), DriverArgs, CC1Args))
return true;
}
@@ -3053,7 +3021,10 @@ Generic_GCC::addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void
Generic_GCC::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args);
+ if (GCCInstallation.isValid()) {
+ addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args,
+ GCCInstallation.getTriple().str());
+ }
}
llvm::opt::DerivedArgList *
diff --git a/clang/lib/Driver/ToolChains/Gnu.h b/clang/lib/Driver/ToolChains/Gnu.h
index 90d3bafc1f00..40fd756a5653 100644
--- a/clang/lib/Driver/ToolChains/Gnu.h
+++ b/clang/lib/Driver/ToolChains/Gnu.h
@@ -310,11 +310,6 @@ protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
- virtual std::string getMultiarchTriple(const Driver &D,
- const llvm::Triple &TargetTriple,
- StringRef SysRoot) const
- { return TargetTriple.str(); }
-
/// \name ToolChain Implementation Helper Functions
/// @{
@@ -347,16 +342,15 @@ protected:
addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const;
- bool
- addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const;
+ bool addGCCLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ StringRef DebianMultiarch) const;
- bool addLibStdCXXIncludePaths(Twine Base, Twine Suffix, StringRef GCCTriple,
- StringRef GCCMultiarchTriple,
- StringRef TargetMultiarchTriple,
+ bool addLibStdCXXIncludePaths(Twine IncludeDir, StringRef Triple,
Twine IncludeSuffix,
const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const;
+ llvm::opt::ArgStringList &CC1Args,
+ bool DetectDebian = false) const;
/// @}
diff --git a/clang/lib/Driver/ToolChains/HIP.cpp b/clang/lib/Driver/ToolChains/HIP.cpp
index a84c0c257033..59d58aadb687 100644
--- a/clang/lib/Driver/ToolChains/HIP.cpp
+++ b/clang/lib/Driver/ToolChains/HIP.cpp
@@ -9,12 +9,12 @@
#include "HIP.h"
#include "AMDGPU.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Basic/Cuda.h"
#include "clang/Basic/TargetID.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/FileSystem.h"
@@ -35,23 +35,6 @@ using namespace llvm::opt;
namespace {
const unsigned HIPCodeObjectAlign = 4096;
-
-static void addBCLib(const Driver &D, const ArgList &Args,
- ArgStringList &CmdArgs, ArgStringList LibraryPaths,
- StringRef BCName) {
- StringRef FullName;
- for (std::string LibraryPath : LibraryPaths) {
- SmallString<128> Path(LibraryPath);
- llvm::sys::path::append(Path, BCName);
- FullName = Path;
- if (llvm::sys::fs::exists(FullName)) {
- CmdArgs.push_back("-mlink-builtin-bitcode");
- CmdArgs.push_back(Args.MakeArgString(FullName));
- return;
- }
- }
- D.Diag(diag::err_drv_no_such_file) << BCName;
-}
} // namespace
void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
@@ -66,8 +49,8 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
auto &TC = getToolChain();
auto &D = TC.getDriver();
assert(!Inputs.empty() && "Must have at least one input.");
- addLTOOptions(TC, Args, LldArgs, Output, Inputs[0],
- D.getLTOMode() == LTOK_Thin);
+ bool IsThinLTO = D.getLTOMode(/*IsOffload=*/true) == LTOK_Thin;
+ addLTOOptions(TC, Args, LldArgs, Output, Inputs[0], IsThinLTO);
// Extract all the -m options
std::vector<llvm::StringRef> Features;
@@ -83,6 +66,12 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
if (!Features.empty())
LldArgs.push_back(Args.MakeArgString(MAttrString));
+ // ToDo: Remove this option after AMDGPU backend supports ISA-level linking.
+ // Since AMDGPU backend currently does not support ISA-level linking, all
+ // called functions need to be imported.
+ if (IsThinLTO)
+ LldArgs.push_back(Args.MakeArgString("-plugin-opt=-force-import-all"));
+
for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
LldArgs.push_back(
Args.MakeArgString(Twine("-plugin-opt=") + A->getValue(0)));
@@ -96,6 +85,13 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
LldArgs.append({"-o", Output.getFilename()});
for (auto Input : Inputs)
LldArgs.push_back(Input.getFilename());
+
+ if (Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ false))
+ llvm::for_each(TC.getHIPDeviceLibs(Args), [&](StringRef BCFile) {
+ LldArgs.push_back(Args.MakeArgString(BCFile));
+ });
+
const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Lld, LldArgs, Inputs, Output));
@@ -118,11 +114,12 @@ void AMDGCN::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
std::string BundlerInputArg = "-inputs=" NULL_FILE;
- // TODO: Change the bundle ID as requested by HIP runtime.
// For code object version 2 and 3, the offload kind in bundle ID is 'hip'
// for backward compatibility. For code object version 4 and greater, the
// offload kind in bundle ID is 'hipv4'.
std::string OffloadKind = "hip";
+ if (getAMDGPUCodeObjectVersion(C.getDriver(), Args) >= 4)
+ OffloadKind = OffloadKind + "v4";
for (const auto &II : Inputs) {
const auto* A = II.getAction();
BundlerTargetArg = BundlerTargetArg + "," + OffloadKind +
@@ -183,6 +180,7 @@ void AMDGCN::Linker::constructGenerateObjFileFromHIPFatBinary(
// in several main host machines.
ObjStream << "# HIP Object Generator\n";
ObjStream << "# *** Automatically generated by Clang ***\n";
+ ObjStream << " .protected __hip_fatbin\n";
ObjStream << " .type __hip_fatbin,@object\n";
ObjStream << " .section .hip_fatbin,\"a\",@progbits\n";
ObjStream << " .globl __hip_fatbin\n";
@@ -247,13 +245,8 @@ void HIPToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadingKind) const {
HostTC.addClangTargetOptions(DriverArgs, CC1Args, DeviceOffloadingKind);
- StringRef GpuArch = getGPUArch(DriverArgs);
- assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
- (void) GpuArch;
assert(DeviceOffloadingKind == Action::OFK_HIP &&
"Only HIP offloading kinds are supported for GPUs.");
- auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
- const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
CC1Args.push_back("-fcuda-is-device");
@@ -261,10 +254,8 @@ void HIPToolChain::addClangTargetOptions(
options::OPT_fno_cuda_approx_transcendentals, false))
CC1Args.push_back("-fcuda-approx-transcendentals");
- if (DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
- false))
- CC1Args.push_back("-fgpu-rdc");
- else
+ if (!DriverArgs.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
+ false))
CC1Args.append({"-mllvm", "-amdgpu-internalize-symbols"});
StringRef MaxThreadsPerBlock =
@@ -285,66 +276,10 @@ void HIPToolChain::addClangTargetOptions(
CC1Args.push_back("-fapply-global-visibility-to-externs");
}
- if (DriverArgs.hasArg(options::OPT_nogpulib))
- return;
- ArgStringList LibraryPaths;
-
- // Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
- for (auto Path : RocmInstallation.getRocmDeviceLibPathArg())
- LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
-
- addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
-
- // Maintain compatability with --hip-device-lib.
- auto BCLibs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
- if (!BCLibs.empty()) {
- for (auto Lib : BCLibs)
- addBCLib(getDriver(), DriverArgs, CC1Args, LibraryPaths, Lib);
- } else {
- if (!RocmInstallation.hasDeviceLibrary()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
- return;
- }
-
- std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
- if (LibDeviceFile.empty()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
- return;
- }
-
- // If --hip-device-lib is not set, add the default bitcode libraries.
- // TODO: There are way too many flags that change this. Do we need to check
- // them all?
- bool DAZ = DriverArgs.hasFlag(options::OPT_fcuda_flush_denormals_to_zero,
- options::OPT_fno_cuda_flush_denormals_to_zero,
- getDefaultDenormsAreZeroForTarget(Kind));
- // TODO: Check standard C++ flags?
- bool FiniteOnly = false;
- bool UnsafeMathOpt = false;
- bool FastRelaxedMath = false;
- bool CorrectSqrt = true;
- bool Wave64 = isWave64(DriverArgs, Kind);
-
- // Add the HIP specific bitcode library.
+ llvm::for_each(getHIPDeviceLibs(DriverArgs), [&](StringRef BCFile) {
CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(RocmInstallation.getHIPPath()));
-
- // Add the generic set of libraries.
- RocmInstallation.addCommonBitcodeLibCC1Args(
- DriverArgs, CC1Args, LibDeviceFile, Wave64, DAZ, FiniteOnly,
- UnsafeMathOpt, FastRelaxedMath, CorrectSqrt);
-
- // Add instrument lib.
- auto InstLib =
- DriverArgs.getLastArgValue(options::OPT_gpu_instrument_lib_EQ);
- if (InstLib.empty())
- return;
- if (llvm::sys::fs::exists(InstLib)) {
- CC1Args.push_back("-mlink-builtin-bitcode");
- CC1Args.push_back(DriverArgs.MakeArgString(InstLib));
- } else
- getDriver().Diag(diag::err_drv_no_such_file) << InstLib;
- }
+ CC1Args.push_back(DriverArgs.MakeArgString(BCFile));
+ });
}
llvm::opt::DerivedArgList *
@@ -423,3 +358,130 @@ VersionTuple HIPToolChain::computeMSVCVersion(const Driver *D,
const ArgList &Args) const {
return HostTC.computeMSVCVersion(D, Args);
}
+
+llvm::SmallVector<std::string, 12>
+HIPToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
+ llvm::SmallVector<std::string, 12> BCLibs;
+ if (DriverArgs.hasArg(options::OPT_nogpulib))
+ return {};
+ ArgStringList LibraryPaths;
+
+ // Find in --hip-device-lib-path and HIP_LIBRARY_PATH.
+ for (auto Path : RocmInstallation.getRocmDeviceLibPathArg())
+ LibraryPaths.push_back(DriverArgs.MakeArgString(Path));
+
+ addDirectoryList(DriverArgs, LibraryPaths, "", "HIP_DEVICE_LIB_PATH");
+
+ // Maintain compatability with --hip-device-lib.
+ auto BCLibArgs = DriverArgs.getAllArgValues(options::OPT_hip_device_lib_EQ);
+ if (!BCLibArgs.empty()) {
+ llvm::for_each(BCLibArgs, [&](StringRef BCName) {
+ StringRef FullName;
+ for (std::string LibraryPath : LibraryPaths) {
+ SmallString<128> Path(LibraryPath);
+ llvm::sys::path::append(Path, BCName);
+ FullName = Path;
+ if (llvm::sys::fs::exists(FullName)) {
+ BCLibs.push_back(FullName.str());
+ return;
+ }
+ }
+ getDriver().Diag(diag::err_drv_no_such_file) << BCName;
+ });
+ } else {
+ if (!RocmInstallation.hasDeviceLibrary()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return {};
+ }
+ StringRef GpuArch = getGPUArch(DriverArgs);
+ assert(!GpuArch.empty() && "Must have an explicit GPU arch.");
+ (void)GpuArch;
+ auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
+ const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
+
+ std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
+ if (LibDeviceFile.empty()) {
+ getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ return {};
+ }
+
+ // If --hip-device-lib is not set, add the default bitcode libraries.
+ // TODO: There are way too many flags that change this. Do we need to check
+ // them all?
+ bool DAZ = DriverArgs.hasFlag(options::OPT_fgpu_flush_denormals_to_zero,
+ options::OPT_fno_gpu_flush_denormals_to_zero,
+ getDefaultDenormsAreZeroForTarget(Kind));
+ bool FiniteOnly =
+ DriverArgs.hasFlag(options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only, false);
+ bool UnsafeMathOpt =
+ DriverArgs.hasFlag(options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations, false);
+ bool FastRelaxedMath = DriverArgs.hasFlag(
+ options::OPT_ffast_math, options::OPT_fno_fast_math, false);
+ bool CorrectSqrt = DriverArgs.hasFlag(
+ options::OPT_fhip_fp32_correctly_rounded_divide_sqrt,
+ options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt);
+ bool Wave64 = isWave64(DriverArgs, Kind);
+
+ if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
+ options::OPT_fno_gpu_sanitize, false)) {
+ auto AsanRTL = RocmInstallation.getAsanRTLPath();
+ if (AsanRTL.empty()) {
+ unsigned DiagID = getDriver().getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "AMDGPU address sanitizer runtime library (asanrtl) is not found. "
+ "Please install ROCm device library which supports address "
+ "sanitizer");
+ getDriver().Diag(DiagID);
+ return {};
+ } else
+ BCLibs.push_back(AsanRTL.str());
+ }
+
+ // Add the HIP specific bitcode library.
+ BCLibs.push_back(RocmInstallation.getHIPPath().str());
+
+ // Add the generic set of libraries.
+ BCLibs.append(RocmInstallation.getCommonBitcodeLibs(
+ DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
+ FastRelaxedMath, CorrectSqrt));
+
+ // Add instrument lib.
+ auto InstLib =
+ DriverArgs.getLastArgValue(options::OPT_gpu_instrument_lib_EQ);
+ if (InstLib.empty())
+ return BCLibs;
+ if (llvm::sys::fs::exists(InstLib))
+ BCLibs.push_back(InstLib.str());
+ else
+ getDriver().Diag(diag::err_drv_no_such_file) << InstLib;
+ }
+
+ return BCLibs;
+}
+
+void HIPToolChain::checkTargetID(const llvm::opt::ArgList &DriverArgs) const {
+ auto PTID = getParsedTargetID(DriverArgs);
+ if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
+ getDriver().Diag(clang::diag::err_drv_bad_target_id)
+ << PTID.OptionalTargetID.getValue();
+ return;
+ }
+
+ assert(PTID.OptionalFeatures && "Invalid return from getParsedTargetID");
+ auto &FeatureMap = PTID.OptionalFeatures.getValue();
+ // Sanitizer is not supported with xnack-.
+ if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
+ options::OPT_fno_gpu_sanitize, false)) {
+ auto Loc = FeatureMap.find("xnack");
+ if (Loc != FeatureMap.end() && !Loc->second) {
+ auto &Diags = getDriver().getDiags();
+ auto DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "'-fgpu-sanitize' is not compatible with offload arch '%0'. "
+ "Use an offload arch without 'xnack-' instead");
+ Diags.Report(DiagID) << PTID.OptionalTargetID.getValue();
+ }
+ }
+}
diff --git a/clang/lib/Driver/ToolChains/HIP.h b/clang/lib/Driver/ToolChains/HIP.h
index 5e2be7138579..3cced0a320dc 100644
--- a/clang/lib/Driver/ToolChains/HIP.h
+++ b/clang/lib/Driver/ToolChains/HIP.h
@@ -71,15 +71,6 @@ public:
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
-
- bool useIntegratedAs() const override { return true; }
- bool isCrossCompiling() const override { return true; }
- bool isPICDefault() const override { return false; }
- bool isPIEDefault() const override { return false; }
- bool isPICDefaultForced() const override { return false; }
- bool SupportsProfiling() const override { return false; }
- bool IsMathErrnoDefault() const override { return false; }
-
void addClangWarningOptions(llvm::opt::ArgStringList &CC1Args) const override;
CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
void
@@ -92,6 +83,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ llvm::SmallVector<std::string, 12>
+ getHIPDeviceLibs(const llvm::opt::ArgList &Args) const override;
SanitizerMask getSupportedSanitizers() const override;
@@ -102,6 +95,7 @@ public:
unsigned GetDefaultDwarfVersion() const override { return 4; }
const ToolChain &HostTC;
+ void checkTargetID(const llvm::opt::ArgList &DriverArgs) const override;
protected:
Tool *buildLinker() const override;
diff --git a/clang/lib/Driver/ToolChains/Haiku.cpp b/clang/lib/Driver/ToolChains/Haiku.cpp
index 18f550c9ceca..a79f0f7622ad 100644
--- a/clang/lib/Driver/ToolChains/Haiku.cpp
+++ b/clang/lib/Driver/ToolChains/Haiku.cpp
@@ -29,6 +29,6 @@ void Haiku::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void Haiku::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(getDriver().SysRoot, "/system/develop/headers/c++",
- getTriple().str(), "", "", "", DriverArgs, CC1Args);
+ addLibStdCXXIncludePaths(getDriver().SysRoot + "/system/develop/headers/c++",
+ getTriple().str(), "", DriverArgs, CC1Args);
}
diff --git a/clang/lib/Driver/ToolChains/Hexagon.cpp b/clang/lib/Driver/ToolChains/Hexagon.cpp
index fb54f73bcd4c..828bfdbb05a3 100644
--- a/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -8,10 +8,10 @@
#include "Hexagon.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Option/ArgList.h"
@@ -38,7 +38,7 @@ static void handleHVXWarnings(const Driver &D, const ArgList &Args) {
// Handle the unsupported values passed to mhvx-length.
if (Arg *A = Args.getLastArg(options::OPT_mhexagon_hvx_length_EQ)) {
StringRef Val = A->getValue();
- if (!Val.equals_lower("64b") && !Val.equals_lower("128b"))
+ if (!Val.equals_insensitive("64b") && !Val.equals_insensitive("128b"))
D.Diag(diag::err_drv_unsupported_option_argument)
<< A->getOption().getName() << Val;
}
@@ -218,8 +218,8 @@ constructHexagonLinkArgs(Compilation &C, const JobAction &JA,
bool IncDefLibs = !Args.hasArg(options::OPT_nodefaultlibs);
bool UseG0 = false;
const char *Exec = Args.MakeArgString(HTC.GetLinkerPath());
- bool UseLLD = (llvm::sys::path::filename(Exec).equals_lower("ld.lld") ||
- llvm::sys::path::stem(Exec).equals_lower("ld.lld"));
+ bool UseLLD = (llvm::sys::path::filename(Exec).equals_insensitive("ld.lld") ||
+ llvm::sys::path::stem(Exec).equals_insensitive("ld.lld"));
bool UseShared = IsShared && !IsStatic;
StringRef CpuVer = toolchains::HexagonToolChain::GetTargetCPUVersion(Args);
@@ -613,15 +613,15 @@ void HexagonToolChain::addLibCxxIncludePaths(
llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
if (!D.SysRoot.empty() && getTriple().isMusl())
- addLibStdCXXIncludePaths(D.SysRoot + "/usr/include/c++/v1", "", "", "", "",
- "", DriverArgs, CC1Args);
- else if (getTriple().isMusl())
- addLibStdCXXIncludePaths("/usr/include/c++/v1", "", "", "", "", "",
+ addLibStdCXXIncludePaths(D.SysRoot + "/usr/include/c++/v1", "", "",
DriverArgs, CC1Args);
+ else if (getTriple().isMusl())
+ addLibStdCXXIncludePaths("/usr/include/c++/v1", "", "", DriverArgs,
+ CC1Args);
else {
std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
- addLibStdCXXIncludePaths(TargetDir, "/hexagon/include/c++/v1", "", "", "",
- "", DriverArgs, CC1Args);
+ addLibStdCXXIncludePaths(TargetDir + "/hexagon/include/c++/v1", "", "",
+ DriverArgs, CC1Args);
}
}
void HexagonToolChain::addLibStdCxxIncludePaths(
@@ -629,7 +629,7 @@ void HexagonToolChain::addLibStdCxxIncludePaths(
llvm::opt::ArgStringList &CC1Args) const {
const Driver &D = getDriver();
std::string TargetDir = getHexagonTargetDir(D.InstalledDir, D.PrefixDirs);
- addLibStdCXXIncludePaths(TargetDir, "/hexagon/include/c++", "", "", "", "",
+ addLibStdCXXIncludePaths(TargetDir + "/hexagon/include/c++", "", "",
DriverArgs, CC1Args);
}
diff --git a/clang/lib/Driver/ToolChains/Hurd.cpp b/clang/lib/Driver/ToolChains/Hurd.cpp
index a700d7b9064c..48b9ccadf36f 100644
--- a/clang/lib/Driver/ToolChains/Hurd.cpp
+++ b/clang/lib/Driver/ToolChains/Hurd.cpp
@@ -170,11 +170,13 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
AddMultilibIncludeArgs(DriverArgs, CC1Args);
- if (getTriple().getArch() == llvm::Triple::x86) {
- std::string Path = SysRoot + "/usr/include/i386-gnu";
- if (D.getVFS().exists(Path))
- addExternCSystemInclude(DriverArgs, CC1Args, Path);
- }
+ // On systems using multiarch, add /usr/include/$triple before
+ // /usr/include.
+ std::string MultiarchIncludeDir = getMultiarchTriple(D, getTriple(), SysRoot);
+ if (!MultiarchIncludeDir.empty() &&
+ D.getVFS().exists(SysRoot + "/usr/include/" + MultiarchIncludeDir))
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/" + MultiarchIncludeDir);
// Add an include of '/include' directly. This isn't provided by default by
// system GCCs, but is often used with cross-compiling GCCs, and harmless to
@@ -184,6 +186,21 @@ void Hurd::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
}
+void Hurd::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // We need a detected GCC installation on Linux to provide libstdc++'s
+ // headers in odd Linuxish places.
+ if (!GCCInstallation.isValid())
+ return;
+
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ StringRef DebianMultiarch =
+ GCCInstallation.getTriple().getArch() == llvm::Triple::x86 ? "i386-gnu"
+ : TripleStr;
+
+ addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args, DebianMultiarch);
+}
+
void Hurd::addExtraOpts(llvm::opt::ArgStringList &CmdArgs) const {
for (const auto &Opt : ExtraOpts)
CmdArgs.push_back(Opt.c_str());
diff --git a/clang/lib/Driver/ToolChains/Hurd.h b/clang/lib/Driver/ToolChains/Hurd.h
index 0612a55280a8..f301bc5f4269 100644
--- a/clang/lib/Driver/ToolChains/Hurd.h
+++ b/clang/lib/Driver/ToolChains/Hurd.h
@@ -26,6 +26,9 @@ public:
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
std::string getDynamicLinker(const llvm::opt::ArgList &Args) const override;
diff --git a/clang/lib/Driver/ToolChains/InterfaceStubs.cpp b/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
index 57acf338df5c..05a13db8d0cf 100644
--- a/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
+++ b/clang/lib/Driver/ToolChains/InterfaceStubs.cpp
@@ -20,10 +20,11 @@ void Merger::ConstructJob(Compilation &C, const JobAction &JA,
const llvm::opt::ArgList &Args,
const char *LinkingOutput) const {
std::string Merger = getToolChain().GetProgramPath(getShortName());
+ // TODO: Use IFS library directly in the future.
llvm::opt::ArgStringList CmdArgs;
- CmdArgs.push_back("-action");
+ CmdArgs.push_back("--input-format=IFS");
const bool WriteBin = !Args.getLastArg(options::OPT_emit_merged_ifs);
- CmdArgs.push_back(WriteBin ? "write-bin" : "write-ifs");
+ CmdArgs.push_back(WriteBin ? "--output-format=ELF" : "--output-format=IFS");
CmdArgs.push_back("-o");
// Normally we want to write to a side-car file ending in ".ifso" so for
diff --git a/clang/lib/Driver/ToolChains/Linux.cpp b/clang/lib/Driver/ToolChains/Linux.cpp
index e17a6bd4bdd2..c9360fc67165 100644
--- a/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/clang/lib/Driver/ToolChains/Linux.cpp
@@ -58,64 +58,42 @@ std::string Linux::getMultiarchTriple(const Driver &D,
// regardless of what the actual target triple is.
case llvm::Triple::arm:
case llvm::Triple::thumb:
- if (IsAndroid) {
+ if (IsAndroid)
return "arm-linux-androideabi";
- } else if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
- if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabihf"))
- return "arm-linux-gnueabihf";
- } else {
- if (D.getVFS().exists(SysRoot + "/lib/arm-linux-gnueabi"))
- return "arm-linux-gnueabi";
- }
- break;
+ if (TargetEnvironment == llvm::Triple::GNUEABIHF)
+ return "arm-linux-gnueabihf";
+ return "arm-linux-gnueabi";
case llvm::Triple::armeb:
case llvm::Triple::thumbeb:
- if (TargetEnvironment == llvm::Triple::GNUEABIHF) {
- if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabihf"))
- return "armeb-linux-gnueabihf";
- } else {
- if (D.getVFS().exists(SysRoot + "/lib/armeb-linux-gnueabi"))
- return "armeb-linux-gnueabi";
- }
- break;
+ if (TargetEnvironment == llvm::Triple::GNUEABIHF)
+ return "armeb-linux-gnueabihf";
+ return "armeb-linux-gnueabi";
case llvm::Triple::x86:
if (IsAndroid)
return "i686-linux-android";
- if (D.getVFS().exists(SysRoot + "/lib/i386-linux-gnu"))
- return "i386-linux-gnu";
- break;
+ return "i386-linux-gnu";
case llvm::Triple::x86_64:
if (IsAndroid)
return "x86_64-linux-android";
- // We don't want this for x32, otherwise it will match x86_64 libs
- if (TargetEnvironment != llvm::Triple::GNUX32 &&
- D.getVFS().exists(SysRoot + "/lib/x86_64-linux-gnu"))
- return "x86_64-linux-gnu";
- break;
+ if (TargetEnvironment == llvm::Triple::GNUX32)
+ return "x86_64-linux-gnux32";
+ return "x86_64-linux-gnu";
case llvm::Triple::aarch64:
if (IsAndroid)
return "aarch64-linux-android";
- if (D.getVFS().exists(SysRoot + "/lib/aarch64-linux-gnu"))
- return "aarch64-linux-gnu";
- break;
+ return "aarch64-linux-gnu";
case llvm::Triple::aarch64_be:
- if (D.getVFS().exists(SysRoot + "/lib/aarch64_be-linux-gnu"))
- return "aarch64_be-linux-gnu";
- break;
- case llvm::Triple::mips: {
- std::string MT = IsMipsR6 ? "mipsisa32r6-linux-gnu" : "mips-linux-gnu";
- if (D.getVFS().exists(SysRoot + "/lib/" + MT))
- return MT;
- break;
- }
- case llvm::Triple::mipsel: {
+ return "aarch64_be-linux-gnu";
+
+ case llvm::Triple::m68k:
+ return "m68k-linux-gnu";
+
+ case llvm::Triple::mips:
+ return IsMipsR6 ? "mipsisa32r6-linux-gnu" : "mips-linux-gnu";
+ case llvm::Triple::mipsel:
if (IsAndroid)
return "mipsel-linux-android";
- std::string MT = IsMipsR6 ? "mipsisa32r6el-linux-gnu" : "mipsel-linux-gnu";
- if (D.getVFS().exists(SysRoot + "/lib/" + MT))
- return MT;
- break;
- }
+ return IsMipsR6 ? "mipsisa32r6el-linux-gnu" : "mipsel-linux-gnu";
case llvm::Triple::mips64: {
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6" : "mips64") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
@@ -139,33 +117,19 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::ppc:
if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
return "powerpc-linux-gnuspe";
- if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnu"))
- return "powerpc-linux-gnu";
- break;
+ return "powerpc-linux-gnu";
case llvm::Triple::ppcle:
- if (D.getVFS().exists(SysRoot + "/lib/powerpcle-linux-gnu"))
- return "powerpcle-linux-gnu";
- break;
+ return "powerpcle-linux-gnu";
case llvm::Triple::ppc64:
- if (D.getVFS().exists(SysRoot + "/lib/powerpc64-linux-gnu"))
- return "powerpc64-linux-gnu";
- break;
+ return "powerpc64-linux-gnu";
case llvm::Triple::ppc64le:
- if (D.getVFS().exists(SysRoot + "/lib/powerpc64le-linux-gnu"))
- return "powerpc64le-linux-gnu";
- break;
+ return "powerpc64le-linux-gnu";
case llvm::Triple::sparc:
- if (D.getVFS().exists(SysRoot + "/lib/sparc-linux-gnu"))
- return "sparc-linux-gnu";
- break;
+ return "sparc-linux-gnu";
case llvm::Triple::sparcv9:
- if (D.getVFS().exists(SysRoot + "/lib/sparc64-linux-gnu"))
- return "sparc64-linux-gnu";
- break;
+ return "sparc64-linux-gnu";
case llvm::Triple::systemz:
- if (D.getVFS().exists(SysRoot + "/lib/s390x-linux-gnu"))
- return "s390x-linux-gnu";
- break;
+ return "s390x-linux-gnu";
}
return TargetTriple.str();
}
@@ -202,8 +166,7 @@ static StringRef getOSLibDir(const llvm::Triple &Triple, const ArgList &Args) {
Triple.getArch() == llvm::Triple::sparc)
return "lib32";
- if (Triple.getArch() == llvm::Triple::x86_64 &&
- Triple.getEnvironment() == llvm::Triple::GNUX32)
+ if (Triple.getArch() == llvm::Triple::x86_64 && Triple.isX32())
return "libx32";
if (Triple.getArch() == llvm::Triple::riscv32)
@@ -236,15 +199,6 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
ExtraOpts.push_back("relro");
}
- if (Triple.isAndroid() && Triple.isAndroidVersionLT(29)) {
- // https://github.com/android/ndk/issues/1196
- // The unwinder used by the crash handler on versions of Android prior to
- // API 29 did not correctly handle binaries built with rosegment, which is
- // enabled by default for LLD. Android only supports LLD, so it's not an
- // issue that this flag is not accepted by other linkers.
- ExtraOpts.push_back("--no-rosegment");
- }
-
// Android ARM/AArch64 use max-page-size=4096 to reduce VMA usage. Note, lld
// from 11 onwards default max-page-size to 65536 for both ARM and AArch64.
if ((Triple.isARM() || Triple.isAArch64()) && Triple.isAndroid()) {
@@ -310,16 +264,6 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
- // Similar to the logic for GCC above, if we currently running Clang inside
- // of the requested system root, add its parent library paths to
- // those searched.
- // FIXME: It's not clear whether we should use the driver's installed
- // directory ('Dir' below) or the ResourceDir.
- if (StringRef(D.Dir).startswith(SysRoot)) {
- addPathIfExists(D, D.Dir + "/../lib/" + MultiarchTriple, Paths);
- addPathIfExists(D, D.Dir + "/../" + OSLibDir, Paths);
- }
-
addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
@@ -366,6 +310,12 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
addPathIfExists(D, SysRoot + "/usr/lib", Paths);
}
+ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
+ if (getTriple().isAndroid())
+ return ToolChain::RLT_CompilerRT;
+ return Generic_ELF::GetDefaultRuntimeLibType();
+}
+
ToolChain::CXXStdlibType Linux::GetDefaultCXXStdlibType() const {
if (getTriple().isAndroid())
return ToolChain::CST_Libcxx;
@@ -447,6 +397,12 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
ArchName = "armeb";
IsArm = true;
break;
+ case llvm::Triple::x86:
+ ArchName = "i386";
+ break;
+ case llvm::Triple::x86_64:
+ ArchName = Triple.isX32() ? "x32" : Triple.getArchName().str();
+ break;
default:
ArchName = Triple.getArchName().str();
}
@@ -485,6 +441,10 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader = HF ? "ld-linux-armhf.so.3" : "ld-linux.so.3";
break;
}
+ case llvm::Triple::m68k:
+ LibDir = "lib";
+ Loader = "ld.so.1";
+ break;
case llvm::Triple::mips:
case llvm::Triple::mipsel:
case llvm::Triple::mips64:
@@ -552,7 +512,7 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
Loader = "ld-linux.so.2";
break;
case llvm::Triple::x86_64: {
- bool X32 = Triple.getEnvironment() == llvm::Triple::GNUX32;
+ bool X32 = Triple.isX32();
LibDir = X32 ? "libx32" : "lib64";
Loader = X32 ? "ld-linux-x32.so.2" : "ld-linux-x86-64.so.2";
@@ -577,9 +537,10 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (DriverArgs.hasArg(clang::driver::options::OPT_nostdinc))
return;
- if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
- addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
-
+ // Add 'include' in the resource directory, which is similar to
+ // GCC_INCLUDE_DIR (private headers) in GCC. Note: the include directory
+ // contains some files conflicting with system /usr/include. musl systems
+ // prefer the /usr/include copies which are more relevant.
SmallString<128> ResourceDirInclude(D.ResourceDir);
llvm::sys::path::append(ResourceDirInclude, "include");
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) &&
@@ -589,6 +550,11 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
+ // LOCAL_INCLUDE_DIR
+ addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+ // TOOL_INCLUDE_DIR
+ AddMultilibIncludeArgs(DriverArgs, CC1Args);
+
// Check for configure-time C include directories.
StringRef CIncludeDirs(C_INCLUDE_DIRS);
if (CIncludeDirs != "") {
@@ -602,173 +568,13 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
}
- // Lacking those, try to detect the correct set of system includes for the
- // target triple.
-
- AddMultilibIncludeArgs(DriverArgs, CC1Args);
-
- // Implement generic Debian multiarch support.
- const StringRef X86_64MultiarchIncludeDirs[] = {
- "/usr/include/x86_64-linux-gnu",
-
- // FIXME: These are older forms of multiarch. It's not clear that they're
- // in use in any released version of Debian, so we should consider
- // removing them.
- "/usr/include/i686-linux-gnu/64", "/usr/include/i486-linux-gnu/64"};
- const StringRef X86MultiarchIncludeDirs[] = {
- "/usr/include/i386-linux-gnu",
-
- // FIXME: These are older forms of multiarch. It's not clear that they're
- // in use in any released version of Debian, so we should consider
- // removing them.
- "/usr/include/x86_64-linux-gnu/32", "/usr/include/i686-linux-gnu",
- "/usr/include/i486-linux-gnu"};
- const StringRef AArch64MultiarchIncludeDirs[] = {
- "/usr/include/aarch64-linux-gnu"};
- const StringRef ARMMultiarchIncludeDirs[] = {
- "/usr/include/arm-linux-gnueabi"};
- const StringRef ARMHFMultiarchIncludeDirs[] = {
- "/usr/include/arm-linux-gnueabihf"};
- const StringRef ARMEBMultiarchIncludeDirs[] = {
- "/usr/include/armeb-linux-gnueabi"};
- const StringRef ARMEBHFMultiarchIncludeDirs[] = {
- "/usr/include/armeb-linux-gnueabihf"};
- const StringRef MIPSMultiarchIncludeDirs[] = {"/usr/include/mips-linux-gnu"};
- const StringRef MIPSELMultiarchIncludeDirs[] = {
- "/usr/include/mipsel-linux-gnu"};
- const StringRef MIPS64MultiarchIncludeDirs[] = {
- "/usr/include/mips64-linux-gnuabi64"};
- const StringRef MIPS64ELMultiarchIncludeDirs[] = {
- "/usr/include/mips64el-linux-gnuabi64"};
- const StringRef MIPSN32MultiarchIncludeDirs[] = {
- "/usr/include/mips64-linux-gnuabin32"};
- const StringRef MIPSN32ELMultiarchIncludeDirs[] = {
- "/usr/include/mips64el-linux-gnuabin32"};
- const StringRef MIPSR6MultiarchIncludeDirs[] = {
- "/usr/include/mipsisa32-linux-gnu"};
- const StringRef MIPSR6ELMultiarchIncludeDirs[] = {
- "/usr/include/mipsisa32r6el-linux-gnu"};
- const StringRef MIPS64R6MultiarchIncludeDirs[] = {
- "/usr/include/mipsisa64r6-linux-gnuabi64"};
- const StringRef MIPS64R6ELMultiarchIncludeDirs[] = {
- "/usr/include/mipsisa64r6el-linux-gnuabi64"};
- const StringRef MIPSN32R6MultiarchIncludeDirs[] = {
- "/usr/include/mipsisa64r6-linux-gnuabin32"};
- const StringRef MIPSN32R6ELMultiarchIncludeDirs[] = {
- "/usr/include/mipsisa64r6el-linux-gnuabin32"};
- const StringRef PPCMultiarchIncludeDirs[] = {
- "/usr/include/powerpc-linux-gnu",
- "/usr/include/powerpc-linux-gnuspe"};
- const StringRef PPCLEMultiarchIncludeDirs[] = {
- "/usr/include/powerpcle-linux-gnu"};
- const StringRef PPC64MultiarchIncludeDirs[] = {
- "/usr/include/powerpc64-linux-gnu"};
- const StringRef PPC64LEMultiarchIncludeDirs[] = {
- "/usr/include/powerpc64le-linux-gnu"};
- const StringRef SparcMultiarchIncludeDirs[] = {
- "/usr/include/sparc-linux-gnu"};
- const StringRef Sparc64MultiarchIncludeDirs[] = {
- "/usr/include/sparc64-linux-gnu"};
- const StringRef SYSTEMZMultiarchIncludeDirs[] = {
- "/usr/include/s390x-linux-gnu"};
- ArrayRef<StringRef> MultiarchIncludeDirs;
- switch (getTriple().getArch()) {
- case llvm::Triple::x86_64:
- MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
- break;
- case llvm::Triple::x86:
- MultiarchIncludeDirs = X86MultiarchIncludeDirs;
- break;
- case llvm::Triple::aarch64:
- case llvm::Triple::aarch64_be:
- MultiarchIncludeDirs = AArch64MultiarchIncludeDirs;
- break;
- case llvm::Triple::arm:
- case llvm::Triple::thumb:
- if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
- MultiarchIncludeDirs = ARMHFMultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
- break;
- case llvm::Triple::armeb:
- case llvm::Triple::thumbeb:
- if (getTriple().getEnvironment() == llvm::Triple::GNUEABIHF)
- MultiarchIncludeDirs = ARMEBHFMultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = ARMEBMultiarchIncludeDirs;
- break;
- case llvm::Triple::mips:
- if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
- MultiarchIncludeDirs = MIPSR6MultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
- break;
- case llvm::Triple::mipsel:
- if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
- MultiarchIncludeDirs = MIPSR6ELMultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
- break;
- case llvm::Triple::mips64:
- if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
- if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
- MultiarchIncludeDirs = MIPSN32R6MultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = MIPS64R6MultiarchIncludeDirs;
- else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
- MultiarchIncludeDirs = MIPSN32MultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = MIPS64MultiarchIncludeDirs;
- break;
- case llvm::Triple::mips64el:
- if (getTriple().getSubArch() == llvm::Triple::MipsSubArch_r6)
- if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
- MultiarchIncludeDirs = MIPSN32R6ELMultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = MIPS64R6ELMultiarchIncludeDirs;
- else if (getTriple().getEnvironment() == llvm::Triple::GNUABIN32)
- MultiarchIncludeDirs = MIPSN32ELMultiarchIncludeDirs;
- else
- MultiarchIncludeDirs = MIPS64ELMultiarchIncludeDirs;
- break;
- case llvm::Triple::ppc:
- MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
- break;
- case llvm::Triple::ppcle:
- MultiarchIncludeDirs = PPCLEMultiarchIncludeDirs;
- break;
- case llvm::Triple::ppc64:
- MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
- break;
- case llvm::Triple::ppc64le:
- MultiarchIncludeDirs = PPC64LEMultiarchIncludeDirs;
- break;
- case llvm::Triple::sparc:
- MultiarchIncludeDirs = SparcMultiarchIncludeDirs;
- break;
- case llvm::Triple::sparcv9:
- MultiarchIncludeDirs = Sparc64MultiarchIncludeDirs;
- break;
- case llvm::Triple::systemz:
- MultiarchIncludeDirs = SYSTEMZMultiarchIncludeDirs;
- break;
- default:
- break;
- }
-
- const std::string AndroidMultiarchIncludeDir =
- std::string("/usr/include/") +
- getMultiarchTriple(D, getTriple(), SysRoot);
- const StringRef AndroidMultiarchIncludeDirs[] = {AndroidMultiarchIncludeDir};
- if (getTriple().isAndroid())
- MultiarchIncludeDirs = AndroidMultiarchIncludeDirs;
-
- for (StringRef Dir : MultiarchIncludeDirs) {
- if (D.getVFS().exists(SysRoot + Dir)) {
- addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + Dir);
- break;
- }
- }
+ // On systems using multiarch and Android, add /usr/include/$triple before
+ // /usr/include.
+ std::string MultiarchIncludeDir = getMultiarchTriple(D, getTriple(), SysRoot);
+ if (!MultiarchIncludeDir.empty() &&
+ D.getVFS().exists(SysRoot + "/usr/include/" + MultiarchIncludeDir))
+ addExternCSystemInclude(DriverArgs, CC1Args,
+ SysRoot + "/usr/include/" + MultiarchIncludeDir);
if (getTriple().getOS() == llvm::Triple::RTEMS)
return;
@@ -786,17 +592,24 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- // Try generic GCC detection first.
- if (Generic_GCC::addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args))
- return;
-
// We need a detected GCC installation on Linux to provide libstdc++'s
// headers in odd Linuxish places.
if (!GCCInstallation.isValid())
return;
- StringRef LibDir = GCCInstallation.getParentLibPath();
+ // Detect Debian g++-multiarch-incdir.diff.
StringRef TripleStr = GCCInstallation.getTriple().str();
+ StringRef DebianMultiarch =
+ GCCInstallation.getTriple().getArch() == llvm::Triple::x86
+ ? "i386-linux-gnu"
+ : TripleStr;
+
+ // Try generic GCC detection first.
+ if (Generic_GCC::addGCCLibStdCxxIncludePaths(DriverArgs, CC1Args,
+ DebianMultiarch))
+ return;
+
+ StringRef LibDir = GCCInstallation.getParentLibPath();
const Multilib &Multilib = GCCInstallation.getMultilib();
const GCCVersion &Version = GCCInstallation.getVersion();
@@ -812,9 +625,7 @@ void Linux::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
};
for (const auto &IncludePath : LibStdCXXIncludePathCandidates) {
- if (addLibStdCXXIncludePaths(IncludePath, /*Suffix*/ "", TripleStr,
- /*GCCMultiarchTriple*/ "",
- /*TargetMultiarchTriple*/ "",
+ if (addLibStdCXXIncludePaths(IncludePath, TripleStr,
Multilib.includeSuffix(), DriverArgs, CC1Args))
break;
}
@@ -845,6 +656,19 @@ bool Linux::isPIEDefault() const {
getTriple().isMusl() || getSanitizerArgs().requiresPIE();
}
+bool Linux::IsAArch64OutlineAtomicsDefault(const ArgList &Args) const {
+ // Outline atomics for AArch64 are supported by compiler-rt
+ // and libgcc since 9.3.1
+ assert(getTriple().isAArch64() && "expected AArch64 target!");
+ ToolChain::RuntimeLibType RtLib = GetRuntimeLibType(Args);
+ if (RtLib == ToolChain::RLT_CompilerRT)
+ return true;
+ assert(RtLib == ToolChain::RLT_Libgcc && "unexpected runtime library type!");
+ if (GCCInstallation.getVersion().isOlderThan(9, 3, 1))
+ return false;
+ return true;
+}
+
bool Linux::isNoExecStackDefault() const {
return getTriple().isAndroid();
}
@@ -868,6 +692,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
getTriple().getArch() == llvm::Triple::thumb ||
getTriple().getArch() == llvm::Triple::armeb ||
getTriple().getArch() == llvm::Triple::thumbeb;
+ const bool IsRISCV64 = getTriple().getArch() == llvm::Triple::riscv64;
const bool IsSystemZ = getTriple().getArch() == llvm::Triple::systemz;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
@@ -882,9 +707,9 @@ SanitizerMask Linux::getSupportedSanitizers() const {
if (IsX86_64 || IsMIPS64 || IsAArch64)
Res |= SanitizerKind::DataFlow;
if (IsX86_64 || IsMIPS64 || IsAArch64 || IsX86 || IsArmArch || IsPowerPC64 ||
- IsSystemZ)
+ IsRISCV64 || IsSystemZ)
Res |= SanitizerKind::Leak;
- if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64)
+ if (IsX86_64 || IsMIPS64 || IsAArch64 || IsPowerPC64 || IsSystemZ)
Res |= SanitizerKind::Thread;
if (IsX86_64)
Res |= SanitizerKind::KernelMemory;
diff --git a/clang/lib/Driver/ToolChains/Linux.h b/clang/lib/Driver/ToolChains/Linux.h
index 6b16b0e64990..169a37c44072 100644
--- a/clang/lib/Driver/ToolChains/Linux.h
+++ b/clang/lib/Driver/ToolChains/Linux.h
@@ -23,6 +23,10 @@ public:
bool HasNativeLLVMSupport() const override;
+ std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const override;
+
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -35,7 +39,10 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ RuntimeLibType GetDefaultRuntimeLibType() const override;
CXXStdlibType GetDefaultCXXStdlibType() const override;
+ bool
+ IsAArch64OutlineAtomicsDefault(const llvm::opt::ArgList &Args) const override;
bool isPIEDefault() const override;
bool isNoExecStackDefault() const override;
bool IsMathErrnoDefault() const override;
@@ -58,10 +65,6 @@ protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
Tool *buildStaticLibTool() const override;
-
- std::string getMultiarchTriple(const Driver &D,
- const llvm::Triple &TargetTriple,
- StringRef SysRoot) const override;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/ToolChains/MSP430.cpp b/clang/lib/Driver/ToolChains/MSP430.cpp
index f3ed9967a81a..96994ba77fac 100644
--- a/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -9,8 +9,8 @@
#include "MSP430.h"
#include "CommonArgs.h"
#include "Gnu.h"
-#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Multilib.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
diff --git a/clang/lib/Driver/ToolChains/MSP430.h b/clang/lib/Driver/ToolChains/MSP430.h
index 3789e7442a23..9d247ca3a896 100644
--- a/clang/lib/Driver/ToolChains/MSP430.h
+++ b/clang/lib/Driver/ToolChains/MSP430.h
@@ -10,9 +10,9 @@
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_MSP430_H
#include "Gnu.h"
-#include "InputInfo.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringRef.h"
diff --git a/clang/lib/Driver/ToolChains/MSVC.cpp b/clang/lib/Driver/ToolChains/MSVC.cpp
index f4b7a57e0bb7..0dc94a4c6c7d 100644
--- a/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -11,6 +11,7 @@
#include "Darwin.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
@@ -27,6 +28,7 @@
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/Process.h"
+#include "llvm/Support/VirtualFileSystem.h"
#include <cstdio>
#ifdef _WIN32
@@ -61,19 +63,65 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+static bool canExecute(llvm::vfs::FileSystem &VFS, StringRef Path) {
+ auto Status = VFS.status(Path);
+ if (!Status)
+ return false;
+ return (Status->getPermissions() & llvm::sys::fs::perms::all_exe) != 0;
+}
+
// Defined below.
// Forward declare this so there aren't too many things above the constructor.
static bool getSystemRegistryString(const char *keyPath, const char *valueName,
std::string &value, std::string *phValue);
+static std::string getHighestNumericTupleInDirectory(llvm::vfs::FileSystem &VFS,
+ StringRef Directory) {
+ std::string Highest;
+ llvm::VersionTuple HighestTuple;
+
+ std::error_code EC;
+ for (llvm::vfs::directory_iterator DirIt = VFS.dir_begin(Directory, EC),
+ DirEnd;
+ !EC && DirIt != DirEnd; DirIt.increment(EC)) {
+ auto Status = VFS.status(DirIt->path());
+ if (!Status || !Status->isDirectory())
+ continue;
+ StringRef CandidateName = llvm::sys::path::filename(DirIt->path());
+ llvm::VersionTuple Tuple;
+ if (Tuple.tryParse(CandidateName)) // tryParse() returns true on error.
+ continue;
+ if (Tuple > HighestTuple) {
+ HighestTuple = Tuple;
+ Highest = CandidateName.str();
+ }
+ }
+
+ return Highest;
+}
+
// Check command line arguments to try and find a toolchain.
static bool
-findVCToolChainViaCommandLine(const ArgList &Args, std::string &Path,
+findVCToolChainViaCommandLine(llvm::vfs::FileSystem &VFS, const ArgList &Args,
+ std::string &Path,
MSVCToolChain::ToolsetLayout &VSLayout) {
// Don't validate the input; trust the value supplied by the user.
// The primary motivation is to prevent unnecessary file and registry access.
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir)) {
- Path = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir,
+ options::OPT__SLASH_winsysroot)) {
+ if (A->getOption().getID() == options::OPT__SLASH_winsysroot) {
+ llvm::SmallString<128> ToolsPath(A->getValue());
+ llvm::sys::path::append(ToolsPath, "VC", "Tools", "MSVC");
+ std::string VCToolsVersion;
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsversion))
+ VCToolsVersion = A->getValue();
+ else
+ VCToolsVersion = getHighestNumericTupleInDirectory(VFS, ToolsPath);
+ llvm::sys::path::append(ToolsPath, VCToolsVersion);
+ Path = std::string(ToolsPath.str());
+ } else {
+ Path = A->getValue();
+ }
VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
return true;
}
@@ -81,8 +129,9 @@ findVCToolChainViaCommandLine(const ArgList &Args, std::string &Path,
}
// Check various environment variables to try and find a toolchain.
-static bool findVCToolChainViaEnvironment(std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
+static bool
+findVCToolChainViaEnvironment(llvm::vfs::FileSystem &VFS, std::string &Path,
+ MSVCToolChain::ToolsetLayout &VSLayout) {
// These variables are typically set by vcvarsall.bat
// when launching a developer command prompt.
if (llvm::Optional<std::string> VCToolsInstallDir =
@@ -120,34 +169,37 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
// If cl.exe doesn't exist, then this definitely isn't a VC toolchain.
ExeTestPath = PathEntry;
llvm::sys::path::append(ExeTestPath, "cl.exe");
- if (!llvm::sys::fs::exists(ExeTestPath))
+ if (!VFS.exists(ExeTestPath))
continue;
// cl.exe existing isn't a conclusive test for a VC toolchain; clang also
// has a cl.exe. So let's check for link.exe too.
ExeTestPath = PathEntry;
llvm::sys::path::append(ExeTestPath, "link.exe");
- if (!llvm::sys::fs::exists(ExeTestPath))
+ if (!VFS.exists(ExeTestPath))
continue;
// whatever/VC/bin --> old toolchain, VC dir is toolchain dir.
llvm::StringRef TestPath = PathEntry;
- bool IsBin = llvm::sys::path::filename(TestPath).equals_lower("bin");
+ bool IsBin =
+ llvm::sys::path::filename(TestPath).equals_insensitive("bin");
if (!IsBin) {
// Strip any architecture subdir like "amd64".
TestPath = llvm::sys::path::parent_path(TestPath);
- IsBin = llvm::sys::path::filename(TestPath).equals_lower("bin");
+ IsBin = llvm::sys::path::filename(TestPath).equals_insensitive("bin");
}
if (IsBin) {
llvm::StringRef ParentPath = llvm::sys::path::parent_path(TestPath);
llvm::StringRef ParentFilename = llvm::sys::path::filename(ParentPath);
- if (ParentFilename == "VC") {
+ if (ParentFilename.equals_insensitive("VC")) {
Path = std::string(ParentPath);
VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
return true;
}
- if (ParentFilename == "x86ret" || ParentFilename == "x86chk"
- || ParentFilename == "amd64ret" || ParentFilename == "amd64chk") {
+ if (ParentFilename.equals_insensitive("x86ret") ||
+ ParentFilename.equals_insensitive("x86chk") ||
+ ParentFilename.equals_insensitive("amd64ret") ||
+ ParentFilename.equals_insensitive("amd64chk")) {
Path = std::string(ParentPath);
VSLayout = MSVCToolChain::ToolsetLayout::DevDivInternal;
return true;
@@ -166,7 +218,7 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
for (llvm::StringRef Prefix : ExpectedPrefixes) {
if (It == End)
goto NotAToolChain;
- if (!It->startswith(Prefix))
+ if (!It->startswith_insensitive(Prefix))
goto NotAToolChain;
++It;
}
@@ -193,8 +245,9 @@ static bool findVCToolChainViaEnvironment(std::string &Path,
// and find its default VC toolchain.
// This is the preferred way to discover new Visual Studios, as they're no
// longer listed in the registry.
-static bool findVCToolChainViaSetupConfig(std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
+static bool
+findVCToolChainViaSetupConfig(llvm::vfs::FileSystem &VFS, std::string &Path,
+ MSVCToolChain::ToolsetLayout &VSLayout) {
#if !defined(USE_MSVC_SETUP_API)
return false;
#else
@@ -272,7 +325,8 @@ static bool findVCToolChainViaSetupConfig(std::string &Path,
llvm::SmallString<256> ToolchainPath(VCRootPath);
llvm::sys::path::append(ToolchainPath, "Tools", "MSVC",
ToolsVersionFile->get()->getBuffer().rtrim());
- if (!llvm::sys::fs::is_directory(ToolchainPath))
+ auto Status = VFS.status(ToolchainPath);
+ if (!Status || !Status->isDirectory())
return false;
Path = std::string(ToolchainPath.str());
@@ -314,8 +368,7 @@ static std::string FindVisualStudioExecutable(const ToolChain &TC,
SmallString<128> FilePath(MSVC.getSubDirectoryPath(
toolchains::MSVCToolChain::SubDirectoryType::Bin));
llvm::sys::path::append(FilePath, Exe);
- return std::string(llvm::sys::fs::can_execute(FilePath) ? FilePath.str()
- : Exe);
+ return std::string(canExecute(TC.getVFS(), FilePath) ? FilePath.str() : Exe);
}
void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
@@ -338,30 +391,34 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-defaultlib:oldnames");
}
- if (!llvm::sys::Process::GetEnv("LIB")) {
- // If the VC environment hasn't been configured (perhaps because the user
- // did not run vcvarsall), try to build a consistent link environment. If
- // the environment variable is set however, assume the user knows what
- // they're doing.
+ // If the VC environment hasn't been configured (perhaps because the user
+ // did not run vcvarsall), try to build a consistent link environment. If
+ // the environment variable is set however, assume the user knows what
+ // they're doing. If the user passes /vctoolsdir or /winsdkdir, trust that
+ // over env vars.
+ if (!llvm::sys::Process::GetEnv("LIB") ||
+ Args.getLastArg(options::OPT__SLASH_vctoolsdir,
+ options::OPT__SLASH_winsysroot)) {
CmdArgs.push_back(Args.MakeArgString(
Twine("-libpath:") +
TC.getSubDirectoryPath(
toolchains::MSVCToolChain::SubDirectoryType::Lib)));
-
CmdArgs.push_back(Args.MakeArgString(
Twine("-libpath:") +
TC.getSubDirectoryPath(toolchains::MSVCToolChain::SubDirectoryType::Lib,
"atlmfc")));
-
+ }
+ if (!llvm::sys::Process::GetEnv("LIB") ||
+ Args.getLastArg(options::OPT__SLASH_winsdkdir,
+ options::OPT__SLASH_winsysroot)) {
if (TC.useUniversalCRT()) {
std::string UniversalCRTLibPath;
- if (TC.getUniversalCRTLibraryPath(UniversalCRTLibPath))
+ if (TC.getUniversalCRTLibraryPath(Args, UniversalCRTLibPath))
CmdArgs.push_back(
Args.MakeArgString(Twine("-libpath:") + UniversalCRTLibPath));
}
-
std::string WindowsSdkLibPath;
- if (TC.getWindowsSDKLibraryPath(WindowsSdkLibPath))
+ if (TC.getWindowsSDKLibraryPath(Args, WindowsSdkLibPath))
CmdArgs.push_back(
Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
@@ -451,11 +508,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// Control Flow Guard checks
if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
StringRef GuardArgs = A->getValue();
- if (GuardArgs.equals_lower("cf") || GuardArgs.equals_lower("cf,nochecks")) {
+ if (GuardArgs.equals_insensitive("cf") ||
+ GuardArgs.equals_insensitive("cf,nochecks")) {
// MSVC doesn't yet support the "nochecks" modifier.
CmdArgs.push_back("-guard:cf");
- } else if (GuardArgs.equals_lower("cf-")) {
+ } else if (GuardArgs.equals_insensitive("cf-")) {
CmdArgs.push_back("-guard:cf-");
+ } else if (GuardArgs.equals_insensitive("ehcont")) {
+ CmdArgs.push_back("-guard:ehcont");
+ } else if (GuardArgs.equals_insensitive("ehcont-")) {
+ CmdArgs.push_back("-guard:ehcont-");
}
}
@@ -520,23 +582,26 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// translate 'lld' into 'lld-link', and in the case of the regular msvc
// linker, we need to use a special search algorithm.
llvm::SmallString<128> linkPath;
- StringRef Linker = Args.getLastArgValue(options::OPT_fuse_ld_EQ, "link");
- if (Linker.equals_lower("lld"))
+ StringRef Linker
+ = Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER);
+ if (Linker.empty())
+ Linker = "link";
+ if (Linker.equals_insensitive("lld"))
Linker = "lld-link";
- if (Linker.equals_lower("link")) {
+ if (Linker.equals_insensitive("link")) {
// If we're using the MSVC linker, it's not sufficient to just use link
// from the program PATH, because other environments like GnuWin32 install
// their own link.exe which may come first.
linkPath = FindVisualStudioExecutable(TC, "link.exe");
- if (!TC.FoundMSVCInstall() && !llvm::sys::fs::can_execute(linkPath)) {
+ if (!TC.FoundMSVCInstall() && !canExecute(TC.getVFS(), linkPath)) {
llvm::SmallString<128> ClPath;
ClPath = TC.GetProgramPath("cl.exe");
- if (llvm::sys::fs::can_execute(ClPath)) {
+ if (canExecute(TC.getVFS(), ClPath)) {
linkPath = llvm::sys::path::parent_path(ClPath);
llvm::sys::path::append(linkPath, "link.exe");
- if (!llvm::sys::fs::can_execute(linkPath))
+ if (!canExecute(TC.getVFS(), linkPath))
C.getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
} else {
C.getDriver().Diag(clang::diag::warn_drv_msvc_not_found);
@@ -583,7 +648,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// find it.
for (const char *Cursor = EnvBlock.data(); *Cursor != '\0';) {
llvm::StringRef EnvVar(Cursor);
- if (EnvVar.startswith_lower("path=")) {
+ if (EnvVar.startswith_insensitive("path=")) {
using SubDirectoryType = toolchains::MSVCToolChain::SubDirectoryType;
constexpr size_t PrefixLen = 5; // strlen("path=")
Environment.push_back(Args.MakeArgString(
@@ -615,145 +680,6 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
C.addCommand(std::move(LinkCmd));
}
-void visualstudio::Compiler::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- C.addCommand(GetCommand(C, JA, Output, Inputs, Args, LinkingOutput));
-}
-
-std::unique_ptr<Command> visualstudio::Compiler::GetCommand(
- Compilation &C, const JobAction &JA, const InputInfo &Output,
- const InputInfoList &Inputs, const ArgList &Args,
- const char *LinkingOutput) const {
- ArgStringList CmdArgs;
- CmdArgs.push_back("/nologo");
- CmdArgs.push_back("/c"); // Compile only.
- CmdArgs.push_back("/W0"); // No warnings.
-
- // The goal is to be able to invoke this tool correctly based on
- // any flag accepted by clang-cl.
-
- // These are spelled the same way in clang and cl.exe,.
- Args.AddAllArgs(CmdArgs, {options::OPT_D, options::OPT_U, options::OPT_I});
-
- // Optimization level.
- if (Arg *A = Args.getLastArg(options::OPT_fbuiltin, options::OPT_fno_builtin))
- CmdArgs.push_back(A->getOption().getID() == options::OPT_fbuiltin ? "/Oi"
- : "/Oi-");
- if (Arg *A = Args.getLastArg(options::OPT_O, options::OPT_O0)) {
- if (A->getOption().getID() == options::OPT_O0) {
- CmdArgs.push_back("/Od");
- } else {
- CmdArgs.push_back("/Og");
-
- StringRef OptLevel = A->getValue();
- if (OptLevel == "s" || OptLevel == "z")
- CmdArgs.push_back("/Os");
- else
- CmdArgs.push_back("/Ot");
-
- CmdArgs.push_back("/Ob2");
- }
- }
- if (Arg *A = Args.getLastArg(options::OPT_fomit_frame_pointer,
- options::OPT_fno_omit_frame_pointer))
- CmdArgs.push_back(A->getOption().getID() == options::OPT_fomit_frame_pointer
- ? "/Oy"
- : "/Oy-");
- if (!Args.hasArg(options::OPT_fwritable_strings))
- CmdArgs.push_back("/GF");
-
- // Flags for which clang-cl has an alias.
- // FIXME: How can we ensure this stays in sync with relevant clang-cl options?
-
- if (Args.hasFlag(options::OPT__SLASH_GR_, options::OPT__SLASH_GR,
- /*Default=*/false))
- CmdArgs.push_back("/GR-");
-
- if (Args.hasFlag(options::OPT__SLASH_GS_, options::OPT__SLASH_GS,
- /*Default=*/false))
- CmdArgs.push_back("/GS-");
-
- if (Arg *A = Args.getLastArg(options::OPT_ffunction_sections,
- options::OPT_fno_function_sections))
- CmdArgs.push_back(A->getOption().getID() == options::OPT_ffunction_sections
- ? "/Gy"
- : "/Gy-");
- if (Arg *A = Args.getLastArg(options::OPT_fdata_sections,
- options::OPT_fno_data_sections))
- CmdArgs.push_back(
- A->getOption().getID() == options::OPT_fdata_sections ? "/Gw" : "/Gw-");
- if (Args.hasArg(options::OPT_fsyntax_only))
- CmdArgs.push_back("/Zs");
- if (Args.hasArg(options::OPT_g_Flag, options::OPT_gline_tables_only,
- options::OPT__SLASH_Z7))
- CmdArgs.push_back("/Z7");
-
- std::vector<std::string> Includes =
- Args.getAllArgValues(options::OPT_include);
- for (const auto &Include : Includes)
- CmdArgs.push_back(Args.MakeArgString(std::string("/FI") + Include));
-
- // Flags that can simply be passed through.
- Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LD);
- Args.AddAllArgs(CmdArgs, options::OPT__SLASH_LDd);
- Args.AddAllArgs(CmdArgs, options::OPT__SLASH_GX);
- Args.AddAllArgs(CmdArgs, options::OPT__SLASH_GX_);
- Args.AddAllArgs(CmdArgs, options::OPT__SLASH_EH);
- Args.AddAllArgs(CmdArgs, options::OPT__SLASH_Zl);
-
- // The order of these flags is relevant, so pick the last one.
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_MD, options::OPT__SLASH_MDd,
- options::OPT__SLASH_MT, options::OPT__SLASH_MTd))
- A->render(Args, CmdArgs);
-
- // Use MSVC's default threadsafe statics behaviour unless there was a flag.
- if (Arg *A = Args.getLastArg(options::OPT_fthreadsafe_statics,
- options::OPT_fno_threadsafe_statics)) {
- CmdArgs.push_back(A->getOption().getID() == options::OPT_fthreadsafe_statics
- ? "/Zc:threadSafeInit"
- : "/Zc:threadSafeInit-");
- }
-
- // Control Flow Guard checks
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
- StringRef GuardArgs = A->getValue();
- if (GuardArgs.equals_lower("cf") || GuardArgs.equals_lower("cf,nochecks")) {
- // MSVC doesn't yet support the "nochecks" modifier.
- CmdArgs.push_back("/guard:cf");
- } else if (GuardArgs.equals_lower("cf-")) {
- CmdArgs.push_back("/guard:cf-");
- }
- }
-
- // Pass through all unknown arguments so that the fallback command can see
- // them too.
- Args.AddAllArgs(CmdArgs, options::OPT_UNKNOWN);
-
- // Input filename.
- assert(Inputs.size() == 1);
- const InputInfo &II = Inputs[0];
- assert(II.getType() == types::TY_C || II.getType() == types::TY_CXX);
- CmdArgs.push_back(II.getType() == types::TY_C ? "/Tc" : "/Tp");
- if (II.isFilename())
- CmdArgs.push_back(II.getFilename());
- else
- II.getInputArg().renderAsInput(Args, CmdArgs);
-
- // Output filename.
- assert(Output.getType() == types::TY_Object);
- const char *Fo =
- Args.MakeArgString(std::string("/Fo") + Output.getFilename());
- CmdArgs.push_back(Fo);
-
- std::string Exec = FindVisualStudioExecutable(getToolChain(), "cl.exe");
- return std::make_unique<Command>(
- JA, *this, ResponseFileSupport::AtFileUTF16(), Args.MakeArgString(Exec),
- CmdArgs, Inputs, Output);
-}
-
MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
: ToolChain(D, Triple, Args), CudaInstallation(D, Triple, Args),
@@ -766,9 +692,9 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
// use. Check the environment next, in case we're being invoked from a VS
// command prompt. Failing that, just try to find the newest Visual Studio
// version we can and use its default VC toolchain.
- findVCToolChainViaCommandLine(Args, VCToolChainPath, VSLayout) ||
- findVCToolChainViaEnvironment(VCToolChainPath, VSLayout) ||
- findVCToolChainViaSetupConfig(VCToolChainPath, VSLayout) ||
+ findVCToolChainViaCommandLine(getVFS(), Args, VCToolChainPath, VSLayout) ||
+ findVCToolChainViaEnvironment(getVFS(), VCToolChainPath, VSLayout) ||
+ findVCToolChainViaSetupConfig(getVFS(), VCToolChainPath, VSLayout) ||
findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
}
@@ -1064,36 +990,67 @@ static bool getSystemRegistryString(const char *keyPath, const char *valueName,
// vcvarsqueryregistry.bat from Visual Studio 2015 sorts entries in the include
// directory by name and uses the last one of the list.
// So we compare entry names lexicographically to find the greatest one.
-static bool getWindows10SDKVersionFromPath(const std::string &SDKPath,
+static bool getWindows10SDKVersionFromPath(llvm::vfs::FileSystem &VFS,
+ const std::string &SDKPath,
std::string &SDKVersion) {
- SDKVersion.clear();
-
- std::error_code EC;
llvm::SmallString<128> IncludePath(SDKPath);
llvm::sys::path::append(IncludePath, "Include");
- for (llvm::sys::fs::directory_iterator DirIt(IncludePath, EC), DirEnd;
- DirIt != DirEnd && !EC; DirIt.increment(EC)) {
- if (!llvm::sys::fs::is_directory(DirIt->path()))
- continue;
- StringRef CandidateName = llvm::sys::path::filename(DirIt->path());
- // If WDK is installed, there could be subfolders like "wdf" in the
- // "Include" directory.
- // Allow only directories which names start with "10.".
- if (!CandidateName.startswith("10."))
- continue;
- if (CandidateName > SDKVersion)
- SDKVersion = std::string(CandidateName);
- }
-
+ SDKVersion = getHighestNumericTupleInDirectory(VFS, IncludePath);
return !SDKVersion.empty();
}
+static bool getWindowsSDKDirViaCommandLine(llvm::vfs::FileSystem &VFS,
+ const ArgList &Args,
+ std::string &Path, int &Major,
+ std::string &Version) {
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkdir,
+ options::OPT__SLASH_winsysroot)) {
+ // Don't validate the input; trust the value supplied by the user.
+ // The motivation is to prevent unnecessary file and registry access.
+ llvm::VersionTuple SDKVersion;
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkversion))
+ SDKVersion.tryParse(A->getValue());
+
+ if (A->getOption().getID() == options::OPT__SLASH_winsysroot) {
+ llvm::SmallString<128> SDKPath(A->getValue());
+ llvm::sys::path::append(SDKPath, "Windows Kits");
+ if (!SDKVersion.empty())
+ llvm::sys::path::append(SDKPath, Twine(SDKVersion.getMajor()));
+ else
+ llvm::sys::path::append(
+ SDKPath, getHighestNumericTupleInDirectory(VFS, SDKPath));
+ Path = std::string(SDKPath.str());
+ } else {
+ Path = A->getValue();
+ }
+
+ if (!SDKVersion.empty()) {
+ Major = SDKVersion.getMajor();
+ Version = SDKVersion.getAsString();
+ } else if (getWindows10SDKVersionFromPath(VFS, Path, Version)) {
+ Major = 10;
+ }
+ return true;
+ }
+ return false;
+}
+
/// Get Windows SDK installation directory.
-static bool getWindowsSDKDir(std::string &Path, int &Major,
+static bool getWindowsSDKDir(llvm::vfs::FileSystem &VFS, const ArgList &Args,
+ std::string &Path, int &Major,
std::string &WindowsSDKIncludeVersion,
std::string &WindowsSDKLibVersion) {
- std::string RegistrySDKVersion;
+ // Trust /winsdkdir and /winsdkversion if present.
+ if (getWindowsSDKDirViaCommandLine(VFS, Args, Path, Major,
+ WindowsSDKIncludeVersion)) {
+ WindowsSDKLibVersion = WindowsSDKIncludeVersion;
+ return true;
+ }
+
+ // FIXME: Try env vars (%WindowsSdkDir%, %UCRTVersion%) before going to registry.
+
// Try the Windows registry.
+ std::string RegistrySDKVersion;
if (!getSystemRegistryString(
"SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
"InstallationFolder", Path, &RegistrySDKVersion))
@@ -1115,7 +1072,7 @@ static bool getWindowsSDKDir(std::string &Path, int &Major,
for (const char *Test : Tests) {
llvm::SmallString<128> TestPath(Path);
llvm::sys::path::append(TestPath, "Lib", Test);
- if (llvm::sys::fs::exists(TestPath.c_str())) {
+ if (VFS.exists(TestPath)) {
WindowsSDKLibVersion = Test;
break;
}
@@ -1123,7 +1080,7 @@ static bool getWindowsSDKDir(std::string &Path, int &Major,
return !WindowsSDKLibVersion.empty();
}
if (Major == 10) {
- if (!getWindows10SDKVersionFromPath(Path, WindowsSDKIncludeVersion))
+ if (!getWindows10SDKVersionFromPath(VFS, Path, WindowsSDKIncludeVersion))
return false;
WindowsSDKLibVersion = WindowsSDKIncludeVersion;
return true;
@@ -1133,15 +1090,16 @@ static bool getWindowsSDKDir(std::string &Path, int &Major,
}
// Gets the library path required to link against the Windows SDK.
-bool MSVCToolChain::getWindowsSDKLibraryPath(std::string &path) const {
+bool MSVCToolChain::getWindowsSDKLibraryPath(
+ const ArgList &Args, std::string &path) const {
std::string sdkPath;
int sdkMajor = 0;
std::string windowsSDKIncludeVersion;
std::string windowsSDKLibVersion;
path.clear();
- if (!getWindowsSDKDir(sdkPath, sdkMajor, windowsSDKIncludeVersion,
- windowsSDKLibVersion))
+ if (!getWindowsSDKDir(getVFS(), Args, sdkPath, sdkMajor,
+ windowsSDKIncludeVersion, windowsSDKLibVersion))
return false;
llvm::SmallString<128> libPath(sdkPath);
@@ -1175,10 +1133,21 @@ bool MSVCToolChain::useUniversalCRT() const {
llvm::SmallString<128> TestPath(
getSubDirectoryPath(SubDirectoryType::Include));
llvm::sys::path::append(TestPath, "stdlib.h");
- return !llvm::sys::fs::exists(TestPath);
+ return !getVFS().exists(TestPath);
}
-static bool getUniversalCRTSdkDir(std::string &Path, std::string &UCRTVersion) {
+static bool getUniversalCRTSdkDir(llvm::vfs::FileSystem &VFS,
+ const ArgList &Args, std::string &Path,
+ std::string &UCRTVersion) {
+ // If /winsdkdir is passed, use it as location for the UCRT too.
+ // FIXME: Should there be a dedicated /ucrtdir to override /winsdkdir?
+ int Major;
+ if (getWindowsSDKDirViaCommandLine(VFS, Args, Path, Major, UCRTVersion))
+ return true;
+
+ // FIXME: Try env vars (%UniversalCRTSdkDir%, %UCRTVersion%) before going to
+ // registry.
+
// vcvarsqueryregistry.bat for Visual Studio 2015 queries the registry
// for the specific key "KitsRoot10". So do we.
if (!getSystemRegistryString(
@@ -1186,15 +1155,16 @@ static bool getUniversalCRTSdkDir(std::string &Path, std::string &UCRTVersion) {
Path, nullptr))
return false;
- return getWindows10SDKVersionFromPath(Path, UCRTVersion);
+ return getWindows10SDKVersionFromPath(VFS, Path, UCRTVersion);
}
-bool MSVCToolChain::getUniversalCRTLibraryPath(std::string &Path) const {
+bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
+ std::string &Path) const {
std::string UniversalCRTSdkPath;
std::string UCRTVersion;
Path.clear();
- if (!getUniversalCRTSdkDir(UniversalCRTSdkPath, UCRTVersion))
+ if (!getUniversalCRTSdkDir(getVFS(), Args, UniversalCRTSdkPath, UCRTVersion))
return false;
StringRef ArchName = llvmArchToWindowsSDKArch(getArch());
@@ -1275,22 +1245,35 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
for (const auto &Path : DriverArgs.getAllArgValues(options::OPT__SLASH_imsvc))
addSystemInclude(DriverArgs, CC1Args, Path);
+ auto AddSystemIncludesFromEnv = [&](StringRef Var) -> bool {
+ if (auto Val = llvm::sys::Process::GetEnv(Var)) {
+ SmallVector<StringRef, 8> Dirs;
+ StringRef(*Val).split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ if (!Dirs.empty()) {
+ addSystemIncludes(DriverArgs, CC1Args, Dirs);
+ return true;
+ }
+ }
+ return false;
+ };
+
+ // Add %INCLUDE%-like dirs via /external:env: flags.
+ for (const auto &Var :
+ DriverArgs.getAllArgValues(options::OPT__SLASH_external_env)) {
+ AddSystemIncludesFromEnv(Var);
+ }
+
if (DriverArgs.hasArg(options::OPT_nostdlibinc))
return;
- // Honor %INCLUDE%. It should know essential search paths with vcvarsall.bat.
- // Skip if the user expressly set a vctoolsdir
- if (!DriverArgs.getLastArg(options::OPT__SLASH_vctoolsdir)) {
- if (llvm::Optional<std::string> cl_include_dir =
- llvm::sys::Process::GetEnv("INCLUDE")) {
- SmallVector<StringRef, 8> Dirs;
- StringRef(*cl_include_dir)
- .split(Dirs, ";", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
- for (StringRef Dir : Dirs)
- addSystemInclude(DriverArgs, CC1Args, Dir);
- if (!Dirs.empty())
- return;
- }
+ // Honor %INCLUDE% and %EXTERNAL_INCLUDE%. It should have essential search
+ // paths set by vcvarsall.bat. Skip if the user expressly set a vctoolsdir.
+ if (!DriverArgs.getLastArg(options::OPT__SLASH_vctoolsdir,
+ options::OPT__SLASH_winsysroot)) {
+ bool Found = AddSystemIncludesFromEnv("INCLUDE");
+ Found |= AddSystemIncludesFromEnv("EXTERNAL_INCLUDE");
+ if (Found)
+ return;
}
// When built with access to the proper Windows APIs, try to actually find
@@ -1304,33 +1287,34 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (useUniversalCRT()) {
std::string UniversalCRTSdkPath;
std::string UCRTVersion;
- if (getUniversalCRTSdkDir(UniversalCRTSdkPath, UCRTVersion)) {
+ if (getUniversalCRTSdkDir(getVFS(), DriverArgs, UniversalCRTSdkPath,
+ UCRTVersion)) {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, UniversalCRTSdkPath,
"Include", UCRTVersion, "ucrt");
}
}
std::string WindowsSDKDir;
- int major;
+ int major = 0;
std::string windowsSDKIncludeVersion;
std::string windowsSDKLibVersion;
- if (getWindowsSDKDir(WindowsSDKDir, major, windowsSDKIncludeVersion,
- windowsSDKLibVersion)) {
+ if (getWindowsSDKDir(getVFS(), DriverArgs, WindowsSDKDir, major,
+ windowsSDKIncludeVersion, windowsSDKLibVersion)) {
if (major >= 8) {
// Note: windowsSDKIncludeVersion is empty for SDKs prior to v10.
// Anyway, llvm::sys::path::append is able to manage it.
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include", windowsSDKIncludeVersion,
+ "Include", windowsSDKIncludeVersion,
"shared");
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include", windowsSDKIncludeVersion,
+ "Include", windowsSDKIncludeVersion,
"um");
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include", windowsSDKIncludeVersion,
+ "Include", windowsSDKIncludeVersion,
"winrt");
} else {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
- "include");
+ "Include");
}
}
@@ -1367,8 +1351,8 @@ VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
if (MSVT.empty() &&
Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC)) {
- // -fms-compatibility-version=19.11 is default, aka 2017, 15.3
- MSVT = VersionTuple(19, 11);
+ // -fms-compatibility-version=19.14 is default, aka 2017, 15.7
+ MSVT = VersionTuple(19, 14);
}
return MSVT;
}
@@ -1519,6 +1503,18 @@ static void TranslateDArg(Arg *A, llvm::opt::DerivedArgList &DAL,
DAL.AddJoinedArg(A, Opts.getOption(options::OPT_D), NewVal);
}
+static void TranslatePermissive(Arg *A, llvm::opt::DerivedArgList &DAL,
+ const OptTable &Opts) {
+ DAL.AddFlagArg(A, Opts.getOption(options::OPT__SLASH_Zc_twoPhase_));
+ DAL.AddFlagArg(A, Opts.getOption(options::OPT_fno_operator_names));
+}
+
+static void TranslatePermissiveMinus(Arg *A, llvm::opt::DerivedArgList &DAL,
+ const OptTable &Opts) {
+ DAL.AddFlagArg(A, Opts.getOption(options::OPT__SLASH_Zc_twoPhase));
+ DAL.AddFlagArg(A, Opts.getOption(options::OPT_foperator_names));
+}
+
llvm::opt::DerivedArgList *
MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
StringRef BoundArch,
@@ -1561,6 +1557,12 @@ MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
} else if (A->getOption().matches(options::OPT_D)) {
// Translate -Dfoo#bar into -Dfoo=bar.
TranslateDArg(A, *DAL, Opts);
+ } else if (A->getOption().matches(options::OPT__SLASH_permissive)) {
+ // Expand /permissive
+ TranslatePermissive(A, *DAL, Opts);
+ } else if (A->getOption().matches(options::OPT__SLASH_permissive_)) {
+ // Expand /permissive-
+ TranslatePermissiveMinus(A, *DAL, Opts);
} else if (OFK != Action::OFK_HIP) {
// HIP Toolchain translates input args by itself.
DAL->append(A);
@@ -1569,3 +1571,13 @@ MSVCToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
return DAL;
}
+
+void MSVCToolChain::addClangTargetOptions(
+ const ArgList &DriverArgs, ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const {
+ // MSVC STL kindly allows removing all usages of typeid by defining
+  // _HAS_STATIC_RTTI to 0. Do so when compiling with -fno-rtti.
+ if (DriverArgs.hasArg(options::OPT_fno_rtti, options::OPT_frtti,
+ /*Default=*/false))
+ CC1Args.push_back("-D_HAS_STATIC_RTTI=0");
+}
diff --git a/clang/lib/Driver/ToolChains/MSVC.h b/clang/lib/Driver/ToolChains/MSVC.h
index dba99ed77246..19d94c5c606e 100644
--- a/clang/lib/Driver/ToolChains/MSVC.h
+++ b/clang/lib/Driver/ToolChains/MSVC.h
@@ -34,27 +34,6 @@ public:
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-
-class LLVM_LIBRARY_VISIBILITY Compiler : public Tool {
-public:
- Compiler(const ToolChain &TC)
- : Tool("visualstudio::Compiler", "compiler", TC) {}
-
- bool hasIntegratedAssembler() const override { return true; }
- bool hasIntegratedCPP() const override { return true; }
- bool isLinkJob() const override { return false; }
-
- void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output, const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const override;
-
- std::unique_ptr<Command> GetCommand(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const llvm::opt::ArgList &TCArgs,
- const char *LinkingOutput) const;
-};
} // end namespace visualstudio
} // end namespace tools
@@ -126,9 +105,10 @@ public:
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- bool getWindowsSDKLibraryPath(std::string &path) const;
- /// Check if Universal CRT should be used if available
- bool getUniversalCRTLibraryPath(std::string &path) const;
+ bool getWindowsSDKLibraryPath(
+ const llvm::opt::ArgList &Args, std::string &path) const;
+ bool getUniversalCRTLibraryPath(const llvm::opt::ArgList &Args,
+ std::string &path) const;
bool useUniversalCRT() const;
VersionTuple
computeMSVCVersion(const Driver *D,
@@ -142,6 +122,11 @@ public:
bool FoundMSVCInstall() const { return !VCToolChainPath.empty(); }
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
+
protected:
void AddSystemIncludeWithSubfolder(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
diff --git a/clang/lib/Driver/ToolChains/MinGW.cpp b/clang/lib/Driver/ToolChains/MinGW.cpp
index f6cead412236..20efbdc237a8 100644
--- a/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -7,12 +7,12 @@
//===----------------------------------------------------------------------===//
#include "MinGW.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
@@ -338,6 +338,7 @@ static bool findGccVersion(StringRef LibDir, std::string &GccLibDir,
continue;
if (CandidateVersion <= Version)
continue;
+ Version = CandidateVersion;
Ver = std::string(VersionText);
GccLibDir = LI->path();
}
@@ -426,7 +427,7 @@ toolchains::MinGW::MinGW(const Driver &D, const llvm::Triple &Triple,
NativeLLVMSupport =
Args.getLastArgValue(options::OPT_fuse_ld_EQ, CLANG_DEFAULT_LINKER)
- .equals_lower("lld");
+ .equals_insensitive("lld");
}
bool toolchains::MinGW::IsIntegratedAssemblerDefault() const { return true; }
diff --git a/clang/lib/Driver/ToolChains/Minix.cpp b/clang/lib/Driver/ToolChains/Minix.cpp
index 44479a24ebe7..5bceb9aba3e9 100644
--- a/clang/lib/Driver/ToolChains/Minix.cpp
+++ b/clang/lib/Driver/ToolChains/Minix.cpp
@@ -8,9 +8,9 @@
#include "Minix.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/VirtualFileSystem.h"
diff --git a/clang/lib/Driver/ToolChains/Myriad.cpp b/clang/lib/Driver/ToolChains/Myriad.cpp
index ab0df5d8f168..f31466633104 100644
--- a/clang/lib/Driver/ToolChains/Myriad.cpp
+++ b/clang/lib/Driver/ToolChains/Myriad.cpp
@@ -260,7 +260,7 @@ void MyriadToolChain::addLibStdCxxIncludePaths(
const Multilib &Multilib = GCCInstallation.getMultilib();
addLibStdCXXIncludePaths(
LibDir.str() + "/../" + TripleStr.str() + "/include/c++/" + Version.Text,
- "", TripleStr, "", "", Multilib.includeSuffix(), DriverArgs, CC1Args);
+ TripleStr, Multilib.includeSuffix(), DriverArgs, CC1Args);
}
// MyriadToolChain handles several triples:
diff --git a/clang/lib/Driver/ToolChains/NaCl.cpp b/clang/lib/Driver/ToolChains/NaCl.cpp
index 8a150c394753..753459cb230b 100644
--- a/clang/lib/Driver/ToolChains/NaCl.cpp
+++ b/clang/lib/Driver/ToolChains/NaCl.cpp
@@ -7,11 +7,11 @@
//===----------------------------------------------------------------------===//
#include "NaCl.h"
-#include "InputInfo.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
diff --git a/clang/lib/Driver/ToolChains/NetBSD.cpp b/clang/lib/Driver/ToolChains/NetBSD.cpp
index 48bf061c6650..1ce5a2a203c2 100644
--- a/clang/lib/Driver/ToolChains/NetBSD.cpp
+++ b/clang/lib/Driver/ToolChains/NetBSD.cpp
@@ -452,8 +452,8 @@ void NetBSD::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
void NetBSD::addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const {
- addLibStdCXXIncludePaths(getDriver().SysRoot, "/usr/include/g++", "", "", "",
- "", DriverArgs, CC1Args);
+ addLibStdCXXIncludePaths(getDriver().SysRoot + "/usr/include/g++", "", "",
+ DriverArgs, CC1Args);
}
llvm::ExceptionHandling NetBSD::GetExceptionModel(const ArgList &Args) const {
diff --git a/clang/lib/Driver/ToolChains/OpenBSD.cpp b/clang/lib/Driver/ToolChains/OpenBSD.cpp
index f155d74632f9..e162165b2561 100644
--- a/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -296,6 +296,7 @@ void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back(Profiling ? "-lc++_p" : "-lc++");
CmdArgs.push_back(Profiling ? "-lc++abi_p" : "-lc++abi");
+ CmdArgs.push_back(Profiling ? "-lpthread_p" : "-lpthread");
}
std::string OpenBSD::getCompilerRT(const ArgList &Args,
diff --git a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index 0dc12c7a84b5..714325a2db39 100644
--- a/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -8,8 +8,8 @@
#include "RISCVToolchain.h"
#include "CommonArgs.h"
-#include "InputInfo.h"
#include "clang/Driver/Compilation.h"
+#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
@@ -112,7 +112,8 @@ void RISCVToolChain::addLibStdCxxIncludePaths(
StringRef TripleStr = GCCInstallation.getTriple().str();
const Multilib &Multilib = GCCInstallation.getMultilib();
addLibStdCXXIncludePaths(computeSysRoot() + "/include/c++/" + Version.Text,
- "", TripleStr, "", "", Multilib.includeSuffix(), DriverArgs, CC1Args);
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args);
}
std::string RISCVToolChain::computeSysRoot() const {
@@ -180,14 +181,15 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
}
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_u);
ToolChain.AddFilePathLibArgs(Args, CmdArgs);
Args.AddAllArgs(CmdArgs,
{options::OPT_T_Group, options::OPT_e, options::OPT_s,
options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
-
// TODO: add C++ includes and libs if compiling C++.
if (!Args.hasArg(options::OPT_nostdlib) &&
diff --git a/clang/lib/Driver/ToolChains/ROCm.h b/clang/lib/Driver/ToolChains/ROCm.h
index 21e62a465d7b..bb482be68260 100644
--- a/clang/lib/Driver/ToolChains/ROCm.h
+++ b/clang/lib/Driver/ToolChains/ROCm.h
@@ -42,9 +42,16 @@ private:
struct Candidate {
llvm::SmallString<0> Path;
bool StrictChecking;
-
- Candidate(std::string Path, bool StrictChecking = false)
- : Path(Path), StrictChecking(StrictChecking) {}
+ // Release string for ROCm packages built with SPACK if not empty. The
+ // installation directories of ROCm packages built with SPACK follow the
+ // convention <package_name>-<rocm_release_string>-<hash>.
+ std::string SPACKReleaseStr;
+
+ bool isSPACK() const { return !SPACKReleaseStr.empty(); }
+ Candidate(std::string Path, bool StrictChecking = false,
+ StringRef SPACKReleaseStr = {})
+ : Path(Path), StrictChecking(StrictChecking),
+ SPACKReleaseStr(SPACKReleaseStr.str()) {}
};
const Driver &D;
@@ -67,6 +74,8 @@ private:
StringRef RocmPathArg;
// ROCm device library paths specified by --rocm-device-lib-path.
std::vector<std::string> RocmDeviceLibPathArg;
+ // HIP runtime path specified by --hip-path.
+ StringRef HIPPathArg;
// HIP version specified by --hip-version.
StringRef HIPVersionArg;
// Wheter -nogpulib is specified.
@@ -88,6 +97,9 @@ private:
SmallString<0> OpenCL;
SmallString<0> HIP;
+ // Asan runtime library
+ SmallString<0> AsanRTL;
+
// Libraries swapped based on compile flags.
ConditionalLibrary WavefrontSize64;
ConditionalLibrary FiniteOnly;
@@ -95,6 +107,11 @@ private:
ConditionalLibrary DenormalsAreZero;
ConditionalLibrary CorrectlyRoundedSqrt;
+ // Cache ROCm installation search paths.
+ SmallVector<Candidate, 4> ROCmSearchDirs;
+ bool PrintROCmSearchDirs;
+ bool Verbose;
+
bool allGenericLibsValid() const {
return !OCML.empty() && !OCKL.empty() && !OpenCL.empty() && !HIP.empty() &&
WavefrontSize64.isValid() && FiniteOnly.isValid() &&
@@ -104,7 +121,14 @@ private:
void scanLibDevicePath(llvm::StringRef Path);
bool parseHIPVersionFile(llvm::StringRef V);
- SmallVector<Candidate, 4> getInstallationPathCandidates();
+ const SmallVectorImpl<Candidate> &getInstallationPathCandidates();
+
+ /// Find the path to a SPACK package under the ROCm candidate installation
+ /// directory if the candidate is a SPACK ROCm candidate. \returns empty
+  /// string if the candidate is not a SPACK ROCm candidate or the requested
+ /// package is not found.
+ llvm::SmallString<0> findSPACKPackage(const Candidate &Cand,
+ StringRef PackageName);
public:
RocmInstallationDetector(const Driver &D, const llvm::Triple &HostTriple,
@@ -112,12 +136,13 @@ public:
bool DetectHIPRuntime = true,
bool DetectDeviceLib = false);
- /// Add arguments needed to link default bitcode libraries.
- void addCommonBitcodeLibCC1Args(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- StringRef LibDeviceFile, bool Wave64,
- bool DAZ, bool FiniteOnly, bool UnsafeMathOpt,
- bool FastRelaxedMath, bool CorrectSqrt) const;
+ /// Get file paths of default bitcode libraries common to AMDGPU based
+ /// toolchains.
+ llvm::SmallVector<std::string, 12>
+ getCommonBitcodeLibs(const llvm::opt::ArgList &DriverArgs,
+ StringRef LibDeviceFile, bool Wave64, bool DAZ,
+ bool FiniteOnly, bool UnsafeMathOpt,
+ bool FastRelaxedMath, bool CorrectSqrt) const;
/// Check whether we detected a valid HIP runtime.
bool hasHIPRuntime() const { return HasHIPRuntime; }
@@ -166,6 +191,9 @@ public:
return HIP;
}
+  /// Returns an empty string if the Asan runtime library is not available.
+ StringRef getAsanRTLPath() const { return AsanRTL; }
+
StringRef getWavefrontSize64Path(bool Enabled) const {
return WavefrontSize64.get(Enabled);
}
diff --git a/clang/lib/Driver/ToolChains/Solaris.cpp b/clang/lib/Driver/ToolChains/Solaris.cpp
index 4ed4d839ad10..4d1af094f481 100644
--- a/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -283,9 +283,7 @@ void Solaris::addLibStdCxxIncludePaths(
const GCCVersion &Version = GCCInstallation.getVersion();
// The primary search for libstdc++ supports multiarch variants.
- addLibStdCXXIncludePaths(LibDir.str() + "/../include", "/c++/" + Version.Text,
- TripleStr,
- /*GCCMultiarchTriple*/ "",
- /*TargetMultiarchTriple*/ "",
- Multilib.includeSuffix(), DriverArgs, CC1Args);
+ addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args);
}
diff --git a/clang/lib/Driver/ToolChains/WebAssembly.cpp b/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 6b654886e774..19f3571e6b38 100644
--- a/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -26,9 +26,9 @@ using namespace llvm::opt;
/// Following the conventions in https://wiki.debian.org/Multiarch/Tuples,
/// we remove the vendor field to form the multiarch triple.
-static std::string getMultiarchTriple(const Driver &D,
- const llvm::Triple &TargetTriple,
- StringRef SysRoot) {
+std::string WebAssembly::getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const {
return (TargetTriple.getArchName() + "-" +
TargetTriple.getOSAndEnvironmentName()).str();
}
@@ -77,6 +77,16 @@ void wasm::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Crt1 = "crt1.o";
const char *Entry = NULL;
+
+ // If crt1-command.o exists, it supports new-style commands, so use it.
+ // Otherwise, use the old crt1.o. This is a temporary transition measure.
+ // Once WASI libc no longer needs to support LLVM versions which lack
+ // support for new-style command, it can make crt1.o the same as
+ // crt1-command.o. And once LLVM no longer needs to support WASI libc
+ // versions before that, it can switch to using crt1-command.o.
+ if (ToolChain.GetFilePath("crt1-command.o") != "crt1-command.o")
+ Crt1 = "crt1-command.o";
+
if (const Arg *A = Args.getLastArg(options::OPT_mexec_model_EQ)) {
StringRef CM = A->getValue();
if (CM == "command") {
@@ -272,12 +282,6 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
getDriver().Diag(diag::err_drv_argument_not_allowed_with)
<< "-fwasm-exceptions"
<< "-mno-exception-handling";
- // '-fwasm-exceptions' is not compatible with '-mno-reference-types'
- if (DriverArgs.hasFlag(options::OPT_mno_reference_types,
- options::OPT_mexception_handing, false))
- getDriver().Diag(diag::err_drv_argument_not_allowed_with)
- << "-fwasm-exceptions"
- << "-mno-reference-types";
// '-fwasm-exceptions' is not compatible with
// '-mllvm -enable-emscripten-cxx-exceptions'
for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
@@ -286,11 +290,39 @@ void WebAssembly::addClangTargetOptions(const ArgList &DriverArgs,
<< "-fwasm-exceptions"
<< "-mllvm -enable-emscripten-cxx-exceptions";
}
- // '-fwasm-exceptions' implies exception-handling and reference-types
+ // '-fwasm-exceptions' implies exception-handling feature
CC1Args.push_back("-target-feature");
CC1Args.push_back("+exception-handling");
- CC1Args.push_back("-target-feature");
- CC1Args.push_back("+reference-types");
+ }
+
+ for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
+ StringRef Opt = A->getValue(0);
+ if (Opt.startswith("-emscripten-cxx-exceptions-allowed")) {
+ // '-mllvm -emscripten-cxx-exceptions-allowed' should be used with
+ // '-mllvm -enable-emscripten-cxx-exceptions'
+ bool EmExceptionArgExists = false;
+ for (const Arg *A : DriverArgs.filtered(options::OPT_mllvm)) {
+ if (StringRef(A->getValue(0)) == "-enable-emscripten-cxx-exceptions") {
+ EmExceptionArgExists = true;
+ break;
+ }
+ }
+ if (!EmExceptionArgExists)
+ getDriver().Diag(diag::err_drv_argument_only_allowed_with)
+ << "-mllvm -emscripten-cxx-exceptions-allowed"
+ << "-mllvm -enable-emscripten-cxx-exceptions";
+
+ // Prevent functions specified in -emscripten-cxx-exceptions-allowed list
+ // from being inlined before reaching the wasm backend.
+ StringRef FuncNamesStr = Opt.split('=').second;
+ SmallVector<StringRef, 4> FuncNames;
+ FuncNamesStr.split(FuncNames, ',');
+ for (auto Name : FuncNames) {
+ CC1Args.push_back("-mllvm");
+ CC1Args.push_back(DriverArgs.MakeArgString("--force-attribute=" + Name +
+ ":noinline"));
+ }
+ }
}
}
diff --git a/clang/lib/Driver/ToolChains/WebAssembly.h b/clang/lib/Driver/ToolChains/WebAssembly.h
index 616bfb5d3d0c..8a3f82d9efdf 100644
--- a/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -70,6 +70,10 @@ private:
const char *getDefaultLinker() const override { return "wasm-ld"; }
Tool *buildLinker() const override;
+
+ std::string getMultiarchTriple(const Driver &D,
+ const llvm::Triple &TargetTriple,
+ StringRef SysRoot) const override;
};
} // end namespace toolchains
diff --git a/clang/lib/Driver/Types.cpp b/clang/lib/Driver/Types.cpp
index e898334c3227..3cb2d6e8f6fd 100644
--- a/clang/lib/Driver/Types.cpp
+++ b/clang/lib/Driver/Types.cpp
@@ -126,7 +126,7 @@ bool types::isAcceptedByClang(ID Id) {
case TY_Asm:
case TY_C: case TY_PP_C:
- case TY_CL:
+ case TY_CL: case TY_CLCXX:
case TY_CUDA: case TY_PP_CUDA:
case TY_CUDA_DEVICE:
case TY_HIP:
@@ -147,6 +147,45 @@ bool types::isAcceptedByClang(ID Id) {
}
}
+bool types::isDerivedFromC(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_PP_C:
+ case TY_C:
+ case TY_CL:
+ case TY_CLCXX:
+ case TY_PP_CUDA:
+ case TY_CUDA:
+ case TY_CUDA_DEVICE:
+ case TY_PP_HIP:
+ case TY_HIP:
+ case TY_HIP_DEVICE:
+ case TY_PP_ObjC:
+ case TY_PP_ObjC_Alias:
+ case TY_ObjC:
+ case TY_PP_CXX:
+ case TY_CXX:
+ case TY_PP_ObjCXX:
+ case TY_PP_ObjCXX_Alias:
+ case TY_ObjCXX:
+ case TY_RenderScript:
+ case TY_PP_CHeader:
+ case TY_CHeader:
+ case TY_CLHeader:
+ case TY_PP_ObjCHeader:
+ case TY_ObjCHeader:
+ case TY_PP_CXXHeader:
+ case TY_CXXHeader:
+ case TY_PP_ObjCXXHeader:
+ case TY_ObjCXXHeader:
+ case TY_CXXModule:
+ case TY_PP_CXXModule:
+ return true;
+ }
+}
+
bool types::isObjC(ID Id) {
switch (Id) {
default:
@@ -160,6 +199,8 @@ bool types::isObjC(ID Id) {
}
}
+bool types::isOpenCL(ID Id) { return Id == TY_CL || Id == TY_CLCXX; }
+
bool types::isCXX(ID Id) {
switch (Id) {
default:
@@ -247,6 +288,7 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("cc", TY_CXX)
.Case("CC", TY_CXX)
.Case("cl", TY_CL)
+ .Case("clcpp", TY_CLCXX)
.Case("cp", TY_CXX)
.Case("cu", TY_CUDA)
.Case("hh", TY_CXXHeader)
@@ -325,12 +367,10 @@ types::getCompilationPhases(const clang::driver::Driver &Driver,
// Filter to compiler mode. When the compiler is run as a preprocessor then
// compilation is not an option.
// -S runs the compiler in Assembly listing mode.
- // -test-io is used by Flang to run InputOutputTest action
if (Driver.CCCIsCPP() || DAL.getLastArg(options::OPT_E) ||
DAL.getLastArg(options::OPT__SLASH_EP) ||
DAL.getLastArg(options::OPT_M, options::OPT_MM) ||
- DAL.getLastArg(options::OPT__SLASH_P) ||
- DAL.getLastArg(options::OPT_test_io))
+ DAL.getLastArg(options::OPT__SLASH_P))
LastPhase = phases::Preprocess;
// --precompile only runs up to precompilation.
@@ -396,6 +436,7 @@ ID types::lookupHeaderTypeForSourceType(ID Id) {
case types::TY_ObjCXX:
return types::TY_ObjCXXHeader;
case types::TY_CL:
+ case types::TY_CLCXX:
return types::TY_CLHeader;
}
}
diff --git a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index 7565626cba99..a3d388a5ae44 100644
--- a/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -1080,6 +1080,7 @@ static bool rewriteToNumericBoxedExpression(const ObjCMessageExpr *Msg,
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLOpaqueType:
case CK_IntToOCLSampler:
+ case CK_MatrixCast:
return false;
case CK_BooleanToSignedIntegral:
diff --git a/clang/lib/Format/BreakableToken.cpp b/clang/lib/Format/BreakableToken.cpp
index f179ac64de17..455904895848 100644
--- a/clang/lib/Format/BreakableToken.cpp
+++ b/clang/lib/Format/BreakableToken.cpp
@@ -757,6 +757,9 @@ BreakableLineCommentSection::BreakableLineCommentSection(
assert(Tok.is(TT_LineComment) &&
"line comment section must start with a line comment");
FormatToken *LineTok = nullptr;
+  // How many spaces we changed in the first line of the section; this change
+  // is applied to all following lines of the section.
+ int FirstLineSpaceChange = 0;
for (const FormatToken *CurrentTok = &Tok;
CurrentTok && CurrentTok->is(TT_LineComment);
CurrentTok = CurrentTok->Next) {
@@ -768,44 +771,72 @@ BreakableLineCommentSection::BreakableLineCommentSection(
TokenText.split(Lines, "\n");
Content.resize(Lines.size());
ContentColumn.resize(Lines.size());
- OriginalContentColumn.resize(Lines.size());
+ PrefixSpaceChange.resize(Lines.size());
Tokens.resize(Lines.size());
Prefix.resize(Lines.size());
OriginalPrefix.resize(Lines.size());
for (size_t i = FirstLineIndex, e = Lines.size(); i < e; ++i) {
Lines[i] = Lines[i].ltrim(Blanks);
StringRef IndentPrefix = getLineCommentIndentPrefix(Lines[i], Style);
- assert((TokenText.startswith("//") || TokenText.startswith("#")) &&
- "unsupported line comment prefix, '//' and '#' are supported");
- OriginalPrefix[i] = Prefix[i] = IndentPrefix;
- if (Lines[i].size() > Prefix[i].size() &&
- isAlphanumeric(Lines[i][Prefix[i].size()])) {
- if (Prefix[i] == "//")
- Prefix[i] = "// ";
- else if (Prefix[i] == "///")
- Prefix[i] = "/// ";
- else if (Prefix[i] == "//!")
- Prefix[i] = "//! ";
- else if (Prefix[i] == "///<")
- Prefix[i] = "///< ";
- else if (Prefix[i] == "//!<")
- Prefix[i] = "//!< ";
- else if (Prefix[i] == "#")
- Prefix[i] = "# ";
- else if (Prefix[i] == "##")
- Prefix[i] = "## ";
- else if (Prefix[i] == "###")
- Prefix[i] = "### ";
- else if (Prefix[i] == "####")
- Prefix[i] = "#### ";
+ OriginalPrefix[i] = IndentPrefix;
+ const unsigned SpacesInPrefix =
+ std::count(IndentPrefix.begin(), IndentPrefix.end(), ' ');
+
+ // On the first line of the comment section we calculate how many spaces
+ // are to be added or removed, all lines after that just get only the
+ // change and we will not look at the maximum anymore. Additionally to the
+ // actual first line, we calculate that when the non space Prefix changes,
+ // e.g. from "///" to "//".
+ if (i == 0 || OriginalPrefix[i].rtrim(Blanks) !=
+ OriginalPrefix[i - 1].rtrim(Blanks)) {
+ if (SpacesInPrefix < Style.SpacesInLineCommentPrefix.Minimum &&
+ Lines[i].size() > IndentPrefix.size() &&
+ isAlphanumeric(Lines[i][IndentPrefix.size()])) {
+ FirstLineSpaceChange =
+ Style.SpacesInLineCommentPrefix.Minimum - SpacesInPrefix;
+ } else if (SpacesInPrefix > Style.SpacesInLineCommentPrefix.Maximum) {
+ FirstLineSpaceChange =
+ Style.SpacesInLineCommentPrefix.Maximum - SpacesInPrefix;
+ } else {
+ FirstLineSpaceChange = 0;
+ }
+ }
+
+ if (Lines[i].size() != IndentPrefix.size()) {
+ PrefixSpaceChange[i] = FirstLineSpaceChange;
+
+ if (SpacesInPrefix + PrefixSpaceChange[i] <
+ Style.SpacesInLineCommentPrefix.Minimum) {
+ PrefixSpaceChange[i] += Style.SpacesInLineCommentPrefix.Minimum -
+ (SpacesInPrefix + PrefixSpaceChange[i]);
+ }
+
+ assert(Lines[i].size() > IndentPrefix.size());
+ const auto FirstNonSpace = Lines[i][IndentPrefix.size()];
+ const auto AllowsSpaceChange =
+ SpacesInPrefix != 0 ||
+ (isAlphanumeric(FirstNonSpace) ||
+ (FirstNonSpace == '}' && FirstLineSpaceChange != 0));
+
+ if (PrefixSpaceChange[i] > 0 && AllowsSpaceChange) {
+ Prefix[i] = IndentPrefix.str();
+ Prefix[i].append(PrefixSpaceChange[i], ' ');
+ } else if (PrefixSpaceChange[i] < 0 && AllowsSpaceChange) {
+ Prefix[i] = IndentPrefix
+ .drop_back(std::min<std::size_t>(
+ -PrefixSpaceChange[i], SpacesInPrefix))
+ .str();
+ } else {
+ Prefix[i] = IndentPrefix.str();
+ }
+ } else {
+  // If the IndentPrefix is the whole line, there is no content and we
+  // just drop all the trailing spaces.
+ Prefix[i] = IndentPrefix.drop_back(SpacesInPrefix).str();
}
Tokens[i] = LineTok;
Content[i] = Lines[i].substr(IndentPrefix.size());
- OriginalContentColumn[i] =
- StartColumn + encoding::columnWidthWithTabs(OriginalPrefix[i],
- StartColumn,
- Style.TabWidth, Encoding);
ContentColumn[i] =
StartColumn + encoding::columnWidthWithTabs(Prefix[i], StartColumn,
Style.TabWidth, Encoding);
@@ -848,10 +879,9 @@ BreakableLineCommentSection::getRangeLength(unsigned LineIndex, unsigned Offset,
Encoding);
}
-unsigned BreakableLineCommentSection::getContentStartColumn(unsigned LineIndex,
- bool Break) const {
- if (Break)
- return OriginalContentColumn[LineIndex];
+unsigned
+BreakableLineCommentSection::getContentStartColumn(unsigned LineIndex,
+ bool /*Break*/) const {
return ContentColumn[LineIndex];
}
@@ -864,16 +894,10 @@ void BreakableLineCommentSection::insertBreak(
unsigned BreakOffsetInToken =
Text.data() - tokenAt(LineIndex).TokenText.data() + Split.first;
unsigned CharsToRemove = Split.second;
- // Compute the size of the new indent, including the size of the new prefix of
- // the newly broken line.
- unsigned IndentAtLineBreak = OriginalContentColumn[LineIndex] +
- Prefix[LineIndex].size() -
- OriginalPrefix[LineIndex].size();
- assert(IndentAtLineBreak >= Prefix[LineIndex].size());
Whitespaces.replaceWhitespaceInToken(
tokenAt(LineIndex), BreakOffsetInToken, CharsToRemove, "",
Prefix[LineIndex], InPPDirective, /*Newlines=*/1,
- /*Spaces=*/IndentAtLineBreak - Prefix[LineIndex].size());
+ /*Spaces=*/ContentColumn[LineIndex] - Prefix[LineIndex].size());
}
BreakableComment::Split BreakableLineCommentSection::getReflowSplit(
@@ -971,14 +995,12 @@ void BreakableLineCommentSection::adaptStartOfLine(
}
if (OriginalPrefix[LineIndex] != Prefix[LineIndex]) {
// Adjust the prefix if necessary.
-
- // Take care of the space possibly introduced after a decoration.
- assert(Prefix[LineIndex] == (OriginalPrefix[LineIndex] + " ").str() &&
- "Expecting a line comment prefix to differ from original by at most "
- "a space");
+ const auto SpacesToRemove = -std::min(PrefixSpaceChange[LineIndex], 0);
+ const auto SpacesToAdd = std::max(PrefixSpaceChange[LineIndex], 0);
Whitespaces.replaceWhitespaceInToken(
- tokenAt(LineIndex), OriginalPrefix[LineIndex].size(), 0, "", "",
- /*InPPDirective=*/false, /*Newlines=*/0, /*Spaces=*/1);
+ tokenAt(LineIndex), OriginalPrefix[LineIndex].size() - SpacesToRemove,
+ /*ReplaceChars=*/SpacesToRemove, "", "", /*InPPDirective=*/false,
+ /*Newlines=*/0, /*Spaces=*/SpacesToAdd);
}
}
diff --git a/clang/lib/Format/BreakableToken.h b/clang/lib/Format/BreakableToken.h
index 41b19f82e9df..190144ad1be9 100644
--- a/clang/lib/Format/BreakableToken.h
+++ b/clang/lib/Format/BreakableToken.h
@@ -465,15 +465,23 @@ private:
// then the original prefix is "// ".
SmallVector<StringRef, 16> OriginalPrefix;
- // Prefix[i] contains the intended leading "//" with trailing spaces to
- // account for the indentation of content within the comment at line i after
- // formatting. It can be different than the original prefix when the original
- // line starts like this:
- // //content
- // Then the original prefix is "//", but the prefix is "// ".
- SmallVector<StringRef, 16> Prefix;
-
- SmallVector<unsigned, 16> OriginalContentColumn;
+ /// Prefix[i] + SpacesToAdd[i] contains the intended leading "//" with
+ /// trailing spaces to account for the indentation of content within the
+ /// comment at line i after formatting. It can be different than the original
+ /// prefix.
+ /// When the original line starts like this:
+ /// //content
+ /// Then the OriginalPrefix[i] is "//", but the Prefix[i] is "// " in the LLVM
+ /// style.
+ /// When the line starts like:
+ /// // content
+  /// And we want to remove the spaces, then OriginalPrefix[i] is "// " and
+ /// Prefix[i] is "//".
+ SmallVector<std::string, 16> Prefix;
+
+ /// How many spaces are added or removed from the OriginalPrefix to form
+ /// Prefix.
+ SmallVector<int, 16> PrefixSpaceChange;
/// The token to which the last line of this breakable token belongs
/// to; nullptr if that token is the initial token.
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 7198671901f3..8fbc15f27922 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -1651,7 +1651,7 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
StringRef OldDelimiter = *getRawStringDelimiter(Current.TokenText);
StringRef NewDelimiter =
getCanonicalRawStringDelimiter(Style, RawStringStyle.Language);
- if (NewDelimiter.empty() || OldDelimiter.empty())
+ if (NewDelimiter.empty())
NewDelimiter = OldDelimiter;
// The text of a raw string is between the leading 'R"delimiter(' and the
// trailing 'delimiter)"'.
@@ -1906,12 +1906,12 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
LineState &State, bool AllowBreak) {
unsigned StartColumn = State.Column - Current.ColumnWidth;
if (Current.isStringLiteral()) {
- // FIXME: String literal breaking is currently disabled for C#, Java and
- // JavaScript, as it requires strings to be merged using "+" which we
+ // FIXME: String literal breaking is currently disabled for C#, Java, Json
+ // and JavaScript, as it requires strings to be merged using "+" which we
// don't support.
if (Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp() ||
- !Style.BreakStringLiterals || !AllowBreak)
+ Style.isJson() || !Style.BreakStringLiterals || !AllowBreak)
return nullptr;
// Don't break string literals inside preprocessor directives (except for
@@ -1993,6 +1993,11 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// We don't insert backslashes when breaking line comments.
ColumnLimit = Style.ColumnLimit;
}
+ if (ColumnLimit == 0) {
+ // To make the rest of the function easier set the column limit to the
+ // maximum, if there should be no limit.
+ ColumnLimit = std::numeric_limits<decltype(ColumnLimit)>::max();
+ }
if (Current.UnbreakableTailLength >= ColumnLimit)
return {0, false};
// ColumnWidth was already accounted into State.Column before calling
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 5f5bb8585ac1..2b860d2a25f7 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -63,6 +63,7 @@ template <> struct ScalarEnumerationTraits<FormatStyle::LanguageKind> {
IO.enumCase(Value, "TableGen", FormatStyle::LK_TableGen);
IO.enumCase(Value, "TextProto", FormatStyle::LK_TextProto);
IO.enumCase(Value, "CSharp", FormatStyle::LK_CSharp);
+ IO.enumCase(Value, "Json", FormatStyle::LK_Json);
}
};
@@ -85,6 +86,15 @@ template <> struct ScalarEnumerationTraits<FormatStyle::LanguageStandard> {
}
};
+template <>
+struct ScalarEnumerationTraits<FormatStyle::LambdaBodyIndentationKind> {
+ static void enumeration(IO &IO,
+ FormatStyle::LambdaBodyIndentationKind &Value) {
+ IO.enumCase(Value, "Signature", FormatStyle::LBI_Signature);
+ IO.enumCase(Value, "OuterScope", FormatStyle::LBI_OuterScope);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::UseTabStyle> {
static void enumeration(IO &IO, FormatStyle::UseTabStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::UT_Never);
@@ -143,13 +153,25 @@ template <> struct ScalarEnumerationTraits<FormatStyle::AlignConsecutiveStyle> {
}
};
+template <>
+struct ScalarEnumerationTraits<FormatStyle::ArrayInitializerAlignmentStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::ArrayInitializerAlignmentStyle &Value) {
+ IO.enumCase(Value, "None", FormatStyle::AIAS_None);
+ IO.enumCase(Value, "Left", FormatStyle::AIAS_Left);
+ IO.enumCase(Value, "Right", FormatStyle::AIAS_Right);
+ }
+};
+
template <> struct ScalarEnumerationTraits<FormatStyle::ShortIfStyle> {
static void enumeration(IO &IO, FormatStyle::ShortIfStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::SIS_Never);
- IO.enumCase(Value, "Always", FormatStyle::SIS_Always);
IO.enumCase(Value, "WithoutElse", FormatStyle::SIS_WithoutElse);
+ IO.enumCase(Value, "OnlyFirstIf", FormatStyle::SIS_OnlyFirstIf);
+ IO.enumCase(Value, "AllIfsAndElse", FormatStyle::SIS_AllIfsAndElse);
// For backward compatibility.
+ IO.enumCase(Value, "Always", FormatStyle::SIS_OnlyFirstIf);
IO.enumCase(Value, "false", FormatStyle::SIS_Never);
IO.enumCase(Value, "true", FormatStyle::SIS_WithoutElse);
}
@@ -238,6 +260,17 @@ struct ScalarEnumerationTraits<FormatStyle::BreakInheritanceListStyle> {
IO.enumCase(Value, "BeforeColon", FormatStyle::BILS_BeforeColon);
IO.enumCase(Value, "BeforeComma", FormatStyle::BILS_BeforeComma);
IO.enumCase(Value, "AfterColon", FormatStyle::BILS_AfterColon);
+ IO.enumCase(Value, "AfterComma", FormatStyle::BILS_AfterComma);
+ }
+};
+
+template <>
+struct ScalarEnumerationTraits<FormatStyle::EmptyLineAfterAccessModifierStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::EmptyLineAfterAccessModifierStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::ELAAMS_Never);
+ IO.enumCase(Value, "Leave", FormatStyle::ELAAMS_Leave);
+ IO.enumCase(Value, "Always", FormatStyle::ELAAMS_Always);
}
};
@@ -386,14 +419,24 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceAroundPointerQualifiersStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::ReferenceAlignmentStyle> {
+ static void enumeration(IO &IO, FormatStyle::ReferenceAlignmentStyle &Value) {
+ IO.enumCase(Value, "Pointer", FormatStyle::RAS_Pointer);
+ IO.enumCase(Value, "Middle", FormatStyle::RAS_Middle);
+ IO.enumCase(Value, "Left", FormatStyle::RAS_Left);
+ IO.enumCase(Value, "Right", FormatStyle::RAS_Right);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
static void enumeration(IO &IO,
FormatStyle::SpaceBeforeParensOptions &Value) {
IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
IO.enumCase(Value, "ControlStatements",
FormatStyle::SBPO_ControlStatements);
- IO.enumCase(Value, "ControlStatementsExceptForEachMacros",
- FormatStyle::SBPO_ControlStatementsExceptForEachMacros);
+ IO.enumCase(Value, "ControlStatementsExceptControlMacros",
+ FormatStyle::SBPO_ControlStatementsExceptControlMacros);
IO.enumCase(Value, "NonEmptyParentheses",
FormatStyle::SBPO_NonEmptyParentheses);
IO.enumCase(Value, "Always", FormatStyle::SBPO_Always);
@@ -401,6 +444,8 @@ struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensOptions> {
// For backward compatibility.
IO.enumCase(Value, "false", FormatStyle::SBPO_Never);
IO.enumCase(Value, "true", FormatStyle::SBPO_ControlStatements);
+ IO.enumCase(Value, "ControlStatementsExceptForEachMacros",
+ FormatStyle::SBPO_ControlStatementsExceptControlMacros);
}
};
@@ -415,6 +460,18 @@ struct ScalarEnumerationTraits<FormatStyle::BitFieldColonSpacingStyle> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::SortIncludesOptions> {
+ static void enumeration(IO &IO, FormatStyle::SortIncludesOptions &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SI_Never);
+ IO.enumCase(Value, "CaseInsensitive", FormatStyle::SI_CaseInsensitive);
+ IO.enumCase(Value, "CaseSensitive", FormatStyle::SI_CaseSensitive);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::SI_Never);
+ IO.enumCase(Value, "true", FormatStyle::SI_CaseSensitive);
+ }
+};
+
template <>
struct ScalarEnumerationTraits<FormatStyle::SortJavaStaticImportOptions> {
static void enumeration(IO &IO,
@@ -424,6 +481,18 @@ struct ScalarEnumerationTraits<FormatStyle::SortJavaStaticImportOptions> {
}
};
+template <> struct ScalarEnumerationTraits<FormatStyle::SpacesInAnglesStyle> {
+ static void enumeration(IO &IO, FormatStyle::SpacesInAnglesStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::SIAS_Never);
+ IO.enumCase(Value, "Always", FormatStyle::SIAS_Always);
+ IO.enumCase(Value, "Leave", FormatStyle::SIAS_Leave);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "false", FormatStyle::SIAS_Never);
+ IO.enumCase(Value, "true", FormatStyle::SIAS_Always);
+ }
+};
+
template <> struct MappingTraits<FormatStyle> {
static void mapping(IO &IO, FormatStyle &Style) {
// When reading, read the language first, we need it for getPredefinedStyle.
@@ -470,6 +539,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
+ IO.mapOptional("AlignArrayOfStructures", Style.AlignArrayOfStructures);
IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
@@ -572,19 +642,22 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("DeriveLineEnding", Style.DeriveLineEnding);
IO.mapOptional("DerivePointerAlignment", Style.DerivePointerAlignment);
IO.mapOptional("DisableFormat", Style.DisableFormat);
+ IO.mapOptional("EmptyLineAfterAccessModifier",
+ Style.EmptyLineAfterAccessModifier);
IO.mapOptional("EmptyLineBeforeAccessModifier",
Style.EmptyLineBeforeAccessModifier);
IO.mapOptional("ExperimentalAutoDetectBinPacking",
Style.ExperimentalAutoDetectBinPacking);
IO.mapOptional("FixNamespaceComments", Style.FixNamespaceComments);
IO.mapOptional("ForEachMacros", Style.ForEachMacros);
- IO.mapOptional("StatementAttributeLikeMacros",
- Style.StatementAttributeLikeMacros);
+ IO.mapOptional("IfMacros", Style.IfMacros);
+
IO.mapOptional("IncludeBlocks", Style.IncludeStyle.IncludeBlocks);
IO.mapOptional("IncludeCategories", Style.IncludeStyle.IncludeCategories);
IO.mapOptional("IncludeIsMainRegex", Style.IncludeStyle.IncludeIsMainRegex);
IO.mapOptional("IncludeIsMainSourceRegex",
Style.IncludeStyle.IncludeIsMainSourceRegex);
+ IO.mapOptional("IndentAccessModifiers", Style.IndentAccessModifiers);
IO.mapOptional("IndentCaseLabels", Style.IndentCaseLabels);
IO.mapOptional("IndentCaseBlocks", Style.IndentCaseBlocks);
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
@@ -600,6 +673,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("JavaScriptWrapImports", Style.JavaScriptWrapImports);
IO.mapOptional("KeepEmptyLinesAtTheStartOfBlocks",
Style.KeepEmptyLinesAtTheStartOfBlocks);
+ IO.mapOptional("LambdaBodyIndentation", Style.LambdaBodyIndentation);
IO.mapOptional("MacroBlockBegin", Style.MacroBlockBegin);
IO.mapOptional("MacroBlockEnd", Style.MacroBlockEnd);
IO.mapOptional("MaxEmptyLinesToKeep", Style.MaxEmptyLinesToKeep);
@@ -627,8 +701,11 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("PenaltyIndentedWhitespace",
Style.PenaltyIndentedWhitespace);
IO.mapOptional("PointerAlignment", Style.PointerAlignment);
+ IO.mapOptional("PPIndentWidth", Style.PPIndentWidth);
IO.mapOptional("RawStringFormats", Style.RawStringFormats);
+ IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
+ IO.mapOptional("ShortNamespaceLines", Style.ShortNamespaceLines);
IO.mapOptional("SortIncludes", Style.SortIncludes);
IO.mapOptional("SortJavaStaticImport", Style.SortJavaStaticImport);
IO.mapOptional("SortUsingDeclarations", Style.SortUsingDeclarations);
@@ -661,12 +738,16 @@ template <> struct MappingTraits<FormatStyle> {
Style.SpacesInContainerLiterals);
IO.mapOptional("SpacesInCStyleCastParentheses",
Style.SpacesInCStyleCastParentheses);
+ IO.mapOptional("SpacesInLineCommentPrefix",
+ Style.SpacesInLineCommentPrefix);
IO.mapOptional("SpacesInParentheses", Style.SpacesInParentheses);
IO.mapOptional("SpacesInSquareBrackets", Style.SpacesInSquareBrackets);
IO.mapOptional("SpaceBeforeSquareBrackets",
Style.SpaceBeforeSquareBrackets);
IO.mapOptional("BitFieldColonSpacing", Style.BitFieldColonSpacing);
IO.mapOptional("Standard", Style.Standard);
+ IO.mapOptional("StatementAttributeLikeMacros",
+ Style.StatementAttributeLikeMacros);
IO.mapOptional("StatementMacros", Style.StatementMacros);
IO.mapOptional("TabWidth", Style.TabWidth);
IO.mapOptional("TypenameMacros", Style.TypenameMacros);
@@ -710,6 +791,20 @@ template <> struct MappingTraits<FormatStyle::RawStringFormat> {
}
};
+template <> struct MappingTraits<FormatStyle::SpacesInLineComment> {
+ static void mapping(IO &IO, FormatStyle::SpacesInLineComment &Space) {
+ // Transform the maximum to signed, to parse "-1" correctly
+ int signedMaximum = static_cast<int>(Space.Maximum);
+ IO.mapOptional("Minimum", Space.Minimum);
+ IO.mapOptional("Maximum", signedMaximum);
+ Space.Maximum = static_cast<unsigned>(signedMaximum);
+
+ if (Space.Maximum != -1u) {
+ Space.Minimum = std::min(Space.Minimum, Space.Maximum);
+ }
+ }
+};
+
// Allows to read vector<FormatStyle> while keeping default values.
// IO.getContext() should contain a pointer to the FormatStyle structure, that
// will be used to get default values for missing keys.
@@ -880,10 +975,12 @@ static FormatStyle expandPresets(const FormatStyle &Style) {
FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
FormatStyle LLVMStyle;
+ LLVMStyle.InheritsParentConfig = false;
LLVMStyle.Language = Language;
LLVMStyle.AccessModifierOffset = -2;
LLVMStyle.AlignEscapedNewlines = FormatStyle::ENAS_Right;
LLVMStyle.AlignAfterOpenBracket = FormatStyle::BAS_Align;
+ LLVMStyle.AlignArrayOfStructures = FormatStyle::AIAS_None;
LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = true;
LLVMStyle.AlignConsecutiveAssignments = FormatStyle::ACS_None;
@@ -943,18 +1040,21 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.Cpp11BracedListStyle = true;
LLVMStyle.DeriveLineEnding = true;
LLVMStyle.DerivePointerAlignment = false;
+ LLVMStyle.EmptyLineAfterAccessModifier = FormatStyle::ELAAMS_Never;
LLVMStyle.EmptyLineBeforeAccessModifier = FormatStyle::ELBAMS_LogicalBlock;
LLVMStyle.ExperimentalAutoDetectBinPacking = false;
LLVMStyle.FixNamespaceComments = true;
LLVMStyle.ForEachMacros.push_back("foreach");
LLVMStyle.ForEachMacros.push_back("Q_FOREACH");
LLVMStyle.ForEachMacros.push_back("BOOST_FOREACH");
+ LLVMStyle.IfMacros.push_back("KJ_IF_MAYBE");
LLVMStyle.IncludeStyle.IncludeCategories = {
{"^\"(llvm|llvm-c|clang|clang-c)/", 2, 0, false},
{"^(<|\"(gtest|gmock|isl|json)/)", 3, 0, false},
{".*", 1, 0, false}};
LLVMStyle.IncludeStyle.IncludeIsMainRegex = "(Test)?$";
LLVMStyle.IncludeStyle.IncludeBlocks = tooling::IncludeStyle::IBS_Preserve;
+ LLVMStyle.IndentAccessModifiers = false;
LLVMStyle.IndentCaseLabels = false;
LLVMStyle.IndentCaseBlocks = false;
LLVMStyle.IndentGotoLabels = true;
@@ -962,10 +1062,12 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.IndentRequires = false;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
+ LLVMStyle.PPIndentWidth = -1;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
LLVMStyle.TabWidth = 8;
+ LLVMStyle.LambdaBodyIndentation = FormatStyle::LBI_Signature;
LLVMStyle.MaxEmptyLinesToKeep = 1;
LLVMStyle.KeepEmptyLinesAtTheStartOfBlocks = true;
LLVMStyle.NamespaceIndentation = FormatStyle::NI_None;
@@ -975,6 +1077,8 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ObjCSpaceAfterProperty = false;
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
+ LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
+ LLVMStyle.ShortNamespaceLines = 1;
LLVMStyle.SpacesBeforeTrailingComments = 1;
LLVMStyle.Standard = FormatStyle::LS_Latest;
LLVMStyle.UseCRLF = false;
@@ -986,6 +1090,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceInEmptyParentheses = false;
LLVMStyle.SpacesInContainerLiterals = true;
LLVMStyle.SpacesInCStyleCastParentheses = false;
+ LLVMStyle.SpacesInLineCommentPrefix = {/*Minimum=*/1, /*Maximum=*/-1u};
LLVMStyle.SpaceAfterCStyleCast = false;
LLVMStyle.SpaceAfterLogicalNot = false;
LLVMStyle.SpaceAfterTemplateKeyword = true;
@@ -999,7 +1104,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeCpp11BracedList = false;
LLVMStyle.SpaceBeforeSquareBrackets = false;
LLVMStyle.BitFieldColonSpacing = FormatStyle::BFCS_Both;
- LLVMStyle.SpacesInAngles = false;
+ LLVMStyle.SpacesInAngles = FormatStyle::SIAS_Never;
LLVMStyle.SpacesInConditionalStatement = false;
LLVMStyle.PenaltyBreakAssignment = prec::Assignment;
@@ -1013,7 +1118,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.PenaltyIndentedWhitespace = 0;
LLVMStyle.DisableFormat = false;
- LLVMStyle.SortIncludes = true;
+ LLVMStyle.SortIncludes = FormatStyle::SI_CaseSensitive;
LLVMStyle.SortJavaStaticImport = FormatStyle::SJSIO_Before;
LLVMStyle.SortUsingDeclarations = true;
LLVMStyle.StatementAttributeLikeMacros.push_back("Q_EMIT");
@@ -1029,6 +1134,9 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
if (Language == FormatStyle::LK_TableGen) {
LLVMStyle.SpacesInContainerLiterals = false;
}
+ if (LLVMStyle.isJson()) {
+ LLVMStyle.ColumnLimit = 0;
+ }
return LLVMStyle;
}
@@ -1103,7 +1211,7 @@ FormatStyle getGoogleStyle(FormatStyle::LanguageKind Language) {
"ParseTestProto",
"ParsePartialTestProto",
},
- /*CanonicalDelimiter=*/"",
+ /*CanonicalDelimiter=*/"pb",
/*BasedOnStyle=*/"google",
},
};
@@ -1216,7 +1324,7 @@ FormatStyle getChromiumStyle(FormatStyle::LanguageKind Language) {
"java",
"javax",
};
- ChromiumStyle.SortIncludes = true;
+ ChromiumStyle.SortIncludes = FormatStyle::SI_CaseSensitive;
} else if (Language == FormatStyle::LK_JavaScript) {
ChromiumStyle.AllowShortIfStatementsOnASingleLine = FormatStyle::SIS_Never;
ChromiumStyle.AllowShortLoopsOnASingleLine = false;
@@ -1330,29 +1438,31 @@ FormatStyle getMicrosoftStyle(FormatStyle::LanguageKind Language) {
FormatStyle getNoStyle() {
FormatStyle NoStyle = getLLVMStyle();
NoStyle.DisableFormat = true;
- NoStyle.SortIncludes = false;
+ NoStyle.SortIncludes = FormatStyle::SI_Never;
NoStyle.SortUsingDeclarations = false;
return NoStyle;
}
bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
FormatStyle *Style) {
- if (Name.equals_lower("llvm")) {
+ if (Name.equals_insensitive("llvm")) {
*Style = getLLVMStyle(Language);
- } else if (Name.equals_lower("chromium")) {
+ } else if (Name.equals_insensitive("chromium")) {
*Style = getChromiumStyle(Language);
- } else if (Name.equals_lower("mozilla")) {
+ } else if (Name.equals_insensitive("mozilla")) {
*Style = getMozillaStyle();
- } else if (Name.equals_lower("google")) {
+ } else if (Name.equals_insensitive("google")) {
*Style = getGoogleStyle(Language);
- } else if (Name.equals_lower("webkit")) {
+ } else if (Name.equals_insensitive("webkit")) {
*Style = getWebKitStyle();
- } else if (Name.equals_lower("gnu")) {
+ } else if (Name.equals_insensitive("gnu")) {
*Style = getGNUStyle();
- } else if (Name.equals_lower("microsoft")) {
+ } else if (Name.equals_insensitive("microsoft")) {
*Style = getMicrosoftStyle(Language);
- } else if (Name.equals_lower("none")) {
+ } else if (Name.equals_insensitive("none")) {
*Style = getNoStyle();
+ } else if (Name.equals_insensitive("inheritparentconfig")) {
+ Style->InheritsParentConfig = true;
} else {
return false;
}
@@ -1362,8 +1472,9 @@ bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
}
std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
- FormatStyle *Style,
- bool AllowUnknownOptions) {
+ FormatStyle *Style, bool AllowUnknownOptions,
+ llvm::SourceMgr::DiagHandlerTy DiagHandler,
+ void *DiagHandlerCtxt) {
assert(Style);
FormatStyle::LanguageKind Language = Style->Language;
assert(Language != FormatStyle::LK_None);
@@ -1371,7 +1482,8 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
return make_error_code(ParseError::Error);
Style->StyleSet.Clear();
std::vector<FormatStyle> Styles;
- llvm::yaml::Input Input(Config);
+ llvm::yaml::Input Input(Config, /*Ctxt=*/nullptr, DiagHandler,
+ DiagHandlerCtxt);
// DocumentListTraits<vector<FormatStyle>> uses the context to get default
// values for the fields, keys for which are missing from the configuration.
// Mapping also uses the context to get the language to find the correct
@@ -1659,10 +1771,12 @@ private:
Tok = Tok->Next;
}
}
- if (Style.DerivePointerAlignment)
+ if (Style.DerivePointerAlignment) {
Style.PointerAlignment = countVariableAlignments(AnnotatedLines) <= 0
? FormatStyle::PAS_Left
: FormatStyle::PAS_Right;
+ Style.ReferenceAlignment = FormatStyle::RAS_Pointer;
+ }
if (Style.Standard == FormatStyle::LS_Auto)
Style.Standard = hasCpp03IncompatibleFormat(AnnotatedLines)
? FormatStyle::LS_Latest
@@ -2209,10 +2323,23 @@ static void sortCppIncludes(const FormatStyle &Style,
for (unsigned i = 0, e = Includes.size(); i != e; ++i) {
Indices.push_back(i);
}
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
- return std::tie(Includes[LHSI].Priority, Includes[LHSI].Filename) <
- std::tie(Includes[RHSI].Priority, Includes[RHSI].Filename);
- });
+
+ if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
+ llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ const auto LHSFilenameLower = Includes[LHSI].Filename.lower();
+ const auto RHSFilenameLower = Includes[RHSI].Filename.lower();
+ return std::tie(Includes[LHSI].Priority, LHSFilenameLower,
+ Includes[LHSI].Filename) <
+ std::tie(Includes[RHSI].Priority, RHSFilenameLower,
+ Includes[RHSI].Filename);
+ });
+ } else {
+ llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ return std::tie(Includes[LHSI].Priority, Includes[LHSI].Filename) <
+ std::tie(Includes[RHSI].Priority, Includes[RHSI].Filename);
+ });
+ }
+
// The index of the include on which the cursor will be put after
// sorting/deduplicating.
unsigned CursorIndex;
@@ -2527,7 +2654,7 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName, unsigned *Cursor) {
tooling::Replacements Replaces;
- if (!Style.SortIncludes)
+ if (!Style.SortIncludes || Style.DisableFormat)
return Replaces;
if (isLikelyXml(Code))
return Replaces;
@@ -2702,6 +2829,25 @@ reformat(const FormatStyle &Style, StringRef Code,
if (Expanded.Language == FormatStyle::LK_JavaScript && isMpegTS(Code))
return {tooling::Replacements(), 0};
+  // JSON only needs the formatting pass.
+ if (Style.isJson()) {
+ std::vector<tooling::Range> Ranges(1, tooling::Range(0, Code.size()));
+ auto Env =
+ std::make_unique<Environment>(Code, FileName, Ranges, FirstStartColumn,
+ NextStartColumn, LastStartColumn);
+ // Perform the actual formatting pass.
+ tooling::Replacements Replaces =
+ Formatter(*Env, Style, Status).process().first;
+    // Add a replacement to remove the "x = " from the result.
+ if (!Replaces.add(tooling::Replacement(FileName, 0, 4, ""))) {
+      // Apply the reformatting changes and the removal of "x = ".
+ if (applyAllReplacements(Code, Replaces)) {
+ return {Replaces, 0};
+ }
+ }
+ return {tooling::Replacements(), 0};
+ }
+
typedef std::function<std::pair<tooling::Replacements, unsigned>(
const Environment &)>
AnalyzerPass;
@@ -2850,23 +2996,26 @@ const char *StyleOptionHelpDescription =
static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
if (FileName.endswith(".java"))
return FormatStyle::LK_Java;
- if (FileName.endswith_lower(".js") || FileName.endswith_lower(".mjs") ||
- FileName.endswith_lower(".ts"))
+ if (FileName.endswith_insensitive(".js") ||
+ FileName.endswith_insensitive(".mjs") ||
+ FileName.endswith_insensitive(".ts"))
return FormatStyle::LK_JavaScript; // (module) JavaScript or TypeScript.
if (FileName.endswith(".m") || FileName.endswith(".mm"))
return FormatStyle::LK_ObjC;
- if (FileName.endswith_lower(".proto") ||
- FileName.endswith_lower(".protodevel"))
+ if (FileName.endswith_insensitive(".proto") ||
+ FileName.endswith_insensitive(".protodevel"))
return FormatStyle::LK_Proto;
- if (FileName.endswith_lower(".textpb") ||
- FileName.endswith_lower(".pb.txt") ||
- FileName.endswith_lower(".textproto") ||
- FileName.endswith_lower(".asciipb"))
+ if (FileName.endswith_insensitive(".textpb") ||
+ FileName.endswith_insensitive(".pb.txt") ||
+ FileName.endswith_insensitive(".textproto") ||
+ FileName.endswith_insensitive(".asciipb"))
return FormatStyle::LK_TextProto;
- if (FileName.endswith_lower(".td"))
+ if (FileName.endswith_insensitive(".td"))
return FormatStyle::LK_TableGen;
- if (FileName.endswith_lower(".cs"))
+ if (FileName.endswith_insensitive(".cs"))
return FormatStyle::LK_CSharp;
+ if (FileName.endswith_insensitive(".json"))
+ return FormatStyle::LK_Json;
return FormatStyle::LK_Cpp;
}
@@ -2905,21 +3054,36 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
return make_string_error("Invalid fallback style \"" + FallbackStyleName);
+ llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1>
+ ChildFormatTextToApply;
+
if (StyleName.startswith("{")) {
// Parse YAML/JSON style from the command line.
- if (std::error_code ec = parseConfiguration(
- llvm::MemoryBufferRef(StyleName, "<command-line>"), &Style,
- AllowUnknownOptions))
+ StringRef Source = "<command-line>";
+ if (std::error_code ec =
+ parseConfiguration(llvm::MemoryBufferRef(StyleName, Source), &Style,
+ AllowUnknownOptions))
return make_string_error("Error parsing -style: " + ec.message());
- return Style;
+ if (Style.InheritsParentConfig)
+ ChildFormatTextToApply.emplace_back(
+ llvm::MemoryBuffer::getMemBuffer(StyleName, Source, false));
+ else
+ return Style;
}
- if (!StyleName.equals_lower("file")) {
+ // If the style inherits the parent configuration it is a command line
+ // configuration, which wants to inherit, so we have to skip the check of the
+ // StyleName.
+ if (!Style.InheritsParentConfig && !StyleName.equals_insensitive("file")) {
if (!getPredefinedStyle(StyleName, Style.Language, &Style))
return make_string_error("Invalid value for -style");
- return Style;
+ if (!Style.InheritsParentConfig)
+ return Style;
}
+ // Reset possible inheritance
+ Style.InheritsParentConfig = false;
+
// Look for .clang-format/_clang-format file in the file's parent directories.
SmallString<128> UnsuitableConfigFiles;
SmallString<128> Path(FileName);
@@ -2930,6 +3094,8 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
FilesToLookFor.push_back(".clang-format");
FilesToLookFor.push_back("_clang-format");
+ auto dropDiagnosticHandler = [](const llvm::SMDiagnostic &, void *) {};
+
for (StringRef Directory = Path; !Directory.empty();
Directory = llvm::sys::path::parent_path(Directory)) {
@@ -2966,7 +3132,36 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
}
LLVM_DEBUG(llvm::dbgs()
<< "Using configuration file " << ConfigFile << "\n");
- return Style;
+
+ if (!Style.InheritsParentConfig) {
+ if (ChildFormatTextToApply.empty())
+ return Style;
+
+ LLVM_DEBUG(llvm::dbgs() << "Applying child configurations\n");
+
+ for (const auto &MemBuf : llvm::reverse(ChildFormatTextToApply)) {
+ auto Ec = parseConfiguration(*MemBuf, &Style, AllowUnknownOptions,
+ dropDiagnosticHandler);
+ // It was already correctly parsed.
+ assert(!Ec);
+ static_cast<void>(Ec);
+ }
+
+ return Style;
+ }
+
+ LLVM_DEBUG(llvm::dbgs() << "Inherits parent configuration\n");
+
+ // Reset inheritance of style
+ Style.InheritsParentConfig = false;
+
+ ChildFormatTextToApply.emplace_back(std::move(*Text));
+
+ // Breaking out of the inner loop, since we don't want to parse
+ // .clang-format AND _clang-format, if both exist. Then we continue the
+ // inner loop (parent directories) in search for the parent
+ // configuration.
+ break;
}
}
}
@@ -2974,6 +3169,21 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
return make_string_error("Configuration file(s) do(es) not support " +
getLanguageName(Style.Language) + ": " +
UnsuitableConfigFiles);
+
+ if (!ChildFormatTextToApply.empty()) {
+ assert(ChildFormatTextToApply.size() == 1);
+
+ LLVM_DEBUG(llvm::dbgs()
+ << "Applying child configuration on fallback style\n");
+
+ auto Ec =
+ parseConfiguration(*ChildFormatTextToApply.front(), &FallbackStyle,
+ AllowUnknownOptions, dropDiagnosticHandler);
+ // It was already correctly parsed.
+ assert(!Ec);
+ static_cast<void>(Ec);
+ }
+
return FallbackStyle;
}
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index 2f53b338379d..0506cd554bcb 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -46,11 +46,13 @@ namespace format {
TYPE(DesignatedInitializerLSquare) \
TYPE(DesignatedInitializerPeriod) \
TYPE(DictLiteral) \
+ TYPE(FatArrow) \
TYPE(ForEachMacro) \
TYPE(FunctionAnnotationRParen) \
TYPE(FunctionDeclarationName) \
TYPE(FunctionLBrace) \
TYPE(FunctionTypeLParen) \
+ TYPE(IfMacro) \
TYPE(ImplicitStringLiteral) \
TYPE(InheritanceColon) \
TYPE(InheritanceComma) \
@@ -61,17 +63,12 @@ namespace format {
TYPE(JsComputedPropertyName) \
TYPE(JsExponentiation) \
TYPE(JsExponentiationEqual) \
- TYPE(JsFatArrow) \
- TYPE(JsNonNullAssertion) \
- TYPE(JsNullishCoalescingOperator) \
- TYPE(JsNullPropagatingOperator) \
+ TYPE(JsPipePipeEqual) \
TYPE(JsPrivateIdentifier) \
TYPE(JsTypeColon) \
TYPE(JsTypeOperator) \
TYPE(JsTypeOptionalQuestion) \
TYPE(JsAndAndEqual) \
- TYPE(JsPipePipeEqual) \
- TYPE(JsNullishCoalescingEqual) \
TYPE(LambdaArrow) \
TYPE(LambdaLBrace) \
TYPE(LambdaLSquare) \
@@ -80,6 +77,10 @@ namespace format {
TYPE(MacroBlockBegin) \
TYPE(MacroBlockEnd) \
TYPE(NamespaceMacro) \
+ TYPE(NonNullAssertion) \
+ TYPE(NullCoalescingEqual) \
+ TYPE(NullCoalescingOperator) \
+ TYPE(NullPropagatingOperator) \
TYPE(ObjCBlockLBrace) \
TYPE(ObjCBlockLParen) \
TYPE(ObjCDecl) \
@@ -113,8 +114,6 @@ namespace format {
TYPE(CSharpStringLiteral) \
TYPE(CSharpNamedArgumentColon) \
TYPE(CSharpNullable) \
- TYPE(CSharpNullCoalescing) \
- TYPE(CSharpNullConditional) \
TYPE(CSharpNullConditionalLSquare) \
TYPE(CSharpGenericTypeConstraint) \
TYPE(CSharpGenericTypeConstraintColon) \
@@ -433,6 +432,15 @@ public:
/// The next token in the unwrapped line.
FormatToken *Next = nullptr;
+ /// The first token in set of column elements.
+ bool StartsColumn = false;
+
+ /// This notes the start of the line of an array initializer.
+ bool ArrayInitializerLineStart = false;
+
+ /// This starts an array initializer.
+ bool IsArrayInitializer = false;
+
/// If this token starts a block, this contains all the unwrapped lines
/// in it.
SmallVector<AnnotatedLine *, 1> Children;
diff --git a/clang/lib/Format/FormatTokenLexer.cpp b/clang/lib/Format/FormatTokenLexer.cpp
index e9b096370dbb..a9cfb4a247f0 100644
--- a/clang/lib/Format/FormatTokenLexer.cpp
+++ b/clang/lib/Format/FormatTokenLexer.cpp
@@ -39,6 +39,8 @@ FormatTokenLexer::FormatTokenLexer(
for (const std::string &ForEachMacro : Style.ForEachMacros)
Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
+ for (const std::string &IfMacro : Style.IfMacros)
+ Macros.insert({&IdentTable.get(IfMacro), TT_IfMacro});
for (const std::string &AttributeMacro : Style.AttributeMacros)
Macros.insert({&IdentTable.get(AttributeMacro), TT_AttributeMacro});
for (const std::string &StatementMacro : Style.StatementMacros)
@@ -92,20 +94,46 @@ void FormatTokenLexer::tryMergePreviousTokens() {
if (Style.isCpp() && tryTransformTryUsageForC())
return;
+ if (Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
+ static const tok::TokenKind NullishCoalescingOperator[] = {tok::question,
+ tok::question};
+ static const tok::TokenKind NullPropagatingOperator[] = {tok::question,
+ tok::period};
+ static const tok::TokenKind FatArrow[] = {tok::equal, tok::greater};
+
+ if (tryMergeTokens(FatArrow, TT_FatArrow))
+ return;
+ if (tryMergeTokens(NullishCoalescingOperator, TT_NullCoalescingOperator)) {
+ // Treat like the "||" operator (as opposed to the ternary ?).
+ Tokens.back()->Tok.setKind(tok::pipepipe);
+ return;
+ }
+ if (tryMergeTokens(NullPropagatingOperator, TT_NullPropagatingOperator)) {
+ // Treat like a regular "." access.
+ Tokens.back()->Tok.setKind(tok::period);
+ return;
+ }
+ if (tryMergeNullishCoalescingEqual()) {
+ return;
+ }
+ }
+
if (Style.isCSharp()) {
+ static const tok::TokenKind CSharpNullConditionalLSquare[] = {
+ tok::question, tok::l_square};
+
if (tryMergeCSharpKeywordVariables())
return;
if (tryMergeCSharpStringLiteral())
return;
- if (tryMergeCSharpDoubleQuestion())
- return;
- if (tryMergeCSharpNullConditional())
- return;
if (tryTransformCSharpForEach())
return;
- static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
- if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
+ if (tryMergeTokens(CSharpNullConditionalLSquare,
+ TT_CSharpNullConditionalLSquare)) {
+ // Treat like a regular "[" operator.
+ Tokens.back()->Tok.setKind(tok::l_square);
return;
+ }
}
if (tryMergeNSStringLiteral())
@@ -117,16 +145,9 @@ void FormatTokenLexer::tryMergePreviousTokens() {
tok::equal};
static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
tok::greaterequal};
- static const tok::TokenKind JSRightArrow[] = {tok::equal, tok::greater};
static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
static const tok::TokenKind JSExponentiationEqual[] = {tok::star,
tok::starequal};
- static const tok::TokenKind JSNullPropagatingOperator[] = {tok::question,
- tok::period};
- static const tok::TokenKind JSNullishOperator[] = {tok::question,
- tok::question};
- static const tok::TokenKind JSNullishEqual[] = {tok::question,
- tok::question, tok::equal};
static const tok::TokenKind JSPipePipeEqual[] = {tok::pipepipe, tok::equal};
static const tok::TokenKind JSAndAndEqual[] = {tok::ampamp, tok::equal};
@@ -137,28 +158,14 @@ void FormatTokenLexer::tryMergePreviousTokens() {
return;
if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
return;
- if (tryMergeTokens(JSRightArrow, TT_JsFatArrow))
- return;
if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
return;
if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
Tokens.back()->Tok.setKind(tok::starequal);
return;
}
- if (tryMergeTokens(JSNullishOperator, TT_JsNullishCoalescingOperator)) {
- // Treat like the "||" operator (as opposed to the ternary ?).
- Tokens.back()->Tok.setKind(tok::pipepipe);
- return;
- }
- if (tryMergeTokens(JSNullPropagatingOperator,
- TT_JsNullPropagatingOperator)) {
- // Treat like a regular "." access.
- Tokens.back()->Tok.setKind(tok::period);
- return;
- }
if (tryMergeTokens(JSAndAndEqual, TT_JsAndAndEqual) ||
- tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual) ||
- tryMergeTokens(JSNullishEqual, TT_JsNullishCoalescingEqual)) {
+ tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual)) {
// Treat like the "=" assignment operator.
Tokens.back()->Tok.setKind(tok::equal);
return;
@@ -310,45 +317,20 @@ const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
"param", "property", "return", "type",
};
-bool FormatTokenLexer::tryMergeCSharpDoubleQuestion() {
+bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
if (Tokens.size() < 2)
return false;
- auto &FirstQuestion = *(Tokens.end() - 2);
- auto &SecondQuestion = *(Tokens.end() - 1);
- if (!FirstQuestion->is(tok::question) || !SecondQuestion->is(tok::question))
+ auto &NullishCoalescing = *(Tokens.end() - 2);
+ auto &Equal = *(Tokens.end() - 1);
+ if (NullishCoalescing->getType() != TT_NullCoalescingOperator ||
+ !Equal->is(tok::equal))
return false;
- FirstQuestion->Tok.setKind(tok::question); // no '??' in clang tokens.
- FirstQuestion->TokenText = StringRef(FirstQuestion->TokenText.begin(),
- SecondQuestion->TokenText.end() -
- FirstQuestion->TokenText.begin());
- FirstQuestion->ColumnWidth += SecondQuestion->ColumnWidth;
- FirstQuestion->setType(TT_CSharpNullCoalescing);
- Tokens.erase(Tokens.end() - 1);
- return true;
-}
-
-// Merge '?[' and '?.' pairs into single tokens.
-bool FormatTokenLexer::tryMergeCSharpNullConditional() {
- if (Tokens.size() < 2)
- return false;
- auto &Question = *(Tokens.end() - 2);
- auto &PeriodOrLSquare = *(Tokens.end() - 1);
- if (!Question->is(tok::question) ||
- !PeriodOrLSquare->isOneOf(tok::l_square, tok::period))
- return false;
- Question->TokenText =
- StringRef(Question->TokenText.begin(),
- PeriodOrLSquare->TokenText.end() - Question->TokenText.begin());
- Question->ColumnWidth += PeriodOrLSquare->ColumnWidth;
-
- if (PeriodOrLSquare->is(tok::l_square)) {
- Question->Tok.setKind(tok::question); // no '?[' in clang tokens.
- Question->setType(TT_CSharpNullConditionalLSquare);
- } else {
- Question->Tok.setKind(tok::question); // no '?.' in clang tokens.
- Question->setType(TT_CSharpNullConditional);
- }
-
+ NullishCoalescing->Tok.setKind(tok::equal); // no '??=' in clang tokens.
+ NullishCoalescing->TokenText =
+ StringRef(NullishCoalescing->TokenText.begin(),
+ Equal->TokenText.end() - NullishCoalescing->TokenText.begin());
+ NullishCoalescing->ColumnWidth += Equal->ColumnWidth;
+ NullishCoalescing->setType(TT_NullCoalescingEqual);
Tokens.erase(Tokens.end() - 1);
return true;
}
@@ -1034,6 +1016,13 @@ FormatToken *FormatTokenLexer::getNextToken() {
tok::pp_define) &&
it != Macros.end()) {
FormatTok->setType(it->second);
+ if (it->second == TT_IfMacro) {
+ // The lexer token currently has type tok::kw_unknown. However, for this
+ // substitution to be treated correctly in the TokenAnnotator, faking
+ // the tok value seems to be needed. Not sure if there's a more elegant
+ // way.
+ FormatTok->Tok.setKind(tok::kw_if);
+ }
} else if (FormatTok->is(tok::identifier)) {
if (MacroBlockBeginRegex.match(Text)) {
FormatTok->setType(TT_MacroBlockBegin);
diff --git a/clang/lib/Format/FormatTokenLexer.h b/clang/lib/Format/FormatTokenLexer.h
index 6b08677e3369..a9e3b2fd498a 100644
--- a/clang/lib/Format/FormatTokenLexer.h
+++ b/clang/lib/Format/FormatTokenLexer.h
@@ -54,8 +54,7 @@ private:
bool tryMergeJSPrivateIdentifier();
bool tryMergeCSharpStringLiteral();
bool tryMergeCSharpKeywordVariables();
- bool tryMergeCSharpDoubleQuestion();
- bool tryMergeCSharpNullConditional();
+ bool tryMergeNullishCoalescingEqual();
bool tryTransformCSharpForEach();
bool tryMergeForEach();
bool tryTransformTryUsageForC();
diff --git a/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 97de45bd1965..def551f863cd 100644
--- a/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -22,10 +22,6 @@ namespace clang {
namespace format {
namespace {
-// The maximal number of unwrapped lines that a short namespace spans.
-// Short namespaces don't need an end comment.
-static const int kShortNamespaceMaxLines = 1;
-
// Computes the name of a namespace given the namespace token.
// Returns "" for anonymous namespace.
std::string computeName(const FormatToken *NamespaceTok) {
@@ -66,8 +62,10 @@ std::string computeName(const FormatToken *NamespaceTok) {
}
std::string computeEndCommentText(StringRef NamespaceName, bool AddNewline,
- const FormatToken *NamespaceTok) {
- std::string text = "// ";
+ const FormatToken *NamespaceTok,
+ unsigned SpacesToAdd) {
+ std::string text = "//";
+ text.append(SpacesToAdd, ' ');
text += NamespaceTok->TokenText;
if (NamespaceTok->is(TT_NamespaceMacro))
text += "(";
@@ -278,9 +276,10 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
EndCommentNextTok->NewlinesBefore == 0 &&
EndCommentNextTok->isNot(tok::eof);
const std::string EndCommentText =
- computeEndCommentText(NamespaceName, AddNewline, NamespaceTok);
+ computeEndCommentText(NamespaceName, AddNewline, NamespaceTok,
+ Style.SpacesInLineCommentPrefix.Minimum);
if (!hasEndComment(EndCommentPrevTok)) {
- bool isShort = I - StartLineIndex <= kShortNamespaceMaxLines + 1;
+ bool isShort = I - StartLineIndex <= Style.ShortNamespaceLines + 1;
if (!isShort)
addEndComment(EndCommentPrevTok, EndCommentText, SourceMgr, &Fixes);
} else if (!validEndComment(EndCommentPrevTok, NamespaceName,
diff --git a/clang/lib/Format/SortJavaScriptImports.cpp b/clang/lib/Format/SortJavaScriptImports.cpp
index db2b65b08898..a5e3ce69207b 100644
--- a/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/clang/lib/Format/SortJavaScriptImports.cpp
@@ -19,6 +19,7 @@
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TokenKinds.h"
#include "clang/Format/Format.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
@@ -69,6 +70,7 @@ struct JsImportedSymbol {
// This struct represents both exports and imports to build up the information
// required for sorting module references.
struct JsModuleReference {
+ bool FormattingOff = false;
bool IsExport = false;
// Module references are sorted into these categories, in order.
enum ReferenceCategory {
@@ -83,8 +85,16 @@ struct JsModuleReference {
// Prefix from "import * as prefix". Empty for symbol imports and `export *`.
// Implies an empty names list.
StringRef Prefix;
+ // Default import from "import DefaultName from '...';".
+ StringRef DefaultImport;
// Symbols from `import {SymbolA, SymbolB, ...} from ...;`.
SmallVector<JsImportedSymbol, 1> Symbols;
+ // Whether some symbols were merged into this one. Controls if the module
+ // reference needs re-formatting.
+ bool SymbolsMerged = false;
+ // The source location just after { and just before } in the import.
+ // Extracted eagerly to allow modification of Symbols later on.
+ SourceLocation SymbolsStart, SymbolsEnd;
// Textual position of the import/export, including preceding and trailing
// comments.
SourceRange Range;
@@ -103,7 +113,7 @@ bool operator<(const JsModuleReference &LHS, const JsModuleReference &RHS) {
// Empty URLs sort *last* (for export {...};).
if (LHS.URL.empty() != RHS.URL.empty())
return LHS.URL.empty() < RHS.URL.empty();
- if (int Res = LHS.URL.compare_lower(RHS.URL))
+ if (int Res = LHS.URL.compare_insensitive(RHS.URL))
return Res < 0;
// '*' imports (with prefix) sort before {a, b, ...} imports.
if (LHS.Prefix.empty() != RHS.Prefix.empty())
@@ -138,38 +148,31 @@ public:
if (References.empty())
return {Result, 0};
- SmallVector<unsigned, 16> Indices;
- for (unsigned i = 0, e = References.size(); i != e; ++i)
- Indices.push_back(i);
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
- return References[LHSI] < References[RHSI];
- });
- bool ReferencesInOrder = llvm::is_sorted(Indices);
+ // The text range of all parsed imports, to be replaced later.
+ SourceRange InsertionPoint = References[0].Range;
+ InsertionPoint.setEnd(References[References.size() - 1].Range.getEnd());
+
+ References = sortModuleReferences(References);
std::string ReferencesText;
- bool SymbolsInOrder = true;
- for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
- JsModuleReference Reference = References[Indices[i]];
- if (appendReference(ReferencesText, Reference))
- SymbolsInOrder = false;
- if (i + 1 < e) {
+ for (unsigned I = 0, E = References.size(); I != E; ++I) {
+ JsModuleReference Reference = References[I];
+ appendReference(ReferencesText, Reference);
+ if (I + 1 < E) {
// Insert breaks between imports and exports.
ReferencesText += "\n";
// Separate imports groups with two line breaks, but keep all exports
// in a single group.
if (!Reference.IsExport &&
- (Reference.IsExport != References[Indices[i + 1]].IsExport ||
- Reference.Category != References[Indices[i + 1]].Category))
+ (Reference.IsExport != References[I + 1].IsExport ||
+ Reference.Category != References[I + 1].Category))
ReferencesText += "\n";
}
}
-
- if (ReferencesInOrder && SymbolsInOrder)
+ llvm::StringRef PreviousText = getSourceText(InsertionPoint);
+ if (ReferencesText == PreviousText)
return {Result, 0};
- SourceRange InsertionPoint = References[0].Range;
- InsertionPoint.setEnd(References[References.size() - 1].Range.getEnd());
-
// The loop above might collapse previously existing line breaks between
// import blocks, and thus shrink the file. SortIncludes must not shrink
// overall source length as there is currently no re-calculation of ranges
@@ -177,17 +180,19 @@ public:
// This loop just backfills trailing spaces after the imports, which are
// harmless and will be stripped by the subsequent formatting pass.
// FIXME: A better long term fix is to re-calculate Ranges after sorting.
- unsigned PreviousSize = getSourceText(InsertionPoint).size();
+ unsigned PreviousSize = PreviousText.size();
while (ReferencesText.size() < PreviousSize) {
ReferencesText += " ";
}
// Separate references from the main code body of the file.
- if (FirstNonImportLine && FirstNonImportLine->First->NewlinesBefore < 2)
+ if (FirstNonImportLine && FirstNonImportLine->First->NewlinesBefore < 2 &&
+ !(FirstNonImportLine->First->is(tok::comment) &&
+ FirstNonImportLine->First->TokenText.trim() == "// clang-format on"))
ReferencesText += "\n";
LLVM_DEBUG(llvm::dbgs() << "Replacing imports:\n"
- << getSourceText(InsertionPoint) << "\nwith:\n"
+ << PreviousText << "\nwith:\n"
<< ReferencesText << "\n");
auto Err = Result.add(tooling::Replacement(
Env.getSourceManager(), CharSourceRange::getCharRange(InsertionPoint),
@@ -239,26 +244,99 @@ private:
SM.getFileOffset(End) - SM.getFileOffset(Begin));
}
- // Appends ``Reference`` to ``Buffer``, returning true if text within the
- // ``Reference`` changed (e.g. symbol order).
- bool appendReference(std::string &Buffer, JsModuleReference &Reference) {
+ // Sorts the given module references.
+ // Imports can have formatting disabled (FormattingOff), so the code below
+ // skips runs of "no-formatting" module references, and sorts/merges the
+ // references that have formatting enabled in individual chunks.
+ SmallVector<JsModuleReference, 16>
+ sortModuleReferences(const SmallVector<JsModuleReference, 16> &References) {
+ // Sort module references.
+ // Imports can have formatting disabled (FormattingOff), so the code below
+ // skips runs of "no-formatting" module references, and sorts other
+ // references per group.
+ const auto *Start = References.begin();
+ SmallVector<JsModuleReference, 16> ReferencesSorted;
+ while (Start != References.end()) {
+ while (Start != References.end() && Start->FormattingOff) {
+ // Skip over all imports w/ disabled formatting.
+ ReferencesSorted.push_back(*Start);
+ Start++;
+ }
+ SmallVector<JsModuleReference, 16> SortChunk;
+ while (Start != References.end() && !Start->FormattingOff) {
+ // Collect a chunk of subsequent imports w/ enabled formatting.
+ SortChunk.push_back(*Start);
+ Start++;
+ }
+ llvm::stable_sort(SortChunk);
+ mergeModuleReferences(SortChunk);
+ ReferencesSorted.insert(ReferencesSorted.end(), SortChunk.begin(),
+ SortChunk.end());
+ }
+ return ReferencesSorted;
+ }
+
+ // Merge module references.
+ // After sorting, find all references that import named symbols from the
+ // same URL and merge their names. E.g.
+ // import {X} from 'a';
+ // import {Y} from 'a';
+ // should be rewritten to:
+ // import {X, Y} from 'a';
+ // Note: this modifies the passed in ``References`` vector (by removing no
+ // longer needed references).
+ void mergeModuleReferences(SmallVector<JsModuleReference, 16> &References) {
+ if (References.empty())
+ return;
+ JsModuleReference *PreviousReference = References.begin();
+ auto *Reference = std::next(References.begin());
+ while (Reference != References.end()) {
+ // Skip:
+ // import 'foo';
+ // import * as foo from 'foo'; on either previous or this.
+ // import Default from 'foo'; on either previous or this.
+ // references whose export-ness, prefix, default import, or URL mismatch.
+ if (Reference->Category == JsModuleReference::SIDE_EFFECT ||
+ PreviousReference->Category == JsModuleReference::SIDE_EFFECT ||
+ Reference->IsExport != PreviousReference->IsExport ||
+ !PreviousReference->Prefix.empty() || !Reference->Prefix.empty() ||
+ !PreviousReference->DefaultImport.empty() ||
+ !Reference->DefaultImport.empty() || Reference->Symbols.empty() ||
+ PreviousReference->URL != Reference->URL) {
+ PreviousReference = Reference;
+ ++Reference;
+ continue;
+ }
+ // Merge symbols from identical imports.
+ PreviousReference->Symbols.append(Reference->Symbols);
+ PreviousReference->SymbolsMerged = true;
+ // Remove the merged import.
+ Reference = References.erase(Reference);
+ }
+ }
+
+ // Appends ``Reference`` to ``Buffer``.
+ void appendReference(std::string &Buffer, JsModuleReference &Reference) {
+ if (Reference.FormattingOff) {
+ Buffer +=
+ getSourceText(Reference.Range.getBegin(), Reference.Range.getEnd());
+ return;
+ }
// Sort the individual symbols within the import.
// E.g. `import {b, a} from 'x';` -> `import {a, b} from 'x';`
SmallVector<JsImportedSymbol, 1> Symbols = Reference.Symbols;
llvm::stable_sort(
Symbols, [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
- return LHS.Symbol.compare_lower(RHS.Symbol) < 0;
+ return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
});
- if (Symbols == Reference.Symbols) {
- // No change in symbol order.
+ if (!Reference.SymbolsMerged && Symbols == Reference.Symbols) {
+ // Symbols didn't change, just emit the entire module reference.
StringRef ReferenceStmt = getSourceText(Reference.Range);
Buffer += ReferenceStmt;
- return false;
+ return;
}
// Stitch together the module reference start...
- SourceLocation SymbolsStart = Reference.Symbols.front().Range.getBegin();
- SourceLocation SymbolsEnd = Reference.Symbols.back().Range.getEnd();
- Buffer += getSourceText(Reference.Range.getBegin(), SymbolsStart);
+ Buffer += getSourceText(Reference.Range.getBegin(), Reference.SymbolsStart);
// ... then the references in order ...
for (auto I = Symbols.begin(), E = Symbols.end(); I != E; ++I) {
if (I != Symbols.begin())
@@ -266,8 +344,7 @@ private:
Buffer += getSourceText(I->Range);
}
// ... followed by the module reference end.
- Buffer += getSourceText(SymbolsEnd, Reference.Range.getEnd());
- return true;
+ Buffer += getSourceText(Reference.SymbolsEnd, Reference.Range.getEnd());
}
// Parses module references in the given lines. Returns the module references,
@@ -280,9 +357,30 @@ private:
SourceLocation Start;
AnnotatedLine *FirstNonImportLine = nullptr;
bool AnyImportAffected = false;
- for (auto Line : AnnotatedLines) {
+ bool FormattingOff = false;
+ for (auto *Line : AnnotatedLines) {
Current = Line->First;
LineEnd = Line->Last;
+ // clang-format comments toggle formatting on/off.
+ // This is tracked in FormattingOff here and on JsModuleReference.
+ while (Current && Current->is(tok::comment)) {
+ StringRef CommentText = Current->TokenText.trim();
+ if (CommentText == "// clang-format off") {
+ FormattingOff = true;
+ } else if (CommentText == "// clang-format on") {
+ FormattingOff = false;
+ // Special case: consider a trailing "clang-format on" line to be part
+ // of the module reference, so that it gets moved around together with
+ // it (as opposed to the next module reference, which might get sorted
+ // around).
+ if (!References.empty()) {
+ References.back().Range.setEnd(Current->Tok.getEndLoc());
+ Start = Current->Tok.getEndLoc().getLocWithOffset(1);
+ }
+ }
+ // Handle all clang-format comments on a line, e.g. for an empty block.
+ Current = Current->Next;
+ }
skipComments();
if (Start.isInvalid() || References.empty())
// After the first file level comment, consider line comments to be part
@@ -295,6 +393,7 @@ private:
continue;
}
JsModuleReference Reference;
+ Reference.FormattingOff = FormattingOff;
Reference.Range.setBegin(Start);
if (!parseModuleReference(Keywords, Reference)) {
if (!FirstNonImportLine)
@@ -306,13 +405,14 @@ private:
Reference.Range.setEnd(LineEnd->Tok.getEndLoc());
LLVM_DEBUG({
llvm::dbgs() << "JsModuleReference: {"
- << "is_export: " << Reference.IsExport
+ << "formatting_off: " << Reference.FormattingOff
+ << ", is_export: " << Reference.IsExport
<< ", cat: " << Reference.Category
<< ", url: " << Reference.URL
<< ", prefix: " << Reference.Prefix;
- for (size_t i = 0; i < Reference.Symbols.size(); ++i)
- llvm::dbgs() << ", " << Reference.Symbols[i].Symbol << " as "
- << Reference.Symbols[i].Alias;
+ for (size_t I = 0; I < Reference.Symbols.size(); ++I)
+ llvm::dbgs() << ", " << Reference.Symbols[I].Symbol << " as "
+ << Reference.Symbols[I].Alias;
llvm::dbgs() << ", text: " << getSourceText(Reference.Range);
llvm::dbgs() << "}\n";
});
@@ -393,7 +493,9 @@ private:
bool parseNamedBindings(const AdditionalKeywords &Keywords,
JsModuleReference &Reference) {
+ // eat a potential "import X, " prefix.
if (Current->is(tok::identifier)) {
+ Reference.DefaultImport = Current->TokenText;
nextToken();
if (Current->is(Keywords.kw_from))
return true;
@@ -405,6 +507,7 @@ private:
return false;
// {sym as alias, sym2 as ...} from '...';
+ Reference.SymbolsStart = Current->Tok.getEndLoc();
while (Current->isNot(tok::r_brace)) {
nextToken();
if (Current->is(tok::r_brace))
@@ -432,6 +535,11 @@ private:
if (!Current->isOneOf(tok::r_brace, tok::comma))
return false;
}
+ Reference.SymbolsEnd = Current->Tok.getLocation();
+ // For named imports with a trailing comma ("import {X,}"), consider the
+ // comma to be the end of the import list, so that it doesn't get removed.
+ if (Current->Previous->is(tok::comma))
+ Reference.SymbolsEnd = Current->Previous->Tok.getLocation();
nextToken(); // consume r_brace
return true;
}
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 34c291ecc492..54e6c7d38e7d 100755..100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -116,10 +116,17 @@ private:
while (CurrentToken) {
if (CurrentToken->is(tok::greater)) {
// Try to do a better job at looking for ">>" within the condition of
- // a statement.
+ // a statement. Conservatively insert spaces between consecutive ">"
+ // tokens to prevent splitting right bitshift operators and potentially
+ // altering program semantics. This check is overly conservative and
+ // will prevent spaces from being inserted in select nested template
+ // parameter cases, but should not alter program semantics.
if (CurrentToken->Next && CurrentToken->Next->is(tok::greater) &&
Left->ParentBracket != tok::less &&
- isKeywordWithCondition(*Line.First))
+ (isKeywordWithCondition(*Line.First) ||
+ CurrentToken->getStartOfNonWhitespace() ==
+ CurrentToken->Next->getStartOfNonWhitespace().getLocWithOffset(
+ -1)))
return false;
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
@@ -222,7 +229,19 @@ private:
}
if (Left->is(TT_OverloadedOperatorLParen)) {
- Contexts.back().IsExpression = false;
+ // Find the previous kw_operator token.
+ FormatToken *Prev = Left;
+ while (!Prev->is(tok::kw_operator)) {
+ Prev = Prev->Previous;
+ assert(Prev && "Expect a kw_operator prior to the OperatorLParen!");
+ }
+
+ // If faced with "a.operator*(argument)" or "a->operator*(argument)",
+ // i.e. the operator is called as a member function,
+ // then the argument must be an expression.
+ bool OperatorCalledAsMemberFunction =
+ Prev->Previous && Prev->Previous->isOneOf(tok::period, tok::arrow);
+ Contexts.back().IsExpression = OperatorCalledAsMemberFunction;
} else if (Style.Language == FormatStyle::LK_JavaScript &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
@@ -344,7 +363,7 @@ private:
Left->Previous && Left->Previous->is(tok::l_paren)) {
// Detect the case where macros are used to generate lambdas or
// function bodies, e.g.:
- // auto my_lambda = MARCO((Type *type, int i) { .. body .. });
+ // auto my_lambda = MACRO((Type *type, int i) { .. body .. });
for (FormatToken *Tok = Left; Tok != CurrentToken; Tok = Tok->Next) {
if (Tok->is(TT_BinaryOperator) &&
Tok->isOneOf(tok::star, tok::amp, tok::ampamp))
@@ -390,9 +409,13 @@ private:
!CurrentToken->Next->HasUnescapedNewline &&
!CurrentToken->Next->isTrailingComment())
HasMultipleParametersOnALine = true;
+ bool ProbablyFunctionTypeLParen =
+ (CurrentToken->is(tok::l_paren) && CurrentToken->Next &&
+ CurrentToken->Next->isOneOf(tok::star, tok::amp, tok::caret));
if ((CurrentToken->Previous->isOneOf(tok::kw_const, tok::kw_auto) ||
CurrentToken->Previous->isSimpleTypeSpecifier()) &&
- !CurrentToken->is(tok::l_brace))
+ !(CurrentToken->is(tok::l_brace) ||
+ (CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen)))
Contexts.back().IsExpression = false;
if (CurrentToken->isOneOf(tok::semi, tok::colon)) {
MightBeObjCForRangeLoop = false;
@@ -722,6 +745,21 @@ private:
return false;
}
+ bool couldBeInStructArrayInitializer() const {
+ if (Contexts.size() < 2)
+ return false;
+ // We want to back up no more than 2 context levels i.e.
+ // . { { <-
+ const auto End = std::next(Contexts.rbegin(), 2);
+ auto Last = Contexts.rbegin();
+ unsigned Depth = 0;
+ for (; Last != End; ++Last) {
+ if (Last->ContextKind == tok::l_brace)
+ ++Depth;
+ }
+ return Depth == 2 && Last->ContextKind != tok::l_brace;
+ }
+
bool parseBrace() {
if (CurrentToken) {
FormatToken *Left = CurrentToken->Previous;
@@ -739,10 +777,17 @@ private:
Left->Previous->is(TT_JsTypeColon))
Contexts.back().IsExpression = false;
+ unsigned CommaCount = 0;
while (CurrentToken) {
if (CurrentToken->is(tok::r_brace)) {
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
+ if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
+ if (Left->ParentBracket == tok::l_brace &&
+ couldBeInStructArrayInitializer() && CommaCount > 0) {
+ Contexts.back().InStructArrayInitializer = true;
+ }
+ }
next();
return true;
}
@@ -766,9 +811,11 @@ private:
Style.Language == FormatStyle::LK_JavaScript)
Left->setType(TT_DictLiteral);
}
- if (CurrentToken->is(tok::comma) &&
- Style.Language == FormatStyle::LK_JavaScript)
- Left->setType(TT_DictLiteral);
+ if (CurrentToken->is(tok::comma)) {
+ if (Style.Language == FormatStyle::LK_JavaScript)
+ Left->setType(TT_DictLiteral);
+ ++CommaCount;
+ }
if (!consumeToken())
return false;
}
@@ -1048,13 +1095,6 @@ private:
CurrentToken->Previous->setType(TT_OverloadedOperator);
break;
case tok::question:
- if (Tok->is(TT_CSharpNullConditionalLSquare)) {
- if (!parseSquare())
- return false;
- break;
- }
- if (Tok->isOneOf(TT_CSharpNullConditional, TT_CSharpNullCoalescing))
- break;
if (Style.Language == FormatStyle::LK_JavaScript && Tok->Next &&
Tok->Next->isOneOf(tok::semi, tok::comma, tok::colon, tok::r_paren,
tok::r_brace)) {
@@ -1339,6 +1379,12 @@ public:
return LT_ObjCMethodDecl;
}
+ for (const auto &ctx : Contexts) {
+ if (ctx.InStructArrayInitializer) {
+ return LT_ArrayOfStructInitializer;
+ }
+ }
+
return LT_Other;
}
@@ -1363,9 +1409,9 @@ private:
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
if (!CurrentToken->isOneOf(
- TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro,
+ TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro,
TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace,
- TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_JsFatArrow,
+ TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow,
TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
TT_UntouchableMacroFunc, TT_ConstraintJunctions,
@@ -1414,6 +1460,7 @@ private:
bool IsForEachMacro = false;
bool InCpp11AttributeSpecifier = false;
bool InCSharpAttributeSpecifier = false;
+ bool InStructArrayInitializer = false;
};
/// Puts a new \c Context onto the stack \c Contexts for the lifetime
@@ -1429,7 +1476,16 @@ private:
P.Contexts.back().IsExpression));
}
- ~ScopedContextCreator() { P.Contexts.pop_back(); }
+ ~ScopedContextCreator() {
+ if (P.Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
+ if (P.Contexts.back().InStructArrayInitializer) {
+ P.Contexts.pop_back();
+ P.Contexts.back().InStructArrayInitializer = true;
+ return;
+ }
+ }
+ P.Contexts.pop_back();
+ }
};
void modifyContext(const FormatToken &Current) {
@@ -1564,39 +1620,29 @@ private:
// The token type is already known.
return;
- if (Style.isCSharp() && CurrentToken->is(tok::question)) {
- if (CurrentToken->TokenText == "??") {
- Current.setType(TT_CSharpNullCoalescing);
- return;
- }
- if (CurrentToken->TokenText == "?.") {
- Current.setType(TT_CSharpNullConditional);
- return;
- }
- if (CurrentToken->TokenText == "?[") {
- Current.setType(TT_CSharpNullConditionalLSquare);
- return;
- }
- }
-
- if (Style.Language == FormatStyle::LK_JavaScript) {
- if (Current.is(tok::exclaim)) {
- if (Current.Previous &&
- (Keywords.IsJavaScriptIdentifier(
- *Current.Previous, /* AcceptIdentifierName= */ true) ||
- Current.Previous->isOneOf(
- tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace,
- Keywords.kw_type, Keywords.kw_get, Keywords.kw_set) ||
- Current.Previous->Tok.isLiteral())) {
- Current.setType(TT_JsNonNullAssertion);
- return;
- }
- if (Current.Next &&
- Current.Next->isOneOf(TT_BinaryOperator, Keywords.kw_as)) {
- Current.setType(TT_JsNonNullAssertion);
+ if ((Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) &&
+ Current.is(tok::exclaim)) {
+ if (Current.Previous) {
+ bool IsIdentifier =
+ Style.Language == FormatStyle::LK_JavaScript
+ ? Keywords.IsJavaScriptIdentifier(
+ *Current.Previous, /* AcceptIdentifierName= */ true)
+ : Current.Previous->is(tok::identifier);
+ if (IsIdentifier ||
+ Current.Previous->isOneOf(
+ tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace,
+ tok::kw_false, tok::kw_true, Keywords.kw_type, Keywords.kw_get,
+ Keywords.kw_set) ||
+ Current.Previous->Tok.isLiteral()) {
+ Current.setType(TT_NonNullAssertion);
return;
}
}
+ if (Current.Next &&
+ Current.Next->isOneOf(TT_BinaryOperator, Keywords.kw_as)) {
+ Current.setType(TT_NonNullAssertion);
+ return;
+ }
}
// Line.MightBeFunctionDecl can only be true after the parentheses of a
@@ -1917,12 +1963,12 @@ private:
if (Tok.Next->isOneOf(tok::identifier, tok::kw_this))
return true;
- if (Tok.Next->is(tok::l_paren) &&
- !(Tok.Previous && Tok.Previous->is(tok::identifier) &&
- Tok.Previous->Previous &&
- Tok.Previous->Previous->isOneOf(tok::arrowstar, tok::arrow,
- tok::star)))
- return true;
+ // Look for a cast `( x ) (`.
+ if (Tok.Next->is(tok::l_paren) && Tok.Previous && Tok.Previous->Previous) {
+ if (Tok.Previous->is(tok::identifier) &&
+ Tok.Previous->Previous->is(tok::l_paren))
+ return true;
+ }
if (!Tok.Next->Next)
return false;
@@ -2184,7 +2230,7 @@ private:
return prec::Assignment;
if (Current->is(TT_LambdaArrow))
return prec::Comma;
- if (Current->is(TT_JsFatArrow))
+ if (Current->is(TT_FatArrow))
return prec::Assignment;
if (Current->isOneOf(tok::semi, TT_InlineASMColon, TT_SelectorName) ||
(Current->is(tok::comment) && NextNonComment &&
@@ -2430,6 +2476,14 @@ static bool isFunctionDeclarationName(const FormatToken &Current,
if (Next->MatchingParen->Next &&
Next->MatchingParen->Next->is(TT_PointerOrReference))
return true;
+ // Check for K&R C function definitions, e.g.:
+ // int f(i)
+ // {
+ // return i + 1;
+ // }
+ if (Next->Next && Next->Next->is(tok::identifier) &&
+ !(Next->MatchingParen->Next && Next->MatchingParen->Next->is(tok::semi)))
+ return true;
for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
Tok = Tok->Next) {
if (Tok->is(TT_TypeDeclarationParen))
@@ -2483,6 +2537,12 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
: Line.FirstStartColumn + Line.First->ColumnWidth;
FormatToken *Current = Line.First->Next;
bool InFunctionDecl = Line.MightBeFunctionDecl;
+ bool AlignArrayOfStructures =
+ (Style.AlignArrayOfStructures != FormatStyle::AIAS_None &&
+ Line.Type == LT_ArrayOfStructInitializer);
+ if (AlignArrayOfStructures)
+ calculateArrayInitializerColumnList(Line);
+
while (Current) {
if (isFunctionDeclarationName(*Current, Line))
Current->setType(TT_FunctionDeclarationName);
@@ -2602,6 +2662,45 @@ void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
}
}
+void TokenAnnotator::calculateArrayInitializerColumnList(AnnotatedLine &Line) {
+ if (Line.First == Line.Last) {
+ return;
+ }
+ auto *CurrentToken = Line.First;
+ CurrentToken->ArrayInitializerLineStart = true;
+ unsigned Depth = 0;
+ while (CurrentToken != nullptr && CurrentToken != Line.Last) {
+ if (CurrentToken->is(tok::l_brace)) {
+ CurrentToken->IsArrayInitializer = true;
+ if (CurrentToken->Next != nullptr)
+ CurrentToken->Next->MustBreakBefore = true;
+ CurrentToken =
+ calculateInitializerColumnList(Line, CurrentToken->Next, Depth + 1);
+ } else {
+ CurrentToken = CurrentToken->Next;
+ }
+ }
+}
+
+FormatToken *TokenAnnotator::calculateInitializerColumnList(
+ AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) {
+ while (CurrentToken != nullptr && CurrentToken != Line.Last) {
+ if (CurrentToken->is(tok::l_brace))
+ ++Depth;
+ else if (CurrentToken->is(tok::r_brace))
+ --Depth;
+ if (Depth == 2 && CurrentToken->isOneOf(tok::l_brace, tok::comma)) {
+ CurrentToken = CurrentToken->Next;
+ if (CurrentToken == nullptr)
+ break;
+ CurrentToken->StartsColumn = true;
+ CurrentToken = CurrentToken->Previous;
+ }
+ CurrentToken = CurrentToken->Next;
+ }
+ return CurrentToken;
+}
+
unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
const FormatToken &Tok,
bool InFunctionDecl) {
@@ -2809,6 +2908,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Right) {
if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
return true;
+ if (Style.isJson() && Left.is(tok::string_literal) && Right.is(tok::colon))
+ return false;
if (Left.is(Keywords.kw_assert) && Style.Language == FormatStyle::LK_Java)
return true;
if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
@@ -2897,16 +2998,17 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
(Left.is(TT_AttributeParen) || Left.canBePointerOrReferenceQualifier()))
return true;
- return (Left.Tok.isLiteral() ||
- (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
- (Style.PointerAlignment != FormatStyle::PAS_Left ||
- (Line.IsMultiVariableDeclStmt &&
- (Left.NestingLevel == 0 ||
- (Left.NestingLevel == 1 && Line.First->is(tok::kw_for)))))));
+ return (
+ Left.Tok.isLiteral() ||
+ (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
+ (getTokenPointerOrReferenceAlignment(Right) != FormatStyle::PAS_Left ||
+ (Line.IsMultiVariableDeclStmt &&
+ (Left.NestingLevel == 0 ||
+ (Left.NestingLevel == 1 && Line.First->is(tok::kw_for)))))));
}
if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
(!Left.is(TT_PointerOrReference) ||
- (Style.PointerAlignment != FormatStyle::PAS_Right &&
+ (getTokenPointerOrReferenceAlignment(Left) != FormatStyle::PAS_Right &&
!Line.IsMultiVariableDeclStmt)))
return true;
if (Left.is(TT_PointerOrReference)) {
@@ -2922,7 +3024,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
(Right.is(tok::l_brace) && Right.is(BK_Block)) ||
(!Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
tok::l_paren) &&
- (Style.PointerAlignment != FormatStyle::PAS_Right &&
+ (getTokenPointerOrReferenceAlignment(Left) !=
+ FormatStyle::PAS_Right &&
!Line.IsMultiVariableDeclStmt) &&
Left.Previous &&
!Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
@@ -2957,6 +3060,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Space between the type and the * in:
// operator void*()
// operator char*()
+ // operator void const*()
+ // operator void volatile*()
// operator /*comment*/ const char*()
// operator volatile /*comment*/ char*()
// operator Foo*()
@@ -2964,11 +3069,15 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// operator std::Foo*()
// operator C<T>::D<U>*()
// dependent on PointerAlignment style.
- if (Previous &&
- (Previous->endsSequence(tok::kw_operator) ||
- Previous->endsSequence(tok::kw_const, tok::kw_operator) ||
- Previous->endsSequence(tok::kw_volatile, tok::kw_operator)))
- return (Style.PointerAlignment != FormatStyle::PAS_Left);
+ if (Previous) {
+ if (Previous->endsSequence(tok::kw_operator))
+ return (Style.PointerAlignment != FormatStyle::PAS_Left);
+ if (Previous->is(tok::kw_const) || Previous->is(tok::kw_volatile))
+ return (Style.PointerAlignment != FormatStyle::PAS_Left) ||
+ (Style.SpaceAroundPointerQualifiers ==
+ FormatStyle::SAPQ_After) ||
+ (Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both);
+ }
}
const auto SpaceRequiredForArrayInitializerLSquare =
[](const FormatToken &LSquareTok, const FormatStyle &Style) {
@@ -3023,9 +3132,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
(Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
return true;
if (Style.SpaceBeforeParens ==
- FormatStyle::SBPO_ControlStatementsExceptForEachMacros &&
+ FormatStyle::SBPO_ControlStatementsExceptControlMacros &&
Left.is(TT_ForEachMacro))
return false;
+ if (Style.SpaceBeforeParens ==
+ FormatStyle::SBPO_ControlStatementsExceptControlMacros &&
+ Left.is(TT_IfMacro))
+ return false;
return Line.Type == LT_ObjCDecl || Left.is(tok::semi) ||
(Style.SpaceBeforeParens != FormatStyle::SBPO_Never &&
(Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while,
@@ -3081,13 +3194,16 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// Match const and volatile ref-qualifiers without any additional
// qualifiers such as
// void Fn() const &;
- return Style.PointerAlignment != FormatStyle::PAS_Left;
+ return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
return true;
}
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
+ auto HasExistingWhitespace = [&Right]() {
+ return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
+ };
if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
return true; // Never ever merge two identifiers.
if (Style.isCpp()) {
@@ -3120,7 +3236,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Preserve the existence of a space before a percent for cases like 0x%04x
// and "%d %d"
if (Left.is(tok::numeric_constant) && Right.is(tok::percent))
- return Right.WhitespaceRange.getEnd() != Right.WhitespaceRange.getBegin();
+ return HasExistingWhitespace();
+ } else if (Style.isJson()) {
+ if (Right.is(tok::colon))
+ return false;
} else if (Style.isCSharp()) {
// Require spaces around '{' and before '}' unless they appear in
// interpolated strings. Interpolated strings are merged into a single token
@@ -3146,7 +3265,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
// Spaces around '=>'.
- if (Left.is(TT_JsFatArrow) || Right.is(TT_JsFatArrow))
+ if (Left.is(TT_FatArrow) || Right.is(TT_FatArrow))
return true;
// No spaces around attribute target colons
@@ -3165,30 +3284,14 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Right.is(TT_CSharpNullable))
return false;
- // Require space after ? in nullable types except in generics and casts.
- if (Left.is(TT_CSharpNullable))
- return !Right.isOneOf(TT_TemplateCloser, tok::r_paren);
-
- // No space before or after '?.'.
- if (Left.is(TT_CSharpNullConditional) || Right.is(TT_CSharpNullConditional))
- return false;
-
- // Space before and after '??'.
- if (Left.is(TT_CSharpNullCoalescing) || Right.is(TT_CSharpNullCoalescing))
- return true;
-
- // No space before '?['.
- if (Right.is(TT_CSharpNullConditionalLSquare))
+ // No space before null forgiving '!'.
+ if (Right.is(TT_NonNullAssertion))
return false;
// No space between consecutive commas '[,,]'.
if (Left.is(tok::comma) && Right.is(tok::comma))
return false;
- // Possible space inside `?[ 0 ]`.
- if (Left.is(TT_CSharpNullConditionalLSquare))
- return Style.SpacesInSquareBrackets;
-
// space after var in `var (key, value)`
if (Left.is(Keywords.kw_var) && Right.is(tok::l_paren))
return true;
@@ -3210,7 +3313,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Right.is(tok::l_paren))
return true;
} else if (Style.Language == FormatStyle::LK_JavaScript) {
- if (Left.is(TT_JsFatArrow))
+ if (Left.is(TT_FatArrow))
return true;
// for await ( ...
if (Right.is(tok::l_paren) && Left.is(Keywords.kw_await) && Left.Previous &&
@@ -3221,7 +3324,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
const FormatToken *Next = Right.MatchingParen->getNextNonComment();
// An async arrow function, for example: `x = async () => foo();`,
// as opposed to calling a function called async: `x = async();`
- if (Next && Next->is(TT_JsFatArrow))
+ if (Next && Next->is(TT_FatArrow))
return true;
}
if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
@@ -3295,9 +3398,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// locations that should have whitespace following are identified by the
// above set of follower tokens.
return false;
- if (Right.is(TT_JsNonNullAssertion))
+ if (Right.is(TT_NonNullAssertion))
return false;
- if (Left.is(TT_JsNonNullAssertion) &&
+ if (Left.is(TT_NonNullAssertion) &&
Right.isOneOf(Keywords.kw_as, Keywords.kw_in))
return true; // "x! as string", "x! in y"
} else if (Style.Language == FormatStyle::LK_Java) {
@@ -3313,7 +3416,7 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
}
if (Left.is(TT_ImplicitStringLiteral))
- return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
+ return HasExistingWhitespace();
if (Line.Type == LT_ObjCMethodDecl) {
if (Left.is(TT_ObjCMethodSpecifier))
return true;
@@ -3370,6 +3473,12 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Style.BitFieldColonSpacing == FormatStyle::BFCS_Before;
return true;
}
+ // Do not merge "- -" into "--".
+ if ((Left.isOneOf(tok::minus, tok::minusminus) &&
+ Right.isOneOf(tok::minus, tok::minusminus)) ||
+ (Left.isOneOf(tok::plus, tok::plusplus) &&
+ Right.isOneOf(tok::plus, tok::plusplus)))
+ return true;
if (Left.is(TT_UnaryOperator)) {
if (!Right.is(tok::l_paren)) {
// The alternative operators for ~ and ! are "compl" and "not".
@@ -3394,12 +3503,21 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return Style.SpaceAfterCStyleCast ||
Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
+ auto ShouldAddSpacesInAngles = [this, &HasExistingWhitespace]() {
+ if (this->Style.SpacesInAngles == FormatStyle::SIAS_Always)
+ return true;
+ if (this->Style.SpacesInAngles == FormatStyle::SIAS_Leave)
+ return HasExistingWhitespace();
+ return false;
+ };
+
if (Left.is(tok::greater) && Right.is(tok::greater)) {
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral)))
return !Style.Cpp11BracedListStyle;
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
- (Style.Standard < FormatStyle::LS_Cpp11 || Style.SpacesInAngles);
+ ((Style.Standard < FormatStyle::LS_Cpp11) ||
+ ShouldAddSpacesInAngles());
}
if (Right.isOneOf(tok::arrow, tok::arrowstar, tok::periodstar) ||
Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
@@ -3415,26 +3533,27 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Generally don't remove existing spaces between an identifier and "::".
// The identifier might actually be a macro name such as ALWAYS_INLINE. If
// this turns out to be too lenient, add analysis of the identifier itself.
- return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
+ return HasExistingWhitespace();
if (Right.is(tok::coloncolon) &&
!Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
// Put a space between < and :: in vector< ::std::string >
return (Left.is(TT_TemplateOpener) &&
- (Style.Standard < FormatStyle::LS_Cpp11 || Style.SpacesInAngles)) ||
+ ((Style.Standard < FormatStyle::LS_Cpp11) ||
+ ShouldAddSpacesInAngles())) ||
!(Left.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
tok::kw___super, TT_TemplateOpener,
TT_TemplateCloser)) ||
(Left.is(tok::l_paren) && Style.SpacesInParentheses);
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
- return Style.SpacesInAngles;
+ return ShouldAddSpacesInAngles();
// Space before TT_StructuredBindingLSquare.
if (Right.is(TT_StructuredBindingLSquare))
return !Left.isOneOf(tok::amp, tok::ampamp) ||
- Style.PointerAlignment != FormatStyle::PAS_Right;
+ getTokenReferenceAlignment(Left) != FormatStyle::PAS_Right;
// Space before & or && following a TT_StructuredBindingLSquare.
if (Right.Next && Right.Next->is(TT_StructuredBindingLSquare) &&
Right.isOneOf(tok::amp, tok::ampamp))
- return Style.PointerAlignment != FormatStyle::PAS_Left;
+ return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
(Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
!Right.is(tok::r_paren)))
@@ -3473,42 +3592,11 @@ isItAnEmptyLambdaAllowed(const FormatToken &Tok,
return Tok.Children.empty() && ShortLambdaOption != FormatStyle::SLS_None;
}
-static bool
-isItAInlineLambdaAllowed(const FormatToken &Tok,
- FormatStyle::ShortLambdaStyle ShortLambdaOption) {
- return (ShortLambdaOption == FormatStyle::SLS_Inline &&
- IsFunctionArgument(Tok)) ||
- (ShortLambdaOption == FormatStyle::SLS_All);
-}
-
-static bool isOneChildWithoutMustBreakBefore(const FormatToken &Tok) {
- if (Tok.Children.size() != 1)
- return false;
- FormatToken *curElt = Tok.Children[0]->First;
- while (curElt) {
- if (curElt->MustBreakBefore)
- return false;
- curElt = curElt->Next;
- }
- return true;
-}
static bool isAllmanLambdaBrace(const FormatToken &Tok) {
return (Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
!Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral));
}
-static bool isAllmanBraceIncludedBreakableLambda(
- const FormatToken &Tok, FormatStyle::ShortLambdaStyle ShortLambdaOption) {
- if (!isAllmanLambdaBrace(Tok))
- return false;
-
- if (isItAnEmptyLambdaAllowed(Tok, ShortLambdaOption))
- return false;
-
- return !isItAInlineLambdaAllowed(Tok, ShortLambdaOption) ||
- !isOneChildWithoutMustBreakBefore(Tok);
-}
-
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken &Right) {
const FormatToken &Left = *Right.Previous;
@@ -3521,6 +3609,17 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return false;
if (Right.is(TT_CSharpGenericTypeConstraint))
return true;
+
+ // Break after C# [...] and before public/protected/private/internal.
+ if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
+ (Right.isAccessSpecifier(/*ColonRequired=*/false) ||
+ Right.is(Keywords.kw_internal)))
+ return true;
+ // Break between ] and [ but only when there are really 2 attributes.
+ if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) &&
+ Left.is(tok::r_square) && Right.is(tok::l_square))
+ return true;
+
} else if (Style.Language == FormatStyle::LK_JavaScript) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
@@ -3545,7 +3644,7 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// instead of bin-packing.
return true;
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous &&
- Left.Previous->is(TT_JsFatArrow)) {
+ Left.Previous->is(TT_FatArrow)) {
// JS arrow function (=> {...}).
switch (Style.AllowShortLambdasOnASingleLine) {
case FormatStyle::SLS_All:
@@ -3584,6 +3683,26 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
+ // Basic JSON newline processing.
+ if (Style.isJson()) {
+ // Always break after a JSON record opener.
+ // {
+ // }
+ if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace))
+ return true;
+ // Always break after a JSON array opener.
+ // [
+ // ]
+ if (Left.is(TT_ArrayInitializerLSquare) && Left.is(tok::l_square) &&
+ !Right.is(tok::r_square))
+ return true;
+ // Always break afer successive entries.
+ // 1,
+ // 2
+ if (Left.is(tok::comma))
+ return true;
+ }
+
// If the last token before a '}', ']', or ')' is a comma or a trailing
// comment, the intention is to insert a line break after it in order to make
// shuffling around entries easier. Import statements, especially in
@@ -3640,6 +3759,9 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
Right.is(TT_InheritanceComma))
return true;
+ if (Style.BreakInheritanceList == FormatStyle::BILS_AfterComma &&
+ Left.is(TT_InheritanceComma))
+ return true;
if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
// Multiline raw string literals are special wrt. line breaks. The author
// has made a deliberate choice and might have aligned the contents of the
@@ -3656,13 +3778,6 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_InlineASMBrace))
return Right.HasUnescapedNewline;
- auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
- if (Style.BraceWrapping.BeforeLambdaBody &&
- (isAllmanBraceIncludedBreakableLambda(Left, ShortLambdaOption) ||
- isAllmanBraceIncludedBreakableLambda(Right, ShortLambdaOption))) {
- return true;
- }
-
if (isAllmanBrace(Left) || isAllmanBrace(Right))
return (Line.startsWith(tok::kw_enum) && Style.BraceWrapping.AfterEnum) ||
(Line.startsWith(tok::kw_typedef, tok::kw_enum) &&
@@ -3685,6 +3800,11 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
return true;
}
+ if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace) &&
+ Left.isOneOf(tok::star, tok::amp, tok::ampamp, TT_TemplateCloser)) {
+ return true;
+ }
+
// Put multiple Java annotation on a new line.
if ((Style.Language == FormatStyle::LK_Java ||
Style.Language == FormatStyle::LK_JavaScript) &&
@@ -3819,6 +3939,10 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// Only break after commas for generic type constraints.
if (Line.First->is(TT_CSharpGenericTypeConstraint))
return Left.is(TT_CSharpGenericTypeConstraintComma);
+ // Keep nullable operators attached to their identifiers.
+ if (Right.is(TT_CSharpNullable)) {
+ return false;
+ }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
Keywords.kw_implements))
@@ -3841,7 +3965,10 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Left.isOneOf(tok::r_square, tok::r_paren)) &&
Right.isOneOf(tok::l_square, tok::l_paren))
return false; // Otherwise automatic semicolon insertion would trigger.
- if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace))
+ if (NonComment && NonComment->is(tok::identifier) &&
+ NonComment->TokenText == "asserts")
+ return false;
+ if (Left.is(TT_FatArrow) && Right.is(tok::l_brace))
return false;
if (Left.is(TT_JsTypeColon))
return true;
@@ -3877,7 +4004,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
}
if (Left.is(Keywords.kw_as))
return true;
- if (Left.is(TT_JsNonNullAssertion))
+ if (Left.is(TT_NonNullAssertion))
return true;
if (Left.is(Keywords.kw_declare) &&
Right.isOneOf(Keywords.kw_module, tok::kw_namespace,
@@ -3909,7 +4036,8 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return !Right.is(tok::l_paren);
if (Right.is(TT_PointerOrReference))
return Line.IsMultiVariableDeclStmt ||
- (Style.PointerAlignment == FormatStyle::PAS_Right &&
+ (getTokenPointerOrReferenceAlignment(Right) ==
+ FormatStyle::PAS_Right &&
(!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName)));
if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
Right.is(tok::kw_operator))
@@ -4081,7 +4209,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
- if (Style.BraceWrapping.BeforeLambdaBody) {
+ if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) {
if (isAllmanLambdaBrace(Left))
return !isItAnEmptyLambdaAllowed(Left, ShortLambdaOption);
if (isAllmanLambdaBrace(Right))
@@ -4093,7 +4221,6 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Right.isMemberAccess() ||
Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow, tok::lessless,
tok::colon, tok::l_square, tok::at) ||
- (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) ||
(Left.is(tok::r_paren) &&
Right.isOneOf(tok::identifier, tok::kw_const)) ||
(Left.is(tok::l_paren) && !Right.is(tok::r_paren)) ||
@@ -4124,5 +4251,41 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
llvm::errs() << "----\n";
}
+FormatStyle::PointerAlignmentStyle
+TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) {
+ assert(Reference.isOneOf(tok::amp, tok::ampamp));
+ switch (Style.ReferenceAlignment) {
+ case FormatStyle::RAS_Pointer:
+ return Style.PointerAlignment;
+ case FormatStyle::RAS_Left:
+ return FormatStyle::PAS_Left;
+ case FormatStyle::RAS_Right:
+ return FormatStyle::PAS_Right;
+ case FormatStyle::RAS_Middle:
+ return FormatStyle::PAS_Middle;
+ }
+ assert(0); //"Unhandled value of ReferenceAlignment"
+ return Style.PointerAlignment;
+}
+
+FormatStyle::PointerAlignmentStyle
+TokenAnnotator::getTokenPointerOrReferenceAlignment(
+ const FormatToken &PointerOrReference) {
+ if (PointerOrReference.isOneOf(tok::amp, tok::ampamp)) {
+ switch (Style.ReferenceAlignment) {
+ case FormatStyle::RAS_Pointer:
+ return Style.PointerAlignment;
+ case FormatStyle::RAS_Left:
+ return FormatStyle::PAS_Left;
+ case FormatStyle::RAS_Right:
+ return FormatStyle::PAS_Right;
+ case FormatStyle::RAS_Middle:
+ return FormatStyle::PAS_Middle;
+ }
+ }
+ assert(PointerOrReference.is(tok::star));
+ return Style.PointerAlignment;
+}
+
} // namespace format
} // namespace clang
diff --git a/clang/lib/Format/TokenAnnotator.h b/clang/lib/Format/TokenAnnotator.h
index 537710029b00..0f9c02dbeb34 100644
--- a/clang/lib/Format/TokenAnnotator.h
+++ b/clang/lib/Format/TokenAnnotator.h
@@ -31,7 +31,8 @@ enum LineType {
LT_ObjCProperty, // An @property line.
LT_Other,
LT_PreprocessorDirective,
- LT_VirtualFunctionDecl
+ LT_VirtualFunctionDecl,
+ LT_ArrayOfStructInitializer,
};
class AnnotatedLine {
@@ -189,6 +190,17 @@ private:
void calculateUnbreakableTailLengths(AnnotatedLine &Line);
+ void calculateArrayInitializerColumnList(AnnotatedLine &Line);
+
+ FormatToken *calculateInitializerColumnList(AnnotatedLine &Line,
+ FormatToken *CurrentToken,
+ unsigned Depth);
+ FormatStyle::PointerAlignmentStyle
+ getTokenReferenceAlignment(const FormatToken &PointerOrReference);
+
+ FormatStyle::PointerAlignmentStyle
+ getTokenPointerOrReferenceAlignment(const FormatToken &PointerOrReference);
+
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
diff --git a/clang/lib/Format/UnwrappedLineFormatter.cpp b/clang/lib/Format/UnwrappedLineFormatter.cpp
index d1138bbc9c36..cca85c1074de 100644
--- a/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -57,7 +57,9 @@ public:
while (IndentForLevel.size() <= Line.Level)
IndentForLevel.push_back(-1);
if (Line.InPPDirective) {
- Indent = Line.Level * Style.IndentWidth + AdditionalIndent;
+ unsigned IndentWidth =
+ (Style.PPIndentWidth >= 0) ? Style.PPIndentWidth : Style.IndentWidth;
+ Indent = Line.Level * IndentWidth + AdditionalIndent;
} else {
IndentForLevel.resize(Line.Level + 1);
Indent = getIndent(IndentForLevel, Line.Level);
@@ -101,8 +103,13 @@ private:
if (RootToken.isAccessSpecifier(false) ||
RootToken.isObjCAccessSpecifier() ||
(RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
- RootToken.Next && RootToken.Next->is(tok::colon)))
- return Style.AccessModifierOffset;
+ RootToken.Next && RootToken.Next->is(tok::colon))) {
+ // The AccessModifierOffset may be overriden by IndentAccessModifiers,
+ // in which case we take a negative value of the IndentWidth to simulate
+ // the upper indent level.
+ return Style.IndentAccessModifiers ? -Style.IndentWidth
+ : Style.AccessModifierOffset;
+ }
return 0;
}
@@ -371,7 +378,7 @@ private:
if (Previous->is(tok::comment))
Previous = Previous->getPreviousNonComment();
if (Previous) {
- if (Previous->is(tok::greater))
+ if (Previous->is(tok::greater) && !I[-1]->InPPDirective)
return 0;
if (Previous->is(tok::identifier)) {
const FormatToken *PreviousPrevious =
@@ -416,7 +423,17 @@ private:
}
return MergedLines;
}
- if (TheLine->First->is(tok::kw_if)) {
+ auto IsElseLine = [&TheLine]() -> bool {
+ const FormatToken *First = TheLine->First;
+ if (First->is(tok::kw_else))
+ return true;
+
+ return First->is(tok::r_brace) && First->Next &&
+ First->Next->is(tok::kw_else);
+ };
+ if (TheLine->First->is(tok::kw_if) ||
+ (IsElseLine() && (Style.AllowShortIfStatementsOnASingleLine ==
+ FormatStyle::SIS_AllIfsAndElse))) {
return Style.AllowShortIfStatementsOnASingleLine
? tryMergeSimpleControlStatement(I, E, Limit)
: 0;
@@ -466,7 +483,8 @@ private:
return 0;
Limit = limitConsideringMacros(I + 1, E, Limit);
AnnotatedLine &Line = **I;
- if (!Line.First->is(tok::kw_do) && Line.Last->isNot(tok::r_paren))
+ if (!Line.First->is(tok::kw_do) && !Line.First->is(tok::kw_else) &&
+ !Line.Last->is(tok::kw_else) && Line.Last->isNot(tok::r_paren))
return 0;
// Only merge do while if do is the only statement on the line.
if (Line.First->is(tok::kw_do) && !Line.Last->is(tok::kw_do))
@@ -477,7 +495,8 @@ private:
TT_LineComment))
return 0;
// Only inline simple if's (no nested if or else), unless specified
- if (Style.AllowShortIfStatementsOnASingleLine != FormatStyle::SIS_Always) {
+ if (Style.AllowShortIfStatementsOnASingleLine ==
+ FormatStyle::SIS_WithoutElse) {
if (I + 2 != E && Line.startsWith(tok::kw_if) &&
I[2]->First->is(tok::kw_else))
return 0;
@@ -804,8 +823,20 @@ protected:
return true;
if (NewLine) {
- int AdditionalIndent = State.Stack.back().Indent -
- Previous.Children[0]->Level * Style.IndentWidth;
+ const ParenState &P = State.Stack.back();
+
+ int AdditionalIndent =
+ P.Indent - Previous.Children[0]->Level * Style.IndentWidth;
+
+ if (Style.LambdaBodyIndentation == FormatStyle::LBI_OuterScope &&
+ P.NestedBlockIndent == P.LastSpace) {
+ if (State.NextToken->MatchingParen &&
+ State.NextToken->MatchingParen->is(TT_LambdaLBrace)) {
+ State.Stack.pop_back();
+ }
+ if (LBrace->is(TT_LambdaLBrace))
+ AdditionalIndent = 0;
+ }
Penalty +=
BlockFormatter->format(Previous.Children, DryRun, AdditionalIndent,
@@ -1103,6 +1134,7 @@ unsigned UnwrappedLineFormatter::format(
unsigned Penalty = 0;
LevelIndentTracker IndentTracker(Style, Keywords, Lines[0]->Level,
AdditionalIndent);
+ const AnnotatedLine *PrevPrevLine = nullptr;
const AnnotatedLine *PreviousLine = nullptr;
const AnnotatedLine *NextLine = nullptr;
@@ -1141,7 +1173,7 @@ unsigned UnwrappedLineFormatter::format(
if (ShouldFormat && TheLine.Type != LT_Invalid) {
if (!DryRun) {
bool LastLine = Line->First->is(tok::eof);
- formatFirstToken(TheLine, PreviousLine, Lines, Indent,
+ formatFirstToken(TheLine, PreviousLine, PrevPrevLine, Lines, Indent,
LastLine ? LastStartColumn : NextStartColumn + Indent);
}
@@ -1187,7 +1219,7 @@ unsigned UnwrappedLineFormatter::format(
TheLine.LeadingEmptyLinesAffected);
// Format the first token.
if (ReformatLeadingWhitespace)
- formatFirstToken(TheLine, PreviousLine, Lines,
+ formatFirstToken(TheLine, PreviousLine, PrevPrevLine, Lines,
TheLine.First->OriginalColumn,
TheLine.First->OriginalColumn);
else
@@ -1203,6 +1235,7 @@ unsigned UnwrappedLineFormatter::format(
}
if (!DryRun)
markFinalized(TheLine.First);
+ PrevPrevLine = PreviousLine;
PreviousLine = &TheLine;
}
PenaltyCache[CacheKey] = Penalty;
@@ -1211,6 +1244,7 @@ unsigned UnwrappedLineFormatter::format(
void UnwrappedLineFormatter::formatFirstToken(
const AnnotatedLine &Line, const AnnotatedLine *PreviousLine,
+ const AnnotatedLine *PrevPrevLine,
const SmallVectorImpl<AnnotatedLine *> &Lines, unsigned Indent,
unsigned NewlineIndent) {
FormatToken &RootToken = *Line.First;
@@ -1242,6 +1276,8 @@ void UnwrappedLineFormatter::formatFirstToken(
if (!Style.KeepEmptyLinesAtTheStartOfBlocks && PreviousLine &&
PreviousLine->Last->is(tok::l_brace) &&
!PreviousLine->startsWithNamespace() &&
+ !(PrevPrevLine && PrevPrevLine->startsWithNamespace() &&
+ PreviousLine->startsWith(tok::l_brace)) &&
!startsExternCBlock(*PreviousLine))
Newlines = 1;
@@ -1249,16 +1285,17 @@ void UnwrappedLineFormatter::formatFirstToken(
if (PreviousLine && RootToken.isAccessSpecifier()) {
switch (Style.EmptyLineBeforeAccessModifier) {
case FormatStyle::ELBAMS_Never:
- if (RootToken.NewlinesBefore > 1)
+ if (Newlines > 1)
Newlines = 1;
break;
case FormatStyle::ELBAMS_Leave:
Newlines = std::max(RootToken.NewlinesBefore, 1u);
break;
case FormatStyle::ELBAMS_LogicalBlock:
- if (PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) &&
- RootToken.NewlinesBefore <= 1)
+ if (PreviousLine->Last->isOneOf(tok::semi, tok::r_brace) && Newlines <= 1)
Newlines = 2;
+ if (PreviousLine->First->isAccessSpecifier())
+ Newlines = 1; // Previous is an access modifier remove all new lines.
break;
case FormatStyle::ELBAMS_Always: {
const FormatToken *previousToken;
@@ -1266,28 +1303,38 @@ void UnwrappedLineFormatter::formatFirstToken(
previousToken = PreviousLine->Last->getPreviousNonComment();
else
previousToken = PreviousLine->Last;
- if ((!previousToken || !previousToken->is(tok::l_brace)) &&
- RootToken.NewlinesBefore <= 1)
+ if ((!previousToken || !previousToken->is(tok::l_brace)) && Newlines <= 1)
Newlines = 2;
} break;
}
}
- // Remove empty lines after access specifiers.
+ // Insert or remove empty line after access specifiers.
if (PreviousLine && PreviousLine->First->isAccessSpecifier() &&
- (!PreviousLine->InPPDirective || !RootToken.HasUnescapedNewline))
- Newlines = std::min(1u, Newlines);
+ (!PreviousLine->InPPDirective || !RootToken.HasUnescapedNewline)) {
+ // EmptyLineBeforeAccessModifier is handling the case when two access
+ // modifiers follow each other.
+ if (!RootToken.isAccessSpecifier()) {
+ switch (Style.EmptyLineAfterAccessModifier) {
+ case FormatStyle::ELAAMS_Never:
+ Newlines = 1;
+ break;
+ case FormatStyle::ELAAMS_Leave:
+ Newlines = std::max(Newlines, 1u);
+ break;
+ case FormatStyle::ELAAMS_Always:
+ if (RootToken.is(tok::r_brace)) // Do not add at end of class.
+ Newlines = 1u;
+ else
+ Newlines = std::max(Newlines, 2u);
+ break;
+ }
+ }
+ }
if (Newlines)
Indent = NewlineIndent;
- // If in Whitemsmiths mode, indent start and end of blocks
- if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
- if (RootToken.isOneOf(tok::l_brace, tok::r_brace, tok::kw_case,
- tok::kw_default))
- Indent += Style.IndentWidth;
- }
-
// Preprocessor directives get indented before the hash only if specified
if (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
(Line.Type == LT_PreprocessorDirective ||
diff --git a/clang/lib/Format/UnwrappedLineFormatter.h b/clang/lib/Format/UnwrappedLineFormatter.h
index a1ff16999589..3e33de07fa12 100644
--- a/clang/lib/Format/UnwrappedLineFormatter.h
+++ b/clang/lib/Format/UnwrappedLineFormatter.h
@@ -47,6 +47,7 @@ private:
/// of the \c UnwrappedLine if there was no structural parsing error.
void formatFirstToken(const AnnotatedLine &Line,
const AnnotatedLine *PreviousLine,
+ const AnnotatedLine *PrevPrevLine,
const SmallVectorImpl<AnnotatedLine *> &Lines,
unsigned Indent, unsigned NewlineIndent);
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index f689a6361a3a..103e3559b120 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -431,7 +431,7 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
}
LLVM_FALLTHROUGH;
default:
- parseStructuralElement();
+ parseStructuralElement(/*IsTopLevel=*/true);
break;
}
} while (!eof());
@@ -579,17 +579,23 @@ size_t UnwrappedLineParser::computePPHash() const {
return h;
}
-void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
- bool MunchSemi) {
+void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
+ bool MunchSemi,
+ bool UnindentWhitesmithsBraces) {
assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
"'{' or macro block token expected");
const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
FormatTok->setBlockKind(BK_Block);
+ // For Whitesmiths mode, jump to the next level prior to skipping over the
+ // braces.
+ if (AddLevels > 0 && Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths)
+ ++Line->Level;
+
size_t PPStartHash = computePPHash();
unsigned InitialLevel = Line->Level;
- nextToken(/*LevelDifference=*/AddLevel ? 1 : 0);
+ nextToken(/*LevelDifference=*/AddLevels);
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
@@ -602,10 +608,16 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
? (UnwrappedLine::kInvalidIndex)
: (CurrentLines->size() - 1 - NbPreprocessorDirectives);
+ // Whitesmiths is weird here. The brace needs to be indented for the namespace
+ // block, but the block itself may not be indented depending on the style
+ // settings. This allows the format to back up one level in those cases.
+ if (UnindentWhitesmithsBraces)
+ --Line->Level;
+
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
MustBeDeclaration);
- if (AddLevel)
- ++Line->Level;
+ if (AddLevels > 0u && Style.BreakBeforeBraces != FormatStyle::BS_Whitesmiths)
+ Line->Level += AddLevels;
parseLevel(/*HasOpeningBrace=*/true);
if (eof())
@@ -621,7 +633,7 @@ void UnwrappedLineParser::parseBlock(bool MustBeDeclaration, bool AddLevel,
size_t PPEndHash = computePPHash();
// Munch the closing brace.
- nextToken(/*LevelDifference=*/AddLevel ? -1 : 0);
+ nextToken(/*LevelDifference=*/-AddLevels);
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
@@ -739,6 +751,8 @@ void UnwrappedLineParser::parsePPDirective() {
case tok::pp_else:
parsePPElse();
break;
+ case tok::pp_elifdef:
+ case tok::pp_elifndef:
case tok::pp_elif:
parsePPElIf();
break;
@@ -980,6 +994,33 @@ static bool isJSDeclOrStmt(const AdditionalKeywords &Keywords,
Keywords.kw_import, tok::kw_export);
}
+// This function checks whether a token starts the first parameter declaration
+// in a K&R C (aka C78) function definition, e.g.:
+// int f(a, b)
+// short a, b;
+// {
+// return a + b;
+// }
+static bool isC78ParameterDecl(const FormatToken *Tok) {
+ if (!Tok)
+ return false;
+
+ if (!Tok->isOneOf(tok::kw_int, tok::kw_char, tok::kw_float, tok::kw_double,
+ tok::kw_struct, tok::kw_union, tok::kw_long, tok::kw_short,
+ tok::kw_unsigned, tok::kw_register, tok::identifier))
+ return false;
+
+ Tok = Tok->Previous;
+ if (!Tok || Tok->isNot(tok::r_paren))
+ return false;
+
+ Tok = Tok->Previous;
+ if (!Tok || Tok->isNot(tok::identifier))
+ return false;
+
+ return Tok->Previous && Tok->Previous->isOneOf(tok::l_paren, tok::comma);
+}
+
// readTokenWithJavaScriptASI reads the next token and terminates the current
// line if JavaScript Automatic Semicolon Insertion must
// happen between the current token and the next token.
@@ -1027,7 +1068,7 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
return addUnwrappedLine();
}
-void UnwrappedLineParser::parseStructuralElement() {
+void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
assert(!FormatTok->is(tok::l_brace));
if (Style.Language == FormatStyle::LK_TableGen &&
FormatTok->is(tok::pp_include)) {
@@ -1125,12 +1166,12 @@ void UnwrappedLineParser::parseStructuralElement() {
if (Style.BraceWrapping.AfterExternBlock) {
addUnwrappedLine();
}
- parseBlock(/*MustBeDeclaration=*/true,
- /*AddLevel=*/Style.BraceWrapping.AfterExternBlock);
+ unsigned AddLevels = Style.BraceWrapping.AfterExternBlock ? 1u : 0u;
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels);
} else {
- parseBlock(/*MustBeDeclaration=*/true,
- /*AddLevel=*/Style.IndentExternBlock ==
- FormatStyle::IEBS_Indent);
+ unsigned AddLevels =
+ Style.IndentExternBlock == FormatStyle::IEBS_Indent ? 1u : 0u;
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels);
}
addUnwrappedLine();
return;
@@ -1159,7 +1200,7 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
if (FormatTok->is(TT_MacroBlockBegin)) {
- parseBlock(/*MustBeDeclaration=*/false, /*AddLevel=*/true,
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
/*MunchSemi=*/false);
return;
}
@@ -1304,15 +1345,7 @@ void UnwrappedLineParser::parseStructuralElement() {
case tok::kw_struct:
case tok::kw_union:
case tok::kw_class:
- // parseRecord falls through and does not yet add an unwrapped line as a
- // record declaration or definition can start a structural element.
- parseRecord();
- // This does not apply for Java, JavaScript and C#.
- if (Style.Language == FormatStyle::LK_Java ||
- Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
- if (FormatTok->is(tok::semi))
- nextToken();
- addUnwrappedLine();
+ if (parseStructLike()) {
return;
}
break;
@@ -1337,6 +1370,18 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
case tok::l_paren:
parseParens();
+ // Break the unwrapped line if a K&R C function definition has a parameter
+ // declaration.
+ if (!IsTopLevel || !Style.isCpp())
+ break;
+ if (!Previous || Previous->isNot(tok::identifier))
+ break;
+ if (Previous->Previous && Previous->Previous->is(tok::at))
+ break;
+ if (isC78ParameterDecl(FormatTok)) {
+ addUnwrappedLine();
+ return;
+ }
break;
case tok::kw_operator:
nextToken();
@@ -1426,6 +1471,13 @@ void UnwrappedLineParser::parseStructuralElement() {
return;
}
+ if (FormatTok->is(Keywords.kw_interface)) {
+ if (parseStructLike()) {
+ return;
+ }
+ break;
+ }
+
if (Style.isCpp() && FormatTok->is(TT_StatementMacro)) {
parseStatementMacro();
return;
@@ -1469,9 +1521,9 @@ void UnwrappedLineParser::parseStructuralElement() {
}
case tok::equal:
// Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
- // TT_JsFatArrow. The always start an expression or a child block if
- // followed by a curly.
- if (FormatTok->is(TT_JsFatArrow)) {
+ // TT_FatArrow. They always start an expression or a child block if
+ // followed by a curly brace.
+ if (FormatTok->is(TT_FatArrow)) {
nextToken();
if (FormatTok->is(tok::l_brace)) {
// C# may break after => if the next character is a newline.
@@ -1577,7 +1629,7 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
--Line->Level;
break;
case tok::equal:
- if (FormatTok->is(TT_JsFatArrow)) {
+ if (FormatTok->is(TT_FatArrow)) {
++Line->Level;
do {
nextToken();
@@ -1777,14 +1829,20 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
bool HasError = false;
// FIXME: Once we have an expression parser in the UnwrappedLineParser,
- // replace this by using parseAssigmentExpression() inside.
+ // replace this by using parseAssignmentExpression() inside.
do {
if (Style.isCSharp()) {
- if (FormatTok->is(TT_JsFatArrow)) {
+ // Fat arrows (=>) have tok::TokenKind tok::equal but TokenType
+ // TT_FatArrow. They always start an expression or a child block if
+ // followed by a curly brace.
+ if (FormatTok->is(TT_FatArrow)) {
nextToken();
- // Fat arrows can be followed by simple expressions or by child blocks
- // in curly braces.
if (FormatTok->is(tok::l_brace)) {
+ // C# may break after => if the next character is a newline.
+ if (Style.isCSharp() && Style.BraceWrapping.AfterFunction == true) {
+ // calling `addUnwrappedLine()` here causes odd parsing errors.
+ FormatTok->MustBreakBefore = true;
+ }
parseChildBlock();
continue;
}
@@ -1796,7 +1854,7 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
tryToParseJSFunction();
continue;
}
- if (FormatTok->is(TT_JsFatArrow)) {
+ if (FormatTok->is(TT_FatArrow)) {
nextToken();
// Fat arrows can be followed by simple expressions or by child blocks
// in curly braces.
@@ -1914,6 +1972,12 @@ void UnwrappedLineParser::parseParens() {
parseBracedList();
}
break;
+ case tok::equal:
+ if (Style.isCSharp() && FormatTok->is(TT_FatArrow))
+ parseStructuralElement();
+ else
+ nextToken();
+ break;
case tok::kw_class:
if (Style.Language == FormatStyle::LK_JavaScript)
parseRecord(/*ParseAsExpr=*/true);
@@ -2008,7 +2072,15 @@ void UnwrappedLineParser::parseIfThenElse() {
parseBlock(/*MustBeDeclaration=*/false);
addUnwrappedLine();
} else if (FormatTok->Tok.is(tok::kw_if)) {
+ FormatToken *Previous = AllTokens[Tokens->getPosition() - 1];
+ bool PrecededByComment = Previous->is(tok::comment);
+ if (PrecededByComment) {
+ addUnwrappedLine();
+ ++Line->Level;
+ }
parseIfThenElse();
+ if (PrecededByComment)
+ --Line->Level;
} else {
addUnwrappedLine();
++Line->Level;
@@ -2128,15 +2200,34 @@ void UnwrappedLineParser::parseNamespace() {
if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
- bool AddLevel = Style.NamespaceIndentation == FormatStyle::NI_All ||
- (Style.NamespaceIndentation == FormatStyle::NI_Inner &&
- DeclarationScopeStack.size() > 1);
- parseBlock(/*MustBeDeclaration=*/true, AddLevel);
+ unsigned AddLevels =
+ Style.NamespaceIndentation == FormatStyle::NI_All ||
+ (Style.NamespaceIndentation == FormatStyle::NI_Inner &&
+ DeclarationScopeStack.size() > 1)
+ ? 1u
+ : 0u;
+ bool ManageWhitesmithsBraces =
+ AddLevels == 0u &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths;
+
+ // If we're in Whitesmiths mode, indent the brace if we're not indenting
+ // the whole block.
+ if (ManageWhitesmithsBraces)
+ ++Line->Level;
+
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels,
+ /*MunchSemi=*/true,
+ /*UnindentWhitesmithsBraces=*/ManageWhitesmithsBraces);
+
// Munch the semicolon after a namespace. This is more common than one would
// think. Putting the semicolon into its own line is very ugly.
if (FormatTok->Tok.is(tok::semi))
nextToken();
- addUnwrappedLine();
+
+ addUnwrappedLine(AddLevels > 0 ? LineLevel::Remove : LineLevel::Keep);
+
+ if (ManageWhitesmithsBraces)
+ --Line->Level;
}
// FIXME: Add error handling.
}
@@ -2222,6 +2313,11 @@ void UnwrappedLineParser::parseDoWhile() {
return;
}
+ // If in Whitesmiths mode, the line with the while() needs to be indented
+ // to the same level as the block.
+ if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths)
+ ++Line->Level;
+
nextToken();
parseStructuralElement();
}
@@ -2234,25 +2330,19 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
if (LeftAlignLabel)
Line->Level = 0;
- bool RemoveWhitesmithsCaseIndent =
- (!Style.IndentCaseBlocks &&
- Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths);
-
- if (RemoveWhitesmithsCaseIndent)
- --Line->Level;
-
if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
FormatTok->Tok.is(tok::l_brace)) {
- CompoundStatementIndenter Indenter(
- this, Line->Level, Style.BraceWrapping.AfterCaseLabel,
- Style.BraceWrapping.IndentBraces || RemoveWhitesmithsCaseIndent);
+ CompoundStatementIndenter Indenter(this, Line->Level,
+ Style.BraceWrapping.AfterCaseLabel,
+ Style.BraceWrapping.IndentBraces);
parseBlock(/*MustBeDeclaration=*/false);
if (FormatTok->Tok.is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always) {
addUnwrappedLine();
- if (RemoveWhitesmithsCaseIndent) {
+ if (!Style.IndentCaseBlocks &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths) {
Line->Level++;
}
}
@@ -2495,6 +2585,21 @@ bool UnwrappedLineParser::parseEnum() {
// "} n, m;" will end up in one unwrapped line.
}
+bool UnwrappedLineParser::parseStructLike() {
+ // parseRecord falls through and does not yet add an unwrapped line as a
+ // record declaration or definition can start a structural element.
+ parseRecord();
+ // This does not apply to Java, JavaScript and C#.
+ if (Style.Language == FormatStyle::LK_Java ||
+ Style.Language == FormatStyle::LK_JavaScript || Style.isCSharp()) {
+ if (FormatTok->is(tok::semi))
+ nextToken();
+ addUnwrappedLine();
+ return true;
+ }
+ return false;
+}
+
namespace {
// A class used to set and restore the Token position when peeking
// ahead in the token source.
@@ -2577,7 +2682,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
while (FormatTok) {
if (FormatTok->is(tok::l_brace)) {
// Parse the constant's class body.
- parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
+ parseBlock(/*MustBeDeclaration=*/true, /*AddLevels=*/1u,
/*MunchSemi=*/false);
} else if (FormatTok->is(tok::l_paren)) {
parseParens();
@@ -2679,8 +2784,8 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
- parseBlock(/*MustBeDeclaration=*/true, /*AddLevel=*/true,
- /*MunchSemi=*/false);
+ unsigned AddLevels = Style.IndentAccessModifiers ? 2u : 1u;
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels, /*MunchSemi=*/false);
}
}
// There is no addUnwrappedLine() here so that we fall through to parsing a
@@ -2920,17 +3025,29 @@ LLVM_ATTRIBUTE_UNUSED static void printDebugInfo(const UnwrappedLine &Line,
llvm::dbgs() << "\n";
}
-void UnwrappedLineParser::addUnwrappedLine() {
+void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
if (Line->Tokens.empty())
return;
LLVM_DEBUG({
if (CurrentLines == &Lines)
printDebugInfo(*Line);
});
+
+ // If this line closes a block when in Whitesmiths mode, remember that
+ // information so that the level can be decreased after the line is added.
+ // This has to happen after the addition of the line since the line itself
+ // needs to be indented.
+ bool ClosesWhitesmithsBlock =
+ Line->MatchingOpeningBlockLineIndex != UnwrappedLine::kInvalidIndex &&
+ Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths;
+
CurrentLines->push_back(std::move(*Line));
Line->Tokens.clear();
Line->MatchingOpeningBlockLineIndex = UnwrappedLine::kInvalidIndex;
Line->FirstStartColumn = 0;
+
+ if (ClosesWhitesmithsBlock && AdjustLevel == LineLevel::Remove)
+ --Line->Level;
if (CurrentLines == &Lines && !PreprocessorDirectives.empty()) {
CurrentLines->append(
std::make_move_iterator(PreprocessorDirectives.begin()),
diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h
index 02b328cb72de..f22bb6323e3d 100644
--- a/clang/lib/Format/UnwrappedLineParser.h
+++ b/clang/lib/Format/UnwrappedLineParser.h
@@ -85,8 +85,9 @@ private:
void reset();
void parseFile();
void parseLevel(bool HasOpeningBrace);
- void parseBlock(bool MustBeDeclaration, bool AddLevel = true,
- bool MunchSemi = true);
+ void parseBlock(bool MustBeDeclaration, unsigned AddLevels = 1u,
+ bool MunchSemi = true,
+ bool UnindentWhitesmithsBraces = false);
void parseChildBlock();
void parsePPDirective();
void parsePPDefine();
@@ -96,7 +97,7 @@ private:
void parsePPEndIf();
void parsePPUnknown();
void readTokenWithJavaScriptASI();
- void parseStructuralElement();
+ void parseStructuralElement(bool IsTopLevel = false);
bool tryToParseBracedList();
bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
@@ -113,6 +114,7 @@ private:
void parseNew();
void parseAccessSpecifier();
bool parseEnum();
+ bool parseStructLike();
void parseConcept();
void parseRequires();
void parseRequiresExpression(unsigned int OriginalLevel);
@@ -140,7 +142,12 @@ private:
bool tryToParsePropertyAccessor();
void tryToParseJSFunction();
bool tryToParseSimpleAttribute();
- void addUnwrappedLine();
+
+ // Used by addUnwrappedLine to denote whether to keep or remove a level
+ // when resetting the line state.
+ enum class LineLevel { Remove, Keep };
+
+ void addUnwrappedLine(LineLevel AdjustLevel = LineLevel::Remove);
bool eof() const;
// LevelDifference is the difference of levels after and before the current
// token. For example:
diff --git a/clang/lib/Format/UsingDeclarationsSorter.cpp b/clang/lib/Format/UsingDeclarationsSorter.cpp
index b6559db61d0c..5608a5a75953 100644
--- a/clang/lib/Format/UsingDeclarationsSorter.cpp
+++ b/clang/lib/Format/UsingDeclarationsSorter.cpp
@@ -48,7 +48,7 @@ int compareLabels(StringRef A, StringRef B) {
return -1;
// Two names within a group compare case-insensitively.
- return NamesA[I].compare_lower(NamesB[I]);
+ return NamesA[I].compare_insensitive(NamesB[I]);
}
// I is the last index of NamesB and NamesB[I] is a non-namespace name.
@@ -57,7 +57,7 @@ int compareLabels(StringRef A, StringRef B) {
return 1;
// Two namespaces names within a group compare case-insensitively.
- int C = NamesA[I].compare_lower(NamesB[I]);
+ int C = NamesA[I].compare_insensitive(NamesB[I]);
if (C != 0)
return C;
}
diff --git a/clang/lib/Format/WhitespaceManager.cpp b/clang/lib/Format/WhitespaceManager.cpp
index 7d6964b7c72f..ca2222d1feff 100644
--- a/clang/lib/Format/WhitespaceManager.cpp
+++ b/clang/lib/Format/WhitespaceManager.cpp
@@ -13,6 +13,8 @@
#include "WhitespaceManager.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
namespace clang {
namespace format {
@@ -100,6 +102,7 @@ const tooling::Replacements &WhitespaceManager::generateReplacements() {
alignChainedConditionals();
alignTrailingComments();
alignEscapedNewlines();
+ alignArrayInitializers();
generateChanges();
return Replaces;
@@ -262,7 +265,8 @@ void WhitespaceManager::calculateLineBreakInformation() {
// Align a single sequence of tokens, see AlignTokens below.
template <typename F>
static void
-AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
+AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
+ unsigned Column, F &&Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes) {
bool FoundMatchOnLine = false;
int Shift = 0;
@@ -278,6 +282,14 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
// double z);
// In the above example, we need to take special care to ensure that
// 'double z' is indented along with it's owning function 'b'.
+ // The same holds for calling a function:
+ // double a = foo(x);
+ // int b = bar(foo(y),
+ // foo(z));
+ // Similar for broken string literals:
+ // double x = 3.14;
+ // auto s = "Hello"
+ // "World";
// Special handling is required for 'nested' ternary operators.
SmallVector<unsigned, 16> ScopeStack;
@@ -298,8 +310,12 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
ScopeStack.push_back(i);
bool InsideNestedScope = ScopeStack.size() != 0;
+ bool ContinuedStringLiteral = i > Start &&
+ Changes[i].Tok->is(tok::string_literal) &&
+ Changes[i - 1].Tok->is(tok::string_literal);
+ bool SkipMatchCheck = InsideNestedScope || ContinuedStringLiteral;
- if (Changes[i].NewlinesBefore > 0 && !InsideNestedScope) {
+ if (Changes[i].NewlinesBefore > 0 && !SkipMatchCheck) {
Shift = 0;
FoundMatchOnLine = false;
}
@@ -307,7 +323,7 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
// If this is the first matching token to be aligned, remember by how many
// spaces it has to be shifted, so the rest of the changes on the line are
// shifted by the same amount
- if (!FoundMatchOnLine && !InsideNestedScope && Matches(Changes[i])) {
+ if (!FoundMatchOnLine && !SkipMatchCheck && Matches(Changes[i])) {
FoundMatchOnLine = true;
Shift = Column - Changes[i].StartOfTokenColumn;
Changes[i].Spaces += Shift;
@@ -317,19 +333,62 @@ AlignTokenSequence(unsigned Start, unsigned End, unsigned Column, F &&Matches,
// as mentioned in the ScopeStack comment.
if (InsideNestedScope && Changes[i].NewlinesBefore > 0) {
unsigned ScopeStart = ScopeStack.back();
- if (Changes[ScopeStart - 1].Tok->is(TT_FunctionDeclarationName) ||
- (ScopeStart > Start + 1 &&
- Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)) ||
- Changes[i].Tok->is(TT_ConditionalExpr) ||
- (Changes[i].Tok->Previous &&
- Changes[i].Tok->Previous->is(TT_ConditionalExpr)))
+ auto ShouldShiftBeAdded = [&] {
+ // Function declaration
+ if (Changes[ScopeStart - 1].Tok->is(TT_FunctionDeclarationName))
+ return true;
+
+ // Continued function declaration
+ if (ScopeStart > Start + 1 &&
+ Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName))
+ return true;
+
+ // Continued function call
+ if (ScopeStart > Start + 1 &&
+ Changes[ScopeStart - 2].Tok->is(tok::identifier) &&
+ Changes[ScopeStart - 1].Tok->is(tok::l_paren))
+ return true;
+
+ // Ternary operator
+ if (Changes[i].Tok->is(TT_ConditionalExpr))
+ return true;
+
+ // Period Initializer .XXX = 1.
+ if (Changes[i].Tok->is(TT_DesignatedInitializerPeriod))
+ return true;
+
+ // Continued ternary operator
+ if (Changes[i].Tok->Previous &&
+ Changes[i].Tok->Previous->is(TT_ConditionalExpr))
+ return true;
+
+ return false;
+ };
+
+ if (ShouldShiftBeAdded())
Changes[i].Spaces += Shift;
}
+ if (ContinuedStringLiteral)
+ Changes[i].Spaces += Shift;
+
assert(Shift >= 0);
+
Changes[i].StartOfTokenColumn += Shift;
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
+
+ // If PointerAlignment is PAS_Right, keep *s or &s next to the token
+ if (Style.PointerAlignment == FormatStyle::PAS_Right &&
+ Changes[i].Spaces != 0) {
+ for (int Previous = i - 1;
+ Previous >= 0 &&
+ Changes[Previous].Tok->getType() == TT_PointerOrReference;
+ --Previous) {
+ Changes[Previous + 1].Spaces -= Shift;
+ Changes[Previous].Spaces += Shift;
+ }
+ }
}
}
@@ -399,8 +458,8 @@ static unsigned AlignTokens(
// containing any matching token to be aligned and located after such token.
auto AlignCurrentSequence = [&] {
if (StartOfSequence > 0 && StartOfSequence < EndOfSequence)
- AlignTokenSequence(StartOfSequence, EndOfSequence, MinColumn, Matches,
- Changes);
+ AlignTokenSequence(Style, StartOfSequence, EndOfSequence, MinColumn,
+ Matches, Changes);
MinColumn = 0;
MaxColumn = UINT_MAX;
StartOfSequence = 0;
@@ -434,7 +493,10 @@ static unsigned AlignTokens(
AlignCurrentSequence();
// A new line starts, re-initialize line status tracking bools.
- FoundMatchOnLine = false;
+ // Keep the match state if a string literal is continued on this line.
+ if (i == 0 || !Changes[i].Tok->is(tok::string_literal) ||
+ !Changes[i - 1].Tok->is(tok::string_literal))
+ FoundMatchOnLine = false;
LineIsComment = true;
}
@@ -687,12 +749,6 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
if (Style.AlignConsecutiveDeclarations == FormatStyle::ACS_None)
return;
- // FIXME: Currently we don't handle properly the PointerAlignment: Right
- // The * and & are not aligned and are left dangling. Something has to be done
- // about it, but it raises the question of alignment of code like:
- // const char* const* v1;
- // float const* v2;
- // SomeVeryLongType const& v3;
AlignTokens(
Style,
[](Change const &C) {
@@ -709,6 +765,8 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
for (FormatToken *Next = C.Tok->Next; Next; Next = Next->Next) {
if (Next->is(tok::comment))
continue;
+ if (Next->is(TT_PointerOrReference))
+ return false;
if (!Next->Tok.getIdentifierInfo())
break;
if (Next->isOneOf(TT_StartOfName, TT_FunctionDeclarationName,
@@ -735,12 +793,11 @@ void WhitespaceManager::alignChainedConditionals() {
Changes, /*StartAt=*/0);
} else {
static auto AlignWrappedOperand = [](Change const &C) {
- auto Previous = C.Tok->getPreviousNonComment(); // Previous;
+ FormatToken *Previous = C.Tok->getPreviousNonComment();
return C.NewlinesBefore && Previous && Previous->is(TT_ConditionalExpr) &&
- (Previous->is(tok::question) ||
- (Previous->is(tok::colon) &&
- (C.Tok->FakeLParens.size() == 0 ||
- C.Tok->FakeLParens.back() != prec::Conditional)));
+ (Previous->is(tok::colon) &&
+ (C.Tok->FakeLParens.size() == 0 ||
+ C.Tok->FakeLParens.back() != prec::Conditional));
};
// Ensure we keep alignment of wrapped operands with non-wrapped operands
// Since we actually align the operators, the wrapped operands need the
@@ -902,6 +959,302 @@ void WhitespaceManager::alignEscapedNewlines(unsigned Start, unsigned End,
}
}
+void WhitespaceManager::alignArrayInitializers() {
+ if (Style.AlignArrayOfStructures == FormatStyle::AIAS_None)
+ return;
+
+ for (unsigned ChangeIndex = 1U, ChangeEnd = Changes.size();
+ ChangeIndex < ChangeEnd; ++ChangeIndex) {
+ auto &C = Changes[ChangeIndex];
+ if (C.Tok->IsArrayInitializer) {
+ bool FoundComplete = false;
+ for (unsigned InsideIndex = ChangeIndex + 1; InsideIndex < ChangeEnd;
+ ++InsideIndex) {
+ if (Changes[InsideIndex].Tok == C.Tok->MatchingParen) {
+ alignArrayInitializers(ChangeIndex, InsideIndex + 1);
+ ChangeIndex = InsideIndex + 1;
+ FoundComplete = true;
+ break;
+ }
+ }
+ if (!FoundComplete)
+ ChangeIndex = ChangeEnd;
+ }
+ }
+}
+
+void WhitespaceManager::alignArrayInitializers(unsigned Start, unsigned End) {
+
+ if (Style.AlignArrayOfStructures == FormatStyle::AIAS_Right)
+ alignArrayInitializersRightJustified(getCells(Start, End));
+ else if (Style.AlignArrayOfStructures == FormatStyle::AIAS_Left)
+ alignArrayInitializersLeftJustified(getCells(Start, End));
+}
+
+void WhitespaceManager::alignArrayInitializersRightJustified(
+ CellDescriptions &&CellDescs) {
+ auto &Cells = CellDescs.Cells;
+
+ // Now go through and fixup the spaces.
+ auto *CellIter = Cells.begin();
+ for (auto i = 0U; i < CellDescs.CellCount; i++, ++CellIter) {
+ unsigned NetWidth = 0U;
+ if (isSplitCell(*CellIter))
+ NetWidth = getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
+ auto CellWidth = getMaximumCellWidth(CellIter, NetWidth);
+
+ if (Changes[CellIter->Index].Tok->is(tok::r_brace)) {
+ // So in here we want to see if there is a brace that falls
+ // on a line that was split. If so on that line we make sure that
+ // the spaces in front of the brace are enough.
+ Changes[CellIter->Index].NewlinesBefore = 0;
+ Changes[CellIter->Index].Spaces = 0;
+ for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ Changes[Next->Index].Spaces = 0;
+ Changes[Next->Index].NewlinesBefore = 0;
+ }
+ // Unless the array is empty, we need the position of all the
+ // immediately adjacent cells
+ if (CellIter != Cells.begin()) {
+ auto ThisNetWidth =
+ getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
+ auto MaxNetWidth =
+ getMaximumNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces,
+ CellDescs.CellCount);
+ if (ThisNetWidth < MaxNetWidth)
+ Changes[CellIter->Index].Spaces = (MaxNetWidth - ThisNetWidth);
+ auto RowCount = 1U;
+ auto Offset = std::distance(Cells.begin(), CellIter);
+ for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ auto *Start = (Cells.begin() + RowCount * CellDescs.CellCount);
+ auto *End = Start + Offset;
+ ThisNetWidth = getNetWidth(Start, End, CellDescs.InitialSpaces);
+ if (ThisNetWidth < MaxNetWidth)
+ Changes[Next->Index].Spaces = (MaxNetWidth - ThisNetWidth);
+ ++RowCount;
+ }
+ }
+ } else {
+ auto ThisWidth =
+ calculateCellWidth(CellIter->Index, CellIter->EndIndex, true) +
+ NetWidth;
+ if (Changes[CellIter->Index].NewlinesBefore == 0) {
+ Changes[CellIter->Index].Spaces = (CellWidth - (ThisWidth + NetWidth));
+ Changes[CellIter->Index].Spaces += (i > 0) ? 1 : 0;
+ }
+ alignToStartOfCell(CellIter->Index, CellIter->EndIndex);
+ for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ ThisWidth =
+ calculateCellWidth(Next->Index, Next->EndIndex, true) + NetWidth;
+ if (Changes[Next->Index].NewlinesBefore == 0) {
+ Changes[Next->Index].Spaces = (CellWidth - ThisWidth);
+ Changes[Next->Index].Spaces += (i > 0) ? 1 : 0;
+ }
+ alignToStartOfCell(Next->Index, Next->EndIndex);
+ }
+ }
+ }
+}
+
+void WhitespaceManager::alignArrayInitializersLeftJustified(
+ CellDescriptions &&CellDescs) {
+ auto &Cells = CellDescs.Cells;
+
+ // Now go through and fixup the spaces.
+ auto *CellIter = Cells.begin();
+ // The first cell needs to be against the left brace.
+ if (Changes[CellIter->Index].NewlinesBefore == 0)
+ Changes[CellIter->Index].Spaces = 0;
+ else
+ Changes[CellIter->Index].Spaces = CellDescs.InitialSpaces;
+ ++CellIter;
+ for (auto i = 1U; i < CellDescs.CellCount; i++, ++CellIter) {
+ auto MaxNetWidth = getMaximumNetWidth(
+ Cells.begin(), CellIter, CellDescs.InitialSpaces, CellDescs.CellCount);
+ auto ThisNetWidth =
+ getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
+ if (Changes[CellIter->Index].NewlinesBefore == 0) {
+ Changes[CellIter->Index].Spaces =
+ MaxNetWidth - ThisNetWidth +
+ (Changes[CellIter->Index].Tok->isNot(tok::r_brace) ? 1 : 0);
+ }
+ auto RowCount = 1U;
+ auto Offset = std::distance(Cells.begin(), CellIter);
+ for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ auto *Start = (Cells.begin() + RowCount * CellDescs.CellCount);
+ auto *End = Start + Offset;
+ auto ThisNetWidth = getNetWidth(Start, End, CellDescs.InitialSpaces);
+ if (Changes[Next->Index].NewlinesBefore == 0) {
+ Changes[Next->Index].Spaces =
+ MaxNetWidth - ThisNetWidth +
+ (Changes[Next->Index].Tok->isNot(tok::r_brace) ? 1 : 0);
+ }
+ ++RowCount;
+ }
+ }
+}
+
+bool WhitespaceManager::isSplitCell(const CellDescription &Cell) {
+ if (Cell.HasSplit)
+ return true;
+ for (const auto *Next = Cell.NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ if (Next->HasSplit)
+ return true;
+ }
+ return false;
+}
+
+WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
+ unsigned End) {
+
+ unsigned Depth = 0;
+ unsigned Cell = 0;
+ unsigned CellCount = 0;
+ unsigned InitialSpaces = 0;
+ unsigned InitialTokenLength = 0;
+ unsigned EndSpaces = 0;
+ SmallVector<CellDescription> Cells;
+ const FormatToken *MatchingParen = nullptr;
+ for (unsigned i = Start; i < End; ++i) {
+ auto &C = Changes[i];
+ if (C.Tok->is(tok::l_brace))
+ ++Depth;
+ else if (C.Tok->is(tok::r_brace))
+ --Depth;
+ if (Depth == 2) {
+ if (C.Tok->is(tok::l_brace)) {
+ Cell = 0;
+ MatchingParen = C.Tok->MatchingParen;
+ if (InitialSpaces == 0) {
+ InitialSpaces = C.Spaces + C.TokenLength;
+ InitialTokenLength = C.TokenLength;
+ auto j = i - 1;
+ for (; Changes[j].NewlinesBefore == 0 && j > Start; --j) {
+ InitialSpaces += Changes[j].Spaces + Changes[j].TokenLength;
+ InitialTokenLength += Changes[j].TokenLength;
+ }
+ if (C.NewlinesBefore == 0) {
+ InitialSpaces += Changes[j].Spaces + Changes[j].TokenLength;
+ InitialTokenLength += Changes[j].TokenLength;
+ }
+ }
+ } else if (C.Tok->is(tok::comma)) {
+ if (!Cells.empty())
+ Cells.back().EndIndex = i;
+ Cell++;
+ }
+ } else if (Depth == 1) {
+ if (C.Tok == MatchingParen) {
+ if (!Cells.empty())
+ Cells.back().EndIndex = i;
+ Cells.push_back(CellDescription{i, ++Cell, i + 1, false, nullptr});
+ CellCount = Cell + 1;
+ // Go to the next non-comment and ensure there is a break in front
+ const auto *NextNonComment = C.Tok->getNextNonComment();
+ while (NextNonComment->is(tok::comma))
+ NextNonComment = NextNonComment->getNextNonComment();
+ auto j = i;
+ while (Changes[j].Tok != NextNonComment && j < End)
+ j++;
+ if (j < End && Changes[j].NewlinesBefore == 0 &&
+ Changes[j].Tok->isNot(tok::r_brace)) {
+ Changes[j].NewlinesBefore = 1;
+ // Account for the added token lengths
+ Changes[j].Spaces = InitialSpaces - InitialTokenLength;
+ }
+ } else if (C.Tok->is(tok::comment)) {
+ // Trailing comments stay at a space past the last token
+ C.Spaces = Changes[i - 1].Tok->is(tok::comma) ? 1 : 2;
+ } else if (C.Tok->is(tok::l_brace)) {
+ // We need to make sure that the ending brace is aligned to the
+ // start of our initializer
+ auto j = i - 1;
+ for (; j > 0 && !Changes[j].Tok->ArrayInitializerLineStart; --j)
+ ; // Nothing; the loop does the work
+ EndSpaces = Changes[j].Spaces;
+ }
+ } else if (Depth == 0 && C.Tok->is(tok::r_brace)) {
+ C.NewlinesBefore = 1;
+ C.Spaces = EndSpaces;
+ }
+ if (C.Tok->StartsColumn) {
+ // This gets us past tokens that have been split over multiple
+ // lines
+ bool HasSplit = false;
+ if (Changes[i].NewlinesBefore > 0) {
+ // So if we split a line previously and the tail line + this token is
+ // less than the column limit we remove the split here and just put
+ // the column start at a space past the comma
+ auto j = i - 1;
+ if ((j - 1) > Start && Changes[j].Tok->is(tok::comma) &&
+ Changes[j - 1].NewlinesBefore > 0) {
+ --j;
+ auto LineLimit = Changes[j].Spaces + Changes[j].TokenLength;
+ if (LineLimit < Style.ColumnLimit) {
+ Changes[i].NewlinesBefore = 0;
+ Changes[i].Spaces = 1;
+ }
+ }
+ }
+ while (Changes[i].NewlinesBefore > 0 && Changes[i].Tok == C.Tok) {
+ Changes[i].Spaces = InitialSpaces;
+ ++i;
+ HasSplit = true;
+ }
+ if (Changes[i].Tok != C.Tok)
+ --i;
+ Cells.push_back(CellDescription{i, Cell, i, HasSplit, nullptr});
+ }
+ }
+
+ return linkCells({Cells, CellCount, InitialSpaces});
+}
+
+unsigned WhitespaceManager::calculateCellWidth(unsigned Start, unsigned End,
+ bool WithSpaces) const {
+ unsigned CellWidth = 0;
+ for (auto i = Start; i < End; i++) {
+ if (Changes[i].NewlinesBefore > 0)
+ CellWidth = 0;
+ CellWidth += Changes[i].TokenLength;
+ CellWidth += (WithSpaces ? Changes[i].Spaces : 0);
+ }
+ return CellWidth;
+}
+
+void WhitespaceManager::alignToStartOfCell(unsigned Start, unsigned End) {
+ if ((End - Start) <= 1)
+ return;
+ // If the line is broken anywhere in there make sure everything
+ // is aligned to the parent
+ for (auto i = Start + 1; i < End; i++) {
+ if (Changes[i].NewlinesBefore > 0)
+ Changes[i].Spaces = Changes[Start].Spaces;
+ }
+}
+
+WhitespaceManager::CellDescriptions
+WhitespaceManager::linkCells(CellDescriptions &&CellDesc) {
+ auto &Cells = CellDesc.Cells;
+ for (auto *CellIter = Cells.begin(); CellIter != Cells.end(); ++CellIter) {
+ if (CellIter->NextColumnElement == nullptr &&
+ ((CellIter + 1) != Cells.end())) {
+ for (auto *NextIter = CellIter + 1; NextIter != Cells.end(); ++NextIter) {
+ if (NextIter->Cell == CellIter->Cell) {
+ CellIter->NextColumnElement = &(*NextIter);
+ break;
+ }
+ }
+ }
+ }
+ return std::move(CellDesc);
+}
+
void WhitespaceManager::generateChanges() {
for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
const Change &C = Changes[i];
diff --git a/clang/lib/Format/WhitespaceManager.h b/clang/lib/Format/WhitespaceManager.h
index 1398a3aee2b8..4f8f95040af6 100644
--- a/clang/lib/Format/WhitespaceManager.h
+++ b/clang/lib/Format/WhitespaceManager.h
@@ -18,6 +18,8 @@
#include "TokenAnnotator.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
+#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
#include <string>
#include <tuple>
@@ -173,6 +175,28 @@ public:
};
private:
+ struct CellDescription {
+ unsigned Index = 0;
+ unsigned Cell = 0;
+ unsigned EndIndex = 0;
+ bool HasSplit = false;
+ CellDescription *NextColumnElement = nullptr;
+
+ constexpr bool operator==(const CellDescription &Other) const {
+ return Index == Other.Index && Cell == Other.Cell &&
+ EndIndex == Other.EndIndex;
+ }
+ constexpr bool operator!=(const CellDescription &Other) const {
+ return !(*this == Other);
+ }
+ };
+
+ struct CellDescriptions {
+ SmallVector<CellDescription> Cells;
+ unsigned CellCount = 0;
+ unsigned InitialSpaces = 0;
+ };
+
/// Calculate \c IsTrailingComment, \c TokenLength for the last tokens
/// or token parts in a line and \c PreviousEndOfTokenColumn and
/// \c EscapedNewlineColumn for the first tokens or token parts in a line.
@@ -207,6 +231,89 @@ private:
/// the specified \p Column.
void alignEscapedNewlines(unsigned Start, unsigned End, unsigned Column);
+ /// Align Array Initializers over all \c Changes.
+ void alignArrayInitializers();
+
+ /// Align Array Initializers from change \p Start to change \p End at
+ /// the specified \p Column.
+ void alignArrayInitializers(unsigned Start, unsigned End);
+
+ /// Align Array Initializers being careful to right justify the columns
+ /// as described by \p CellDescs.
+ void alignArrayInitializersRightJustified(CellDescriptions &&CellDescs);
+
+  /// Align Array Initializers being careful to left justify the columns
+ /// as described by \p CellDescs.
+ void alignArrayInitializersLeftJustified(CellDescriptions &&CellDescs);
+
+ /// Calculate the cell width between two indexes.
+ unsigned calculateCellWidth(unsigned Start, unsigned End,
+ bool WithSpaces = false) const;
+
+ /// Get a set of fully specified CellDescriptions between \p Start and
+ /// \p End of the change list.
+ CellDescriptions getCells(unsigned Start, unsigned End);
+
+ /// Does this \p Cell contain a split element?
+ static bool isSplitCell(const CellDescription &Cell);
+
+  /// Get the width of the preceding cells from \p Start to \p End.
+ template <typename I>
+ auto getNetWidth(const I &Start, const I &End, unsigned InitialSpaces) const {
+ auto NetWidth = InitialSpaces;
+ for (auto PrevIter = Start; PrevIter != End; ++PrevIter) {
+ // If we broke the line the initial spaces are already
+ // accounted for.
+ if (Changes[PrevIter->Index].NewlinesBefore > 0)
+ NetWidth = 0;
+ NetWidth +=
+ calculateCellWidth(PrevIter->Index, PrevIter->EndIndex, true) + 1;
+ }
+ return NetWidth;
+ }
+
+ /// Get the maximum width of a cell in a sequence of columns.
+ template <typename I>
+ unsigned getMaximumCellWidth(I CellIter, unsigned NetWidth) const {
+ unsigned CellWidth =
+ calculateCellWidth(CellIter->Index, CellIter->EndIndex, true);
+ if (Changes[CellIter->Index].NewlinesBefore == 0)
+ CellWidth += NetWidth;
+ for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ auto ThisWidth = calculateCellWidth(Next->Index, Next->EndIndex, true);
+ if (Changes[Next->Index].NewlinesBefore == 0)
+ ThisWidth += NetWidth;
+ CellWidth = std::max(CellWidth, ThisWidth);
+ }
+ return CellWidth;
+ }
+
+  /// Get the maximum width of all columns to a given cell.
+ template <typename I>
+ unsigned getMaximumNetWidth(const I &CellStart, const I &CellStop,
+ unsigned InitialSpaces,
+ unsigned CellCount) const {
+ auto MaxNetWidth = getNetWidth(CellStart, CellStop, InitialSpaces);
+ auto RowCount = 1U;
+ auto Offset = std::distance(CellStart, CellStop);
+ for (const auto *Next = CellStop->NextColumnElement; Next != nullptr;
+ Next = Next->NextColumnElement) {
+ auto Start = (CellStart + RowCount * CellCount);
+ auto End = Start + Offset;
+ MaxNetWidth =
+ std::max(MaxNetWidth, getNetWidth(Start, End, InitialSpaces));
+ ++RowCount;
+ }
+ return MaxNetWidth;
+ }
+
+ /// Align a split cell with a newline to the first element in the cell.
+ void alignToStartOfCell(unsigned Start, unsigned End);
+
+ /// Link the Cell pointers in the list of Cells.
+ static CellDescriptions linkCells(CellDescriptions &&CellDesc);
+
/// Fill \c Replaces with the replacements for all effective changes.
void generateChanges();
diff --git a/clang/lib/Frontend/ASTUnit.cpp b/clang/lib/Frontend/ASTUnit.cpp
index e7a87dc6b23c..996783aa9cf4 100644
--- a/clang/lib/Frontend/ASTUnit.cpp
+++ b/clang/lib/Frontend/ASTUnit.cpp
@@ -588,7 +588,7 @@ private:
//
// FIXME: We shouldn't need to do this, the target should be immutable once
// created. This complexity should be lifted elsewhere.
- Target->adjust(LangOpt);
+ Target->adjust(PP.getDiagnostics(), LangOpt);
// Initialize the preprocessor.
PP.Initialize(*Target);
@@ -807,7 +807,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
if (ToLoad >= LoadASTOnly)
AST->Ctx = new ASTContext(*AST->LangOpts, AST->getSourceManager(),
PP.getIdentifierTable(), PP.getSelectorTable(),
- PP.getBuiltinInfo());
+ PP.getBuiltinInfo(),
+ AST->getTranslationUnitKind());
DisableValidationForModuleKind disableValid =
DisableValidationForModuleKind::None;
@@ -1150,17 +1151,9 @@ bool ASTUnit::Parse(std::shared_ptr<PCHContainerOperations> PCHContainerOps,
Clang->setDiagnostics(&getDiagnostics());
// Create the target instance.
- Clang->setTarget(TargetInfo::CreateTargetInfo(
- Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
- if (!Clang->hasTarget())
+ if (!Clang->createTarget())
return true;
- // Inform the target of the language options.
- //
- // FIXME: We shouldn't need to do this, the target should be immutable once
- // created. This complexity should be lifted elsewhere.
- Clang->getTarget().adjust(Clang->getLangOpts());
-
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
@@ -1568,17 +1561,9 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(
Clang->setDiagnostics(&AST->getDiagnostics());
// Create the target instance.
- Clang->setTarget(TargetInfo::CreateTargetInfo(
- Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
- if (!Clang->hasTarget())
+ if (!Clang->createTarget())
return nullptr;
- // Inform the target of the language options.
- //
- // FIXME: We shouldn't need to do this, the target should be immutable once
- // created. This complexity should be lifted elsewhere.
- Clang->getTarget().adjust(Clang->getLangOpts());
-
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
@@ -2194,19 +2179,11 @@ void ASTUnit::CodeComplete(
ProcessWarningOptions(Diag, Inv.getDiagnosticOpts());
// Create the target instance.
- Clang->setTarget(TargetInfo::CreateTargetInfo(
- Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
- if (!Clang->hasTarget()) {
+ if (!Clang->createTarget()) {
Clang->setInvocation(nullptr);
return;
}
- // Inform the target of the language options.
- //
- // FIXME: We shouldn't need to do this, the target should be immutable once
- // created. This complexity should be lifted elsewhere.
- Clang->getTarget().adjust(Clang->getLangOpts());
-
assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
"Invocation must have exactly one source file!");
assert(Clang->getFrontendOpts().Inputs[0].getKind().getFormat() ==
diff --git a/clang/lib/Frontend/CompilerInstance.cpp b/clang/lib/Frontend/CompilerInstance.cpp
index 956877d34680..c642af1849bc 100644
--- a/clang/lib/Frontend/CompilerInstance.cpp
+++ b/clang/lib/Frontend/CompilerInstance.cpp
@@ -77,7 +77,7 @@ bool CompilerInstance::shouldBuildGlobalModuleIndex() const {
return (BuildGlobalModuleIndex ||
(TheASTReader && TheASTReader->isGlobalIndexUnavailable() &&
getFrontendOpts().GenerateGlobalModuleIndex)) &&
- !ModuleBuildFailed;
+ !DisableGeneratingGlobalModuleIndex;
}
void CompilerInstance::setDiagnostics(DiagnosticsEngine *Value) {
@@ -97,6 +97,62 @@ void CompilerInstance::setVerboseOutputStream(std::unique_ptr<raw_ostream> Value
void CompilerInstance::setTarget(TargetInfo *Value) { Target = Value; }
void CompilerInstance::setAuxTarget(TargetInfo *Value) { AuxTarget = Value; }
+bool CompilerInstance::createTarget() {
+ // Create the target instance.
+ setTarget(TargetInfo::CreateTargetInfo(getDiagnostics(),
+ getInvocation().TargetOpts));
+ if (!hasTarget())
+ return false;
+
+ // Check whether AuxTarget exists, if not, then create TargetInfo for the
+ // other side of CUDA/OpenMP/SYCL compilation.
+ if (!getAuxTarget() &&
+ (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
+ getLangOpts().SYCLIsDevice) &&
+ !getFrontendOpts().AuxTriple.empty()) {
+ auto TO = std::make_shared<TargetOptions>();
+ TO->Triple = llvm::Triple::normalize(getFrontendOpts().AuxTriple);
+ if (getFrontendOpts().AuxTargetCPU)
+ TO->CPU = getFrontendOpts().AuxTargetCPU.getValue();
+ if (getFrontendOpts().AuxTargetFeatures)
+ TO->FeaturesAsWritten = getFrontendOpts().AuxTargetFeatures.getValue();
+ TO->HostTriple = getTarget().getTriple().str();
+ setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
+ }
+
+ if (!getTarget().hasStrictFP() && !getLangOpts().ExpStrictFP) {
+ if (getLangOpts().getFPRoundingMode() !=
+ llvm::RoundingMode::NearestTiesToEven) {
+ getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_rounding);
+ getLangOpts().setFPRoundingMode(llvm::RoundingMode::NearestTiesToEven);
+ }
+ if (getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_exceptions);
+ getLangOpts().setFPExceptionMode(LangOptions::FPE_Ignore);
+ }
+ // FIXME: can we disable FEnvAccess?
+ }
+
+ // We should do it here because target knows nothing about
+ // language options when it's being created.
+ if (getLangOpts().OpenCL &&
+ !getTarget().validateOpenCLTarget(getLangOpts(), getDiagnostics()))
+ return false;
+
+ // Inform the target of the language options.
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ getTarget().adjust(getDiagnostics(), getLangOpts());
+
+ // Adjust target options based on codegen options.
+ getTarget().adjustTargetOptions(getCodeGenOpts(), getTargetOpts());
+
+ if (auto *Aux = getAuxTarget())
+ getTarget().setAuxTarget(Aux);
+
+ return true;
+}
+
llvm::vfs::FileSystem &CompilerInstance::getVirtualFileSystem() const {
return getFileManager().getVirtualFileSystem();
}
@@ -229,7 +285,7 @@ static void SetUpDiagnosticLog(DiagnosticOptions *DiagOpts,
// Create the output stream.
auto FileOS = std::make_unique<llvm::raw_fd_ostream>(
DiagOpts->DiagnosticLogFile, EC,
- llvm::sys::fs::OF_Append | llvm::sys::fs::OF_Text);
+ llvm::sys::fs::OF_Append | llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
Diags.Report(diag::warn_fe_cc_log_diagnostics_failure)
<< DiagOpts->DiagnosticLogFile << EC.message();
@@ -401,7 +457,7 @@ void CompilerInstance::createPreprocessor(TranslationUnitKind TUKind) {
getSourceManager(), *HeaderInfo, *this,
/*IdentifierInfoLookup=*/nullptr,
/*OwnsHeaderSearch=*/true, TUKind);
- getTarget().adjust(getLangOpts());
+ getTarget().adjust(getDiagnostics(), getLangOpts());
PP->Initialize(getTarget(), getAuxTarget());
if (PPOpts.DetailedRecord)
@@ -495,7 +551,7 @@ void CompilerInstance::createASTContext() {
Preprocessor &PP = getPreprocessor();
auto *Context = new ASTContext(getLangOpts(), PP.getSourceManager(),
PP.getIdentifierTable(), PP.getSelectorTable(),
- PP.getBuiltinInfo());
+ PP.getBuiltinInfo(), PP.TUKind);
Context->InitBuiltinTypes(getTarget(), getAuxTarget());
setASTContext(Context);
}
@@ -647,31 +703,37 @@ void CompilerInstance::createSema(TranslationUnitKind TUKind,
// Output Files
void CompilerInstance::clearOutputFiles(bool EraseFiles) {
+ // Ignore errors that occur when trying to discard the temp file.
for (OutputFile &OF : OutputFiles) {
if (EraseFiles) {
- if (!OF.TempFilename.empty()) {
- llvm::sys::fs::remove(OF.TempFilename);
- continue;
- }
+ if (OF.File)
+ consumeError(OF.File->discard());
if (!OF.Filename.empty())
llvm::sys::fs::remove(OF.Filename);
continue;
}
- if (OF.TempFilename.empty())
+ if (!OF.File)
continue;
+ if (OF.File->TmpName.empty()) {
+ consumeError(OF.File->discard());
+ continue;
+ }
+
// If '-working-directory' was passed, the output filename should be
// relative to that.
SmallString<128> NewOutFile(OF.Filename);
FileMgr->FixupRelativePath(NewOutFile);
- std::error_code EC = llvm::sys::fs::rename(OF.TempFilename, NewOutFile);
- if (!EC)
+
+ llvm::Error E = OF.File->keep(NewOutFile);
+ if (!E)
continue;
+
getDiagnostics().Report(diag::err_unable_to_rename_temp)
- << OF.TempFilename << OF.Filename << EC.message();
+ << OF.File->TmpName << OF.Filename << std::move(E);
- llvm::sys::fs::remove(OF.TempFilename);
+ llvm::sys::fs::remove(OF.File->TmpName);
}
OutputFiles.clear();
if (DeleteBuiltModules) {
@@ -681,11 +743,9 @@ void CompilerInstance::clearOutputFiles(bool EraseFiles) {
}
}
-std::unique_ptr<raw_pwrite_stream>
-CompilerInstance::createDefaultOutputFile(bool Binary, StringRef InFile,
- StringRef Extension,
- bool RemoveFileOnSignal,
- bool CreateMissingDirectories) {
+std::unique_ptr<raw_pwrite_stream> CompilerInstance::createDefaultOutputFile(
+ bool Binary, StringRef InFile, StringRef Extension, bool RemoveFileOnSignal,
+ bool CreateMissingDirectories, bool ForceUseTemporary) {
StringRef OutputPath = getFrontendOpts().OutputFile;
Optional<SmallString<128>> PathStorage;
if (OutputPath.empty()) {
@@ -698,9 +758,8 @@ CompilerInstance::createDefaultOutputFile(bool Binary, StringRef InFile,
}
}
- // Force a temporary file if RemoveFileOnSignal was disabled.
return createOutputFile(OutputPath, Binary, RemoveFileOnSignal,
- getFrontendOpts().UseTemporary || !RemoveFileOnSignal,
+ getFrontendOpts().UseTemporary || ForceUseTemporary,
CreateMissingDirectories);
}
@@ -753,7 +812,7 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
}
}
- std::string TempFile;
+ Optional<llvm::sys::fs::TempFile> Temp;
if (UseTemporary) {
// Create a temporary file.
// Insert -%%%%%%%% before the extension (if any), and because some tools
@@ -765,22 +824,34 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
TempPath += "-%%%%%%%%";
TempPath += OutputExtension;
TempPath += ".tmp";
- int fd;
- std::error_code EC =
- llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath);
-
- if (CreateMissingDirectories &&
- EC == llvm::errc::no_such_file_or_directory) {
- StringRef Parent = llvm::sys::path::parent_path(OutputPath);
- EC = llvm::sys::fs::create_directories(Parent);
- if (!EC) {
- EC = llvm::sys::fs::createUniqueFile(TempPath, fd, TempPath);
- }
- }
+ Expected<llvm::sys::fs::TempFile> ExpectedFile =
+ llvm::sys::fs::TempFile::create(
+ TempPath, llvm::sys::fs::all_read | llvm::sys::fs::all_write,
+ Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text);
+
+ llvm::Error E = handleErrors(
+ ExpectedFile.takeError(), [&](const llvm::ECError &E) -> llvm::Error {
+ std::error_code EC = E.convertToErrorCode();
+ if (CreateMissingDirectories &&
+ EC == llvm::errc::no_such_file_or_directory) {
+ StringRef Parent = llvm::sys::path::parent_path(OutputPath);
+ EC = llvm::sys::fs::create_directories(Parent);
+ if (!EC) {
+ ExpectedFile = llvm::sys::fs::TempFile::create(TempPath);
+ if (!ExpectedFile)
+ return llvm::errorCodeToError(
+ llvm::errc::no_such_file_or_directory);
+ }
+ }
+ return llvm::errorCodeToError(EC);
+ });
- if (!EC) {
- OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
- OSFile = TempFile = std::string(TempPath.str());
+ if (E) {
+ consumeError(std::move(E));
+ } else {
+ Temp = std::move(ExpectedFile.get());
+ OS.reset(new llvm::raw_fd_ostream(Temp->FD, /*shouldClose=*/false));
+ OSFile = Temp->TmpName;
}
// If we failed to create the temporary, fallback to writing to the file
// directly. This handles the corner case where we cannot write to the
@@ -792,19 +863,15 @@ CompilerInstance::createOutputFileImpl(StringRef OutputPath, bool Binary,
std::error_code EC;
OS.reset(new llvm::raw_fd_ostream(
*OSFile, EC,
- (Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_Text)));
+ (Binary ? llvm::sys::fs::OF_None : llvm::sys::fs::OF_TextWithCRLF)));
if (EC)
return llvm::errorCodeToError(EC);
}
- // Make sure the out stream file gets removed if we crash.
- if (RemoveFileOnSignal)
- llvm::sys::RemoveFileOnSignal(*OSFile);
-
// Add the output file -- but don't try to remove "-", since this means we are
// using stdin.
OutputFiles.emplace_back(((OutputPath != "-") ? OutputPath : "").str(),
- std::move(TempFile));
+ std::move(Temp));
if (!Binary || OS->supportsSeeking())
return std::move(OS);
@@ -878,51 +945,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (!Act.PrepareToExecute(*this))
return false;
- // Create the target instance.
- setTarget(TargetInfo::CreateTargetInfo(getDiagnostics(),
- getInvocation().TargetOpts));
- if (!hasTarget())
+ if (!createTarget())
return false;
- // Create TargetInfo for the other side of CUDA/OpenMP/SYCL compilation.
- if ((getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
- getLangOpts().SYCLIsDevice) &&
- !getFrontendOpts().AuxTriple.empty()) {
- auto TO = std::make_shared<TargetOptions>();
- TO->Triple = llvm::Triple::normalize(getFrontendOpts().AuxTriple);
- if (getFrontendOpts().AuxTargetCPU)
- TO->CPU = getFrontendOpts().AuxTargetCPU.getValue();
- if (getFrontendOpts().AuxTargetFeatures)
- TO->FeaturesAsWritten = getFrontendOpts().AuxTargetFeatures.getValue();
- TO->HostTriple = getTarget().getTriple().str();
- setAuxTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), TO));
- }
-
- if (!getTarget().hasStrictFP() && !getLangOpts().ExpStrictFP) {
- if (getLangOpts().getFPRoundingMode() !=
- llvm::RoundingMode::NearestTiesToEven) {
- getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_rounding);
- getLangOpts().setFPRoundingMode(llvm::RoundingMode::NearestTiesToEven);
- }
- if (getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore) {
- getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_exceptions);
- getLangOpts().setFPExceptionMode(LangOptions::FPE_Ignore);
- }
- // FIXME: can we disable FEnvAccess?
- }
-
- // Inform the target of the language options.
- //
- // FIXME: We shouldn't need to do this, the target should be immutable once
- // created. This complexity should be lifted elsewhere.
- getTarget().adjust(getLangOpts());
-
- // Adjust target options based on codegen options.
- getTarget().adjustTargetOptions(getCodeGenOpts(), getTargetOpts());
-
- if (auto *Aux = getAuxTarget())
- getTarget().setAuxTarget(Aux);
-
// rewriter project will change target built-in bool type from its default.
if (getFrontendOpts().ProgramAction == frontend::RewriteObjC)
getTarget().noSignedCharForObjCBool();
@@ -992,7 +1017,7 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (!StatsFile.empty()) {
std::error_code EC;
auto StatS = std::make_unique<llvm::raw_fd_ostream>(
- StatsFile, EC, llvm::sys::fs::OF_Text);
+ StatsFile, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
getDiagnostics().Report(diag::warn_fe_unable_to_open_stats_file)
<< StatsFile << EC.message();
@@ -1029,6 +1054,15 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
[](CompilerInstance &) {}) {
llvm::TimeTraceScope TimeScope("Module Compile", ModuleName);
+ // Never compile a module that's already finalized - this would cause the
+ // existing module to be freed, causing crashes if it is later referenced
+ if (ImportingInstance.getModuleCache().isPCMFinal(ModuleFileName)) {
+ ImportingInstance.getDiagnostics().Report(
+ ImportLoc, diag::err_module_rebuild_finalized)
+ << ModuleName;
+ return false;
+ }
+
// Construct a compiler invocation for creating this module.
auto Invocation =
std::make_shared<CompilerInvocation>(ImportingInstance.getInvocation());
@@ -1144,7 +1178,10 @@ compileModuleImpl(CompilerInstance &ImportingInstance, SourceLocation ImportLoc,
// module generation thread crashed.
Instance.clearOutputFiles(/*EraseFiles=*/true);
- return !Instance.getDiagnostics().hasErrorOccurred();
+ // If \p AllowPCMWithCompilerErrors is set return 'success' even if errors
+ // occurred.
+ return !Instance.getDiagnostics().hasErrorOccurred() ||
+ Instance.getFrontendOpts().AllowPCMWithCompilerErrors;
}
static const FileEntry *getPublicModuleMap(const FileEntry *File,
@@ -1664,9 +1701,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
// We can't find a module, error out here.
getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
<< ModuleName << SourceRange(ImportLoc, ModuleNameLoc);
- ModuleBuildFailed = true;
- // FIXME: Why is this not cached?
- return ModuleLoadResult::OtherUncachedFailure;
+ return nullptr;
}
if (ModuleFilename.empty()) {
if (M && M->HasIncompatibleModuleFile) {
@@ -1677,9 +1712,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
getDiagnostics().Report(ModuleNameLoc, diag::err_module_build_disabled)
<< ModuleName;
- ModuleBuildFailed = true;
- // FIXME: Why is this not cached?
- return ModuleLoadResult::OtherUncachedFailure;
+ return nullptr;
}
// Create an ASTReader on demand.
@@ -1697,7 +1730,8 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
// Try to load the module file. If we are not trying to load from the
// module cache, we don't know how to rebuild modules.
unsigned ARRFlags = Source == MS_ModuleCache
- ? ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing
+ ? ASTReader::ARR_OutOfDate | ASTReader::ARR_Missing |
+ ASTReader::ARR_TreatModuleWithErrorsAsOutOfDate
: Source == MS_PrebuiltModulePath
? 0
: ASTReader::ARR_ConfigurationMismatch;
@@ -1724,7 +1758,6 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
if (*ModuleFile == M->getASTFile())
return M;
- ModuleBuildFailed = true;
getDiagnostics().Report(ModuleNameLoc, diag::err_module_prebuilt)
<< ModuleName;
return ModuleLoadResult();
@@ -1746,14 +1779,12 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
LLVM_FALLTHROUGH;
case ASTReader::VersionMismatch:
case ASTReader::HadErrors:
- // FIXME: Should this set ModuleBuildFailed = true?
ModuleLoader::HadFatalFailure = true;
// FIXME: The ASTReader will already have complained, but can we shoehorn
// that diagnostic information into a more useful form?
return ModuleLoadResult();
case ASTReader::Failure:
- // FIXME: Should this set ModuleBuildFailed = true?
ModuleLoader::HadFatalFailure = true;
return ModuleLoadResult();
}
@@ -1763,7 +1794,6 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
// We don't know the desired configuration for this module and don't
// necessarily even have a module map. Since ReadAST already produces
// diagnostics for these two cases, we simply error out here.
- ModuleBuildFailed = true;
return ModuleLoadResult();
}
@@ -1788,9 +1818,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
getDiagnostics().Report(ModuleNameLoc, diag::err_module_cycle)
<< ModuleName << CyclePath;
- // FIXME: Should this set ModuleBuildFailed = true?
- // FIXME: Why is this not cached?
- return ModuleLoadResult::OtherUncachedFailure;
+ return nullptr;
}
// Check whether we have already attempted to build this module (but
@@ -1799,9 +1827,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
getPreprocessorOpts().FailedModules->hasAlreadyFailed(ModuleName)) {
getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_built)
<< ModuleName << SourceRange(ImportLoc, ModuleNameLoc);
- ModuleBuildFailed = true;
- // FIXME: Why is this not cached?
- return ModuleLoadResult::OtherUncachedFailure;
+ return nullptr;
}
// Try to compile and then read the AST.
@@ -1811,9 +1837,7 @@ ModuleLoadResult CompilerInstance::findOrCompileModuleAndReadAST(
"undiagnosed error in compileModuleAndReadAST");
if (getPreprocessorOpts().FailedModules)
getPreprocessorOpts().FailedModules->addFailed(ModuleName);
- ModuleBuildFailed = true;
- // FIXME: Why is this not cached?
- return ModuleLoadResult::OtherUncachedFailure;
+ return nullptr;
}
// Okay, we've rebuilt and now loaded the module.
@@ -1856,22 +1880,19 @@ CompilerInstance::loadModule(SourceLocation ImportLoc,
//if (Module == nullptr) {
// getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
// << ModuleName;
- // ModuleBuildFailed = true;
+ // DisableGeneratingGlobalModuleIndex = true;
// return ModuleLoadResult();
//}
MM.cacheModuleLoad(*Path[0].first, Module);
} else {
ModuleLoadResult Result = findOrCompileModuleAndReadAST(
ModuleName, ImportLoc, ModuleNameLoc, IsInclusionDirective);
- // FIXME: Can we pull 'ModuleBuildFailed = true' out of the return
- // sequences for findOrCompileModuleAndReadAST and do it here (as long as
- // the result is not a config mismatch)? See FIXMEs there.
if (!Result.isNormal())
return Result;
+ if (!Result)
+ DisableGeneratingGlobalModuleIndex = true;
Module = Result;
MM.cacheModuleLoad(*Path[0].first, Module);
- if (!Module)
- return Module;
}
// If we never found the module, fail. Otherwise, verify the module and link
diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp
index d8be4ea14868..d545e9358f04 100644
--- a/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/clang/lib/Frontend/CompilerInvocation.cpp
@@ -38,6 +38,7 @@
#include "clang/Frontend/FrontendPluginRegistry.h"
#include "clang/Frontend/MigratorOptions.h"
#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
#include "clang/Frontend/Utils.h"
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/PreprocessorOptions.h"
@@ -48,10 +49,12 @@
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/CachedHashString.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -106,20 +109,40 @@ using namespace llvm::opt;
// Initialization.
//===----------------------------------------------------------------------===//
-CompilerInvocationBase::CompilerInvocationBase()
+CompilerInvocationRefBase::CompilerInvocationRefBase()
: LangOpts(new LangOptions()), TargetOpts(new TargetOptions()),
DiagnosticOpts(new DiagnosticOptions()),
HeaderSearchOpts(new HeaderSearchOptions()),
- PreprocessorOpts(new PreprocessorOptions()) {}
+ PreprocessorOpts(new PreprocessorOptions()),
+ AnalyzerOpts(new AnalyzerOptions()) {}
-CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
+CompilerInvocationRefBase::CompilerInvocationRefBase(
+ const CompilerInvocationRefBase &X)
: LangOpts(new LangOptions(*X.getLangOpts())),
TargetOpts(new TargetOptions(X.getTargetOpts())),
DiagnosticOpts(new DiagnosticOptions(X.getDiagnosticOpts())),
HeaderSearchOpts(new HeaderSearchOptions(X.getHeaderSearchOpts())),
- PreprocessorOpts(new PreprocessorOptions(X.getPreprocessorOpts())) {}
+ PreprocessorOpts(new PreprocessorOptions(X.getPreprocessorOpts())),
+ AnalyzerOpts(new AnalyzerOptions(*X.getAnalyzerOpts())) {}
+
+CompilerInvocationRefBase::CompilerInvocationRefBase(
+ CompilerInvocationRefBase &&X) = default;
+
+CompilerInvocationRefBase &
+CompilerInvocationRefBase::operator=(CompilerInvocationRefBase X) {
+ LangOpts.swap(X.LangOpts);
+ TargetOpts.swap(X.TargetOpts);
+ DiagnosticOpts.swap(X.DiagnosticOpts);
+ HeaderSearchOpts.swap(X.HeaderSearchOpts);
+ PreprocessorOpts.swap(X.PreprocessorOpts);
+ AnalyzerOpts.swap(X.AnalyzerOpts);
+ return *this;
+}
+
+CompilerInvocationRefBase &
+CompilerInvocationRefBase::operator=(CompilerInvocationRefBase &&X) = default;
-CompilerInvocationBase::~CompilerInvocationBase() = default;
+CompilerInvocationRefBase::~CompilerInvocationRefBase() = default;
//===----------------------------------------------------------------------===//
// Normalizers
@@ -129,9 +152,10 @@ CompilerInvocationBase::~CompilerInvocationBase() = default;
#include "clang/Driver/Options.inc"
#undef SIMPLE_ENUM_VALUE_TABLE
-static llvm::Optional<bool>
-normalizeSimpleFlag(OptSpecifier Opt, unsigned TableIndex, const ArgList &Args,
- DiagnosticsEngine &Diags, bool &Success) {
+static llvm::Optional<bool> normalizeSimpleFlag(OptSpecifier Opt,
+ unsigned TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
if (Args.hasArg(Opt))
return true;
return None;
@@ -139,8 +163,7 @@ normalizeSimpleFlag(OptSpecifier Opt, unsigned TableIndex, const ArgList &Args,
static Optional<bool> normalizeSimpleNegativeFlag(OptSpecifier Opt, unsigned,
const ArgList &Args,
- DiagnosticsEngine &,
- bool &Success) {
+ DiagnosticsEngine &) {
if (Args.hasArg(Opt))
return false;
return None;
@@ -166,7 +189,7 @@ template <typename T,
std::enable_if_t<!is_uint64_t_convertible<T>(), bool> = false>
static auto makeFlagToValueNormalizer(T Value) {
return [Value](OptSpecifier Opt, unsigned, const ArgList &Args,
- DiagnosticsEngine &, bool &Success) -> Optional<T> {
+ DiagnosticsEngine &) -> Optional<T> {
if (Args.hasArg(Opt))
return Value;
return None;
@@ -182,8 +205,8 @@ static auto makeFlagToValueNormalizer(T Value) {
static auto makeBooleanOptionNormalizer(bool Value, bool OtherValue,
OptSpecifier OtherOpt) {
return [Value, OtherValue, OtherOpt](OptSpecifier Opt, unsigned,
- const ArgList &Args, DiagnosticsEngine &,
- bool &Success) -> Optional<bool> {
+ const ArgList &Args,
+ DiagnosticsEngine &) -> Optional<bool> {
if (const Arg *A = Args.getLastArg(Opt, OtherOpt)) {
return A->getOption().matches(Opt) ? Value : OtherValue;
}
@@ -204,14 +227,16 @@ static void denormalizeStringImpl(SmallVectorImpl<const char *> &Args,
const char *Spelling,
CompilerInvocation::StringAllocator SA,
Option::OptionClass OptClass, unsigned,
- Twine Value) {
+ const Twine &Value) {
switch (OptClass) {
case Option::SeparateClass:
case Option::JoinedOrSeparateClass:
+ case Option::JoinedAndSeparateClass:
Args.push_back(Spelling);
Args.push_back(SA(Value));
break;
case Option::JoinedClass:
+ case Option::CommaJoinedClass:
Args.push_back(SA(Twine(Spelling) + Value));
break;
default:
@@ -246,9 +271,10 @@ findValueTableByValue(const SimpleEnumValueTable &Table, unsigned Value) {
return None;
}
-static llvm::Optional<unsigned>
-normalizeSimpleEnum(OptSpecifier Opt, unsigned TableIndex, const ArgList &Args,
- DiagnosticsEngine &Diags, bool &Success) {
+static llvm::Optional<unsigned> normalizeSimpleEnum(OptSpecifier Opt,
+ unsigned TableIndex,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
assert(TableIndex < SimpleEnumValueTablesSize);
const SimpleEnumValueTable &Table = SimpleEnumValueTables[TableIndex];
@@ -260,7 +286,6 @@ normalizeSimpleEnum(OptSpecifier Opt, unsigned TableIndex, const ArgList &Args,
if (auto MaybeEnumVal = findValueTableByName(Table, ArgValue))
return MaybeEnumVal->Value;
- Success = false;
Diags.Report(diag::err_drv_invalid_value)
<< Arg->getAsString(Args) << ArgValue;
return None;
@@ -294,8 +319,7 @@ static void denormalizeSimpleEnum(SmallVectorImpl<const char *> &Args,
static Optional<std::string> normalizeString(OptSpecifier Opt, int TableIndex,
const ArgList &Args,
- DiagnosticsEngine &Diags,
- bool &Success) {
+ DiagnosticsEngine &Diags) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
return None;
@@ -303,15 +327,14 @@ static Optional<std::string> normalizeString(OptSpecifier Opt, int TableIndex,
}
template <typename IntTy>
-static Optional<IntTy>
-normalizeStringIntegral(OptSpecifier Opt, int, const ArgList &Args,
- DiagnosticsEngine &Diags, bool &Success) {
+static Optional<IntTy> normalizeStringIntegral(OptSpecifier Opt, int,
+ const ArgList &Args,
+ DiagnosticsEngine &Diags) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
return None;
IntTy Res;
if (StringRef(Arg->getValue()).getAsInteger(0, Res)) {
- Success = false;
Diags.Report(diag::err_drv_invalid_int_value)
<< Arg->getAsString(Args) << Arg->getValue();
return None;
@@ -321,7 +344,7 @@ normalizeStringIntegral(OptSpecifier Opt, int, const ArgList &Args,
static Optional<std::vector<std::string>>
normalizeStringVector(OptSpecifier Opt, int, const ArgList &Args,
- DiagnosticsEngine &, bool &Success) {
+ DiagnosticsEngine &) {
return Args.getAllArgValues(Opt);
}
@@ -359,8 +382,7 @@ static void denormalizeStringVector(SmallVectorImpl<const char *> &Args,
static Optional<std::string> normalizeTriple(OptSpecifier Opt, int TableIndex,
const ArgList &Args,
- DiagnosticsEngine &Diags,
- bool &Success) {
+ DiagnosticsEngine &Diags) {
auto *Arg = Args.getLastArg(Opt);
if (!Arg)
return None;
@@ -382,29 +404,46 @@ template <typename T> static T extractForwardValue(T KeyPath) {
template <typename T, typename U, U Value>
static T extractMaskValue(T KeyPath) {
- return KeyPath & Value;
+ return ((KeyPath & Value) == Value) ? static_cast<T>(Value) : T();
}
-#define PARSE_OPTION_WITH_MARSHALLING(ARGS, DIAGS, SUCCESS, ID, FLAGS, PARAM, \
- SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, \
- NORMALIZER, MERGER, TABLE_INDEX) \
+#define PARSE_OPTION_WITH_MARSHALLING( \
+ ARGS, DIAGS, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX) \
if ((FLAGS)&options::CC1Option) { \
KEYPATH = MERGER(KEYPATH, DEFAULT_VALUE); \
if (IMPLIED_CHECK) \
KEYPATH = MERGER(KEYPATH, IMPLIED_VALUE); \
if (SHOULD_PARSE) \
- if (auto MaybeValue = \
- NORMALIZER(OPT_##ID, TABLE_INDEX, ARGS, DIAGS, SUCCESS)) \
+ if (auto MaybeValue = NORMALIZER(OPT_##ID, TABLE_INDEX, ARGS, DIAGS)) \
KEYPATH = \
MERGER(KEYPATH, static_cast<decltype(KEYPATH)>(*MaybeValue)); \
}
+// Capture the extracted value as a lambda argument to avoid potential issues
+// with lifetime extension of the reference.
+#define GENERATE_OPTION_WITH_MARSHALLING( \
+ ARGS, STRING_ALLOCATOR, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, \
+ TABLE_INDEX) \
+ if ((FLAGS)&options::CC1Option) { \
+ [&](const auto &Extracted) { \
+ if (ALWAYS_EMIT || \
+ (Extracted != \
+ static_cast<decltype(KEYPATH)>((IMPLIED_CHECK) ? (IMPLIED_VALUE) \
+ : (DEFAULT_VALUE)))) \
+ DENORMALIZER(ARGS, SPELLING, STRING_ALLOCATOR, Option::KIND##Class, \
+ TABLE_INDEX, Extracted); \
+ }(EXTRACTOR(KEYPATH)); \
+ }
+
static const StringRef GetInputKindName(InputKind IK);
-static void FixupInvocation(CompilerInvocation &Invocation,
- DiagnosticsEngine &Diags, const InputArgList &Args,
+static bool FixupInvocation(CompilerInvocation &Invocation,
+ DiagnosticsEngine &Diags, const ArgList &Args,
InputKind IK) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
LangOptions &LangOpts = *Invocation.getLangOpts();
CodeGenOptions &CodeGenOpts = Invocation.getCodeGenOpts();
TargetOptions &TargetOpts = Invocation.getTargetOpts();
@@ -414,7 +453,7 @@ static void FixupInvocation(CompilerInvocation &Invocation,
CodeGenOpts.XRayAlwaysEmitTypedEvents = LangOpts.XRayAlwaysEmitTypedEvents;
CodeGenOpts.DisableFree = FrontendOpts.DisableFree;
FrontendOpts.GenerateGlobalModuleIndex = FrontendOpts.UseGlobalModuleIndex;
-
+ LangOpts.SanitizeCoverage = CodeGenOpts.hasSanitizeCoverage();
LangOpts.ForceEmitVTables = CodeGenOpts.ForceEmitVTables;
LangOpts.SpeculativeLoadHardening = CodeGenOpts.SpeculativeLoadHardening;
LangOpts.CurrentModule = LangOpts.ModuleName;
@@ -424,7 +463,8 @@ static void FixupInvocation(CompilerInvocation &Invocation,
CodeGenOpts.CodeModel = TargetOpts.CodeModel;
- if (LangOpts.getExceptionHandling() != llvm::ExceptionHandling::None &&
+ if (LangOpts.getExceptionHandling() !=
+ LangOptions::ExceptionHandlingKind::None &&
T.isWindowsMSVCEnvironment())
Diags.Report(diag::err_fe_invalid_exception_model)
<< static_cast<unsigned>(LangOpts.getExceptionHandling()) << T.str();
@@ -432,6 +472,9 @@ static void FixupInvocation(CompilerInvocation &Invocation,
if (LangOpts.AppleKext && !LangOpts.CPlusPlus)
Diags.Report(diag::warn_c_kext);
+ if (Args.hasArg(OPT_fconcepts_ts))
+ Diags.Report(diag::warn_fe_concepts_ts_flag);
+
if (LangOpts.NewAlignOverride &&
!llvm::isPowerOf2_32(LangOpts.NewAlignOverride)) {
Arg *A = Args.getLastArg(OPT_fnew_alignment_EQ);
@@ -440,6 +483,11 @@ static void FixupInvocation(CompilerInvocation &Invocation,
LangOpts.NewAlignOverride = 0;
}
+ // Prevent the user from specifying both -fsycl-is-device and -fsycl-is-host.
+ if (LangOpts.SYCLIsDevice && LangOpts.SYCLIsHost)
+ Diags.Report(diag::err_drv_argument_not_allowed_with) << "-fsycl-is-device"
+ << "-fsycl-is-host";
+
if (Args.hasArg(OPT_fgnu89_inline) && LangOpts.CPlusPlus)
Diags.Report(diag::err_drv_argument_not_allowed_with)
<< "-fgnu89-inline" << GetInputKindName(IK);
@@ -478,6 +526,8 @@ static void FixupInvocation(CompilerInvocation &Invocation,
Diags.Report(diag::err_drv_argument_only_allowed_with)
<< Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
<< "-fno-legacy-pass-manager";
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
//===----------------------------------------------------------------------===//
@@ -487,7 +537,9 @@ static void FixupInvocation(CompilerInvocation &Invocation,
static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
DiagnosticsEngine &Diags) {
unsigned DefaultOpt = llvm::CodeGenOpt::None;
- if (IK.getLanguage() == Language::OpenCL && !Args.hasArg(OPT_cl_opt_disable))
+ if ((IK.getLanguage() == Language::OpenCL ||
+ IK.getLanguage() == Language::OpenCLCXX) &&
+ !Args.hasArg(OPT_cl_opt_disable))
DefaultOpt = llvm::CodeGenOpt::Default;
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
@@ -528,9 +580,163 @@ static unsigned getOptimizationLevelSize(ArgList &Args) {
return 0;
}
-static std::string GetOptName(llvm::opt::OptSpecifier OptSpecifier) {
- static const OptTable &OptTable = getDriverOptTable();
- return OptTable.getOption(OptSpecifier).getPrefixedName();
+static void GenerateArg(SmallVectorImpl<const char *> &Args,
+ llvm::opt::OptSpecifier OptSpecifier,
+ CompilerInvocation::StringAllocator SA) {
+ Option Opt = getDriverOptTable().getOption(OptSpecifier);
+ denormalizeSimpleFlag(Args, SA(Opt.getPrefix() + Opt.getName()), SA,
+ Option::OptionClass::FlagClass, 0);
+}
+
+static void GenerateArg(SmallVectorImpl<const char *> &Args,
+ llvm::opt::OptSpecifier OptSpecifier,
+ const Twine &Value,
+ CompilerInvocation::StringAllocator SA) {
+ Option Opt = getDriverOptTable().getOption(OptSpecifier);
+ denormalizeString(Args, SA(Opt.getPrefix() + Opt.getName()), SA,
+ Opt.getKind(), 0, Value);
+}
+
+// Parse command line arguments into CompilerInvocation.
+using ParseFn =
+ llvm::function_ref<bool(CompilerInvocation &, ArrayRef<const char *>,
+ DiagnosticsEngine &, const char *)>;
+
+// Generate command line arguments from CompilerInvocation.
+using GenerateFn = llvm::function_ref<void(
+ CompilerInvocation &, SmallVectorImpl<const char *> &,
+ CompilerInvocation::StringAllocator)>;
+
+// May perform round-trip of command line arguments. By default, the round-trip
+// is enabled if CLANG_ROUND_TRIP_CC1_ARGS was defined during build. This can be
+// overwritten at run-time via the "-round-trip-args" and "-no-round-trip-args"
+// command line flags.
+// During round-trip, the command line arguments are parsed into a dummy
+// instance of CompilerInvocation which is used to generate the command line
+// arguments again. The real CompilerInvocation instance is then created by
+// parsing the generated arguments, not the original ones.
+static bool RoundTrip(ParseFn Parse, GenerateFn Generate,
+ CompilerInvocation &RealInvocation,
+ CompilerInvocation &DummyInvocation,
+ ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags, const char *Argv0) {
+ // FIXME: Switch to '#ifndef NDEBUG' when possible.
+#ifdef CLANG_ROUND_TRIP_CC1_ARGS
+ bool DoRoundTripDefault = true;
+#else
+ bool DoRoundTripDefault = false;
+#endif
+
+ bool DoRoundTrip = DoRoundTripDefault;
+ for (const auto *Arg : CommandLineArgs) {
+ if (Arg == StringRef("-round-trip-args"))
+ DoRoundTrip = true;
+ if (Arg == StringRef("-no-round-trip-args"))
+ DoRoundTrip = false;
+ }
+
+ // If round-trip was not requested, simply run the parser with the real
+ // invocation diagnostics.
+ if (!DoRoundTrip)
+ return Parse(RealInvocation, CommandLineArgs, Diags, Argv0);
+
+ // Serializes quoted (and potentially escaped) arguments.
+ auto SerializeArgs = [](ArrayRef<const char *> Args) {
+ std::string Buffer;
+ llvm::raw_string_ostream OS(Buffer);
+ for (const char *Arg : Args) {
+ llvm::sys::printArg(OS, Arg, /*Quote=*/true);
+ OS << ' ';
+ }
+ OS.flush();
+ return Buffer;
+ };
+
+ // Setup a dummy DiagnosticsEngine.
+ DiagnosticsEngine DummyDiags(new DiagnosticIDs(), new DiagnosticOptions());
+ DummyDiags.setClient(new TextDiagnosticBuffer());
+
+ // Run the first parse on the original arguments with the dummy invocation and
+ // diagnostics.
+ if (!Parse(DummyInvocation, CommandLineArgs, DummyDiags, Argv0) ||
+ DummyDiags.getNumWarnings() != 0) {
+ // If the first parse did not succeed, it must be user mistake (invalid
+ // command line arguments). We won't be able to generate arguments that
+ // would reproduce the same result. Let's fail again with the real
+ // invocation and diagnostics, so all side-effects of parsing are visible.
+ unsigned NumWarningsBefore = Diags.getNumWarnings();
+ auto Success = Parse(RealInvocation, CommandLineArgs, Diags, Argv0);
+ if (!Success || Diags.getNumWarnings() != NumWarningsBefore)
+ return Success;
+
+ // Parse with original options and diagnostics succeeded even though it
+ // shouldn't have. Something is off.
+ Diags.Report(diag::err_cc1_round_trip_fail_then_ok);
+ Diags.Report(diag::note_cc1_round_trip_original)
+ << SerializeArgs(CommandLineArgs);
+ return false;
+ }
+
+ // Setup string allocator.
+ llvm::BumpPtrAllocator Alloc;
+ llvm::StringSaver StringPool(Alloc);
+ auto SA = [&StringPool](const Twine &Arg) {
+ return StringPool.save(Arg).data();
+ };
+
+ // Generate arguments from the dummy invocation. If Generate is the
+ // inverse of Parse, the newly generated arguments must have the same
+ // semantics as the original.
+ SmallVector<const char *> GeneratedArgs1;
+ Generate(DummyInvocation, GeneratedArgs1, SA);
+
+ // Run the second parse, now on the generated arguments, and with the real
+ // invocation and diagnostics. The result is what we will end up using for the
+ // rest of compilation, so if Generate is not inverse of Parse, something down
+ // the line will break.
+ bool Success2 = Parse(RealInvocation, GeneratedArgs1, Diags, Argv0);
+
+ // The first parse on original arguments succeeded, but second parse of
+ // generated arguments failed. Something must be wrong with the generator.
+ if (!Success2) {
+ Diags.Report(diag::err_cc1_round_trip_ok_then_fail);
+ Diags.Report(diag::note_cc1_round_trip_generated)
+ << 1 << SerializeArgs(GeneratedArgs1);
+ return false;
+ }
+
+ // Generate arguments again, this time from the options we will end up using
+ // for the rest of the compilation.
+ SmallVector<const char *> GeneratedArgs2;
+ Generate(RealInvocation, GeneratedArgs2, SA);
+
+ // Compares two lists of generated arguments.
+ auto Equal = [](const ArrayRef<const char *> A,
+ const ArrayRef<const char *> B) {
+ return std::equal(A.begin(), A.end(), B.begin(), B.end(),
+ [](const char *AElem, const char *BElem) {
+ return StringRef(AElem) == StringRef(BElem);
+ });
+ };
+
+ // If we generated different arguments from what we assume are two
+ // semantically equivalent CompilerInvocations, the Generate function may
+ // be non-deterministic.
+ if (!Equal(GeneratedArgs1, GeneratedArgs2)) {
+ Diags.Report(diag::err_cc1_round_trip_mismatch);
+ Diags.Report(diag::note_cc1_round_trip_generated)
+ << 1 << SerializeArgs(GeneratedArgs1);
+ Diags.Report(diag::note_cc1_round_trip_generated)
+ << 2 << SerializeArgs(GeneratedArgs2);
+ return false;
+ }
+
+ Diags.Report(diag::remark_cc1_round_trip_generated)
+ << 1 << SerializeArgs(GeneratedArgs1);
+ Diags.Report(diag::remark_cc1_round_trip_generated)
+ << 2 << SerializeArgs(GeneratedArgs2);
+
+ return Success2;
}
static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
@@ -539,17 +745,17 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
for (auto *A : Args.filtered(Group)) {
if (A->getOption().getKind() == Option::FlagClass) {
// The argument is a pure flag (such as OPT_Wall or OPT_Wdeprecated). Add
- // its name (minus the "W" or "R" at the beginning) to the warning list.
+ // its name (minus the "W" or "R" at the beginning) to the diagnostics.
Diagnostics.push_back(
std::string(A->getOption().getName().drop_front(1)));
} else if (A->getOption().matches(GroupWithValue)) {
- // This is -Wfoo= or -Rfoo=, where foo is the name of the diagnostic group.
+ // This is -Wfoo= or -Rfoo=, where foo is the name of the diagnostic
+ // group. Add only the group name to the diagnostics.
Diagnostics.push_back(
std::string(A->getOption().getName().drop_front(1).rtrim("=-")));
} else {
// Otherwise, add its value (for OPT_W_Joined and similar).
- for (const auto *Arg : A->getValues())
- Diagnostics.emplace_back(Arg);
+ Diagnostics.push_back(A->getValue());
}
}
}
@@ -561,21 +767,128 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
static void getAllNoBuiltinFuncValues(ArgList &Args,
std::vector<std::string> &Funcs) {
- SmallVector<const char *, 8> Values;
- for (const auto &Arg : Args) {
- const Option &O = Arg->getOption();
- if (O.matches(options::OPT_fno_builtin_)) {
- const char *FuncName = Arg->getValue();
- if (Builtin::Context::isBuiltinFunc(FuncName))
- Values.push_back(FuncName);
+ std::vector<std::string> Values = Args.getAllArgValues(OPT_fno_builtin_);
+ auto BuiltinEnd = llvm::partition(Values, [](const std::string FuncName) {
+ return Builtin::Context::isBuiltinFunc(FuncName);
+ });
+ Funcs.insert(Funcs.end(), Values.begin(), BuiltinEnd);
+}
+
+static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ const AnalyzerOptions *AnalyzerOpts = &Opts;
+
+#define ANALYZER_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef ANALYZER_OPTION_WITH_MARSHALLING
+
+ if (Opts.AnalysisStoreOpt != RegionStoreModel) {
+ switch (Opts.AnalysisStoreOpt) {
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
+ case NAME##Model: \
+ GenerateArg(Args, OPT_analyzer_store, CMDFLAG, SA); \
+ break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ default:
+ llvm_unreachable("Tried to generate unknown analysis store.");
+ }
+ }
+
+ if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel) {
+ switch (Opts.AnalysisConstraintsOpt) {
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
+ case NAME##Model: \
+ GenerateArg(Args, OPT_analyzer_constraints, CMDFLAG, SA); \
+ break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ default:
+ llvm_unreachable("Tried to generate unknown analysis constraint.");
+ }
+ }
+
+ if (Opts.AnalysisDiagOpt != PD_HTML) {
+ switch (Opts.AnalysisDiagOpt) {
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN) \
+ case PD_##NAME: \
+ GenerateArg(Args, OPT_analyzer_output, CMDFLAG, SA); \
+ break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ default:
+ llvm_unreachable("Tried to generate unknown analysis diagnostic client.");
+ }
+ }
+
+ if (Opts.AnalysisPurgeOpt != PurgeStmt) {
+ switch (Opts.AnalysisPurgeOpt) {
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
+ case NAME: \
+ GenerateArg(Args, OPT_analyzer_purge, CMDFLAG, SA); \
+ break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ default:
+ llvm_unreachable("Tried to generate unknown analysis purge mode.");
+ }
+ }
+
+ if (Opts.InliningMode != NoRedundancy) {
+ switch (Opts.InliningMode) {
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) \
+ case NAME: \
+ GenerateArg(Args, OPT_analyzer_inlining_mode, CMDFLAG, SA); \
+ break;
+#include "clang/StaticAnalyzer/Core/Analyses.def"
+ default:
+ llvm_unreachable("Tried to generate unknown analysis inlining mode.");
}
}
- Funcs.insert(Funcs.end(), Values.begin(), Values.end());
+
+ for (const auto &CP : Opts.CheckersAndPackages) {
+ OptSpecifier Opt =
+ CP.second ? OPT_analyzer_checker : OPT_analyzer_disable_checker;
+ GenerateArg(Args, Opt, CP.first, SA);
+ }
+
+ AnalyzerOptions ConfigOpts;
+ parseAnalyzerConfigs(ConfigOpts, nullptr);
+
+ for (const auto &C : Opts.Config) {
+ // Don't generate anything that came from parseAnalyzerConfigs. It would be
+ // redundant and may not be valid on the command line.
+ auto Entry = ConfigOpts.Config.find(C.getKey());
+ if (Entry != ConfigOpts.Config.end() && Entry->getValue() == C.getValue())
+ continue;
+
+ GenerateArg(Args, OPT_analyzer_config, C.getKey() + "=" + C.getValue(), SA);
+ }
+
+ // Nothing to generate for FullCompilerInvocation.
}
static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
- bool Success = true;
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ AnalyzerOptions *AnalyzerOpts = &Opts;
+
+#define ANALYZER_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef ANALYZER_OPTION_WITH_MARSHALLING
+
if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
StringRef Name = A->getValue();
AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
@@ -586,7 +899,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
if (Value == NumStores) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- Success = false;
} else {
Opts.AnalysisStoreOpt = Value;
}
@@ -602,7 +914,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
if (Value == NumConstraints) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- Success = false;
} else {
Opts.AnalysisConstraintsOpt = Value;
}
@@ -618,7 +929,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
if (Value == NUM_ANALYSIS_DIAG_CLIENTS) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- Success = false;
} else {
Opts.AnalysisDiagOpt = Value;
}
@@ -634,7 +944,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
if (Value == NumPurgeModes) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- Success = false;
} else {
Opts.AnalysisPurgeOpt = Value;
}
@@ -650,7 +959,6 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
if (Value == NumInliningModes) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
- Success = false;
} else {
Opts.InliningMode = Value;
}
@@ -685,14 +993,12 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
if (val.empty()) {
Diags.Report(SourceLocation(),
diag::err_analyzer_config_no_value) << configVal;
- Success = false;
break;
}
if (val.find('=') != StringRef::npos) {
Diags.Report(SourceLocation(),
diag::err_analyzer_config_multiple_values)
<< configVal;
- Success = false;
break;
}
@@ -722,7 +1028,7 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
}
os.flush();
- return Success;
+ return Diags.getNumErrors() == NumErrorsBefore;
}
static StringRef getStringOption(AnalyzerOptions::ConfigTable &Config,
@@ -843,20 +1149,69 @@ static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
<< "a filename";
}
-/// Create a new Regex instance out of the string value in \p RpassArg.
-/// It returns a pointer to the newly generated Regex instance.
-static std::shared_ptr<llvm::Regex>
-GenerateOptimizationRemarkRegex(DiagnosticsEngine &Diags, ArgList &Args,
- Arg *RpassArg) {
- StringRef Val = RpassArg->getValue();
- std::string RegexError;
- std::shared_ptr<llvm::Regex> Pattern = std::make_shared<llvm::Regex>(Val);
- if (!Pattern->isValid(RegexError)) {
- Diags.Report(diag::err_drv_optimization_remark_pattern)
- << RegexError << RpassArg->getAsString(Args);
- Pattern.reset();
+/// Generate a remark argument. This is an inverse of `ParseOptimizationRemark`.
+static void
+GenerateOptimizationRemark(SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA,
+ OptSpecifier OptEQ, StringRef Name,
+ const CodeGenOptions::OptRemark &Remark) {
+ if (Remark.hasValidPattern()) {
+ GenerateArg(Args, OptEQ, Remark.Pattern, SA);
+ } else if (Remark.Kind == CodeGenOptions::RK_Enabled) {
+ GenerateArg(Args, OPT_R_Joined, Name, SA);
+ } else if (Remark.Kind == CodeGenOptions::RK_Disabled) {
+ GenerateArg(Args, OPT_R_Joined, StringRef("no-") + Name, SA);
}
- return Pattern;
+}
+
+/// Parse a remark command line argument. It may be missing, disabled/enabled by
+/// '-R[no-]group' or specified with a regular expression by '-Rgroup=regexp'.
+/// On top of that, it can be disabled/enabled globally by '-R[no-]everything'.
+static CodeGenOptions::OptRemark
+ParseOptimizationRemark(DiagnosticsEngine &Diags, ArgList &Args,
+ OptSpecifier OptEQ, StringRef Name) {
+ CodeGenOptions::OptRemark Result;
+
+ auto InitializeResultPattern = [&Diags, &Args, &Result](const Arg *A) {
+ Result.Pattern = A->getValue();
+
+ std::string RegexError;
+ Result.Regex = std::make_shared<llvm::Regex>(Result.Pattern);
+ if (!Result.Regex->isValid(RegexError)) {
+ Diags.Report(diag::err_drv_optimization_remark_pattern)
+ << RegexError << A->getAsString(Args);
+ return false;
+ }
+
+ return true;
+ };
+
+ for (Arg *A : Args) {
+ if (A->getOption().matches(OPT_R_Joined)) {
+ StringRef Value = A->getValue();
+
+ if (Value == Name)
+ Result.Kind = CodeGenOptions::RK_Enabled;
+ else if (Value == "everything")
+ Result.Kind = CodeGenOptions::RK_EnabledEverything;
+ else if (Value.split('-') == std::make_pair(StringRef("no"), Name))
+ Result.Kind = CodeGenOptions::RK_Disabled;
+ else if (Value == "no-everything")
+ Result.Kind = CodeGenOptions::RK_DisabledEverything;
+ } else if (A->getOption().matches(OptEQ)) {
+ Result.Kind = CodeGenOptions::RK_WithPattern;
+ if (!InitializeResultPattern(A))
+ return CodeGenOptions::OptRemark();
+ }
+ }
+
+ if (Result.Kind == CodeGenOptions::RK_Disabled ||
+ Result.Kind == CodeGenOptions::RK_DisabledEverything) {
+ Result.Pattern = "";
+ Result.Regex = nullptr;
+ }
+
+ return Result;
}
static bool parseDiagnosticLevelMask(StringRef FlagName,
@@ -893,6 +1248,12 @@ static void parseSanitizerKinds(StringRef FlagName,
}
}
+static SmallVector<StringRef, 4> serializeSanitizerKinds(SanitizerSet S) {
+ SmallVector<StringRef, 4> Values;
+ serializeSanitizerSet(S, Values);
+ return Values;
+}
+
static void parseXRayInstrumentationBundle(StringRef FlagName, StringRef Bundle,
ArgList &Args, DiagnosticsEngine &D,
XRayInstrSet &S) {
@@ -912,6 +1273,15 @@ static void parseXRayInstrumentationBundle(StringRef FlagName, StringRef Bundle,
}
}
+static std::string serializeXRayInstrumentationBundle(const XRayInstrSet &S) {
+ llvm::SmallVector<StringRef, 2> BundleParts;
+ serializeXRayInstrValue(S, BundleParts);
+ std::string Buffer;
+ llvm::raw_string_ostream OS(Buffer);
+ llvm::interleave(BundleParts, OS, [&OS](StringRef Part) { OS << Part; }, ",");
+ return OS.str();
+}
+
// Set the profile kind using fprofile-instrument-use-path.
static void setPGOUseInstrumentor(CodeGenOptions &Opts,
const Twine &ProfileName) {
@@ -933,13 +1303,250 @@ static void setPGOUseInstrumentor(CodeGenOptions &Opts,
Opts.setProfileUse(CodeGenOptions::ProfileClangInstr);
}
+void CompilerInvocation::GenerateCodeGenArgs(
+ const CodeGenOptions &Opts, SmallVectorImpl<const char *> &Args,
+ StringAllocator SA, const llvm::Triple &T, const std::string &OutputFile,
+ const LangOptions *LangOpts) {
+ const CodeGenOptions &CodeGenOpts = Opts;
+
+ if (Opts.OptimizationLevel == 0)
+ GenerateArg(Args, OPT_O0, SA);
+ else
+ GenerateArg(Args, OPT_O, Twine(Opts.OptimizationLevel), SA);
+
+#define CODEGEN_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef CODEGEN_OPTION_WITH_MARSHALLING
+
+ if (Opts.OptimizationLevel > 0) {
+ if (Opts.Inlining == CodeGenOptions::NormalInlining)
+ GenerateArg(Args, OPT_finline_functions, SA);
+ else if (Opts.Inlining == CodeGenOptions::OnlyHintInlining)
+ GenerateArg(Args, OPT_finline_hint_functions, SA);
+ else if (Opts.Inlining == CodeGenOptions::OnlyAlwaysInlining)
+ GenerateArg(Args, OPT_fno_inline, SA);
+ }
+
+ if (Opts.DirectAccessExternalData && LangOpts->PICLevel != 0)
+ GenerateArg(Args, OPT_fdirect_access_external_data, SA);
+ else if (!Opts.DirectAccessExternalData && LangOpts->PICLevel == 0)
+ GenerateArg(Args, OPT_fno_direct_access_external_data, SA);
+
+ Optional<StringRef> DebugInfoVal;
+ switch (Opts.DebugInfo) {
+ case codegenoptions::DebugLineTablesOnly:
+ DebugInfoVal = "line-tables-only";
+ break;
+ case codegenoptions::DebugDirectivesOnly:
+ DebugInfoVal = "line-directives-only";
+ break;
+ case codegenoptions::DebugInfoConstructor:
+ DebugInfoVal = "constructor";
+ break;
+ case codegenoptions::LimitedDebugInfo:
+ DebugInfoVal = "limited";
+ break;
+ case codegenoptions::FullDebugInfo:
+ DebugInfoVal = "standalone";
+ break;
+ case codegenoptions::UnusedTypeInfo:
+ DebugInfoVal = "unused-types";
+ break;
+ case codegenoptions::NoDebugInfo: // default value
+ DebugInfoVal = None;
+ break;
+ case codegenoptions::LocTrackingOnly: // implied value
+ DebugInfoVal = None;
+ break;
+ }
+ if (DebugInfoVal)
+ GenerateArg(Args, OPT_debug_info_kind_EQ, *DebugInfoVal, SA);
+
+ for (const auto &Prefix : Opts.DebugPrefixMap)
+ GenerateArg(Args, OPT_fdebug_prefix_map_EQ,
+ Prefix.first + "=" + Prefix.second, SA);
+
+ for (const auto &Prefix : Opts.CoveragePrefixMap)
+ GenerateArg(Args, OPT_fcoverage_prefix_map_EQ,
+ Prefix.first + "=" + Prefix.second, SA);
+
+ if (Opts.NewStructPathTBAA)
+ GenerateArg(Args, OPT_new_struct_path_tbaa, SA);
+
+ if (Opts.OptimizeSize == 1)
+ GenerateArg(Args, OPT_O, "s", SA);
+ else if (Opts.OptimizeSize == 2)
+ GenerateArg(Args, OPT_O, "z", SA);
+
+ // SimplifyLibCalls is set only in the absence of -fno-builtin and
+ // -ffreestanding. We'll consider that when generating them.
+
+ // NoBuiltinFuncs are generated by LangOptions.
+
+ if (Opts.UnrollLoops && Opts.OptimizationLevel <= 1)
+ GenerateArg(Args, OPT_funroll_loops, SA);
+ else if (!Opts.UnrollLoops && Opts.OptimizationLevel > 1)
+ GenerateArg(Args, OPT_fno_unroll_loops, SA);
+
+ if (!Opts.BinutilsVersion.empty())
+ GenerateArg(Args, OPT_fbinutils_version_EQ, Opts.BinutilsVersion, SA);
+
+ if (Opts.DebugNameTable ==
+ static_cast<unsigned>(llvm::DICompileUnit::DebugNameTableKind::GNU))
+ GenerateArg(Args, OPT_ggnu_pubnames, SA);
+ else if (Opts.DebugNameTable ==
+ static_cast<unsigned>(
+ llvm::DICompileUnit::DebugNameTableKind::Default))
+ GenerateArg(Args, OPT_gpubnames, SA);
+
+ // ProfileInstrumentUsePath is marshalled automatically, no need to generate
+ // it or PGOUseInstrumentor.
+
+ if (Opts.TimePasses) {
+ if (Opts.TimePassesPerRun)
+ GenerateArg(Args, OPT_ftime_report_EQ, "per-pass-run", SA);
+ else
+ GenerateArg(Args, OPT_ftime_report, SA);
+ }
+
+ if (Opts.PrepareForLTO && !Opts.PrepareForThinLTO)
+ GenerateArg(Args, OPT_flto, SA);
+
+ if (Opts.PrepareForThinLTO)
+ GenerateArg(Args, OPT_flto_EQ, "thin", SA);
+
+ if (!Opts.ThinLTOIndexFile.empty())
+ GenerateArg(Args, OPT_fthinlto_index_EQ, Opts.ThinLTOIndexFile, SA);
+
+ if (Opts.SaveTempsFilePrefix == OutputFile)
+ GenerateArg(Args, OPT_save_temps_EQ, "obj", SA);
+
+ StringRef MemProfileBasename("memprof.profraw");
+ if (!Opts.MemoryProfileOutput.empty()) {
+ if (Opts.MemoryProfileOutput == MemProfileBasename) {
+ GenerateArg(Args, OPT_fmemory_profile, SA);
+ } else {
+ size_t ArgLength =
+ Opts.MemoryProfileOutput.size() - MemProfileBasename.size();
+ GenerateArg(Args, OPT_fmemory_profile_EQ,
+ Opts.MemoryProfileOutput.substr(0, ArgLength), SA);
+ }
+ }
+
+ if (memcmp(Opts.CoverageVersion, "408*", 4) != 0)
+ GenerateArg(Args, OPT_coverage_version_EQ,
+ StringRef(Opts.CoverageVersion, 4), SA);
+
+ // TODO: Check if we need to generate arguments stored in CmdArgs. (Namely
+ // '-fembed_bitcode', which does not map to any CompilerInvocation field and
+ // won't be generated.)
+
+ if (Opts.XRayInstrumentationBundle.Mask != XRayInstrKind::All) {
+ std::string InstrBundle =
+ serializeXRayInstrumentationBundle(Opts.XRayInstrumentationBundle);
+ if (!InstrBundle.empty())
+ GenerateArg(Args, OPT_fxray_instrumentation_bundle, InstrBundle, SA);
+ }
+
+ if (Opts.CFProtectionReturn && Opts.CFProtectionBranch)
+ GenerateArg(Args, OPT_fcf_protection_EQ, "full", SA);
+ else if (Opts.CFProtectionReturn)
+ GenerateArg(Args, OPT_fcf_protection_EQ, "return", SA);
+ else if (Opts.CFProtectionBranch)
+ GenerateArg(Args, OPT_fcf_protection_EQ, "branch", SA);
+
+ for (const auto &F : Opts.LinkBitcodeFiles) {
+ bool Builtint = F.LinkFlags == llvm::Linker::Flags::LinkOnlyNeeded &&
+ F.PropagateAttrs && F.Internalize;
+ GenerateArg(Args,
+ Builtint ? OPT_mlink_builtin_bitcode : OPT_mlink_bitcode_file,
+ F.Filename, SA);
+ }
+
+ // TODO: Consider removing marshalling annotations from f[no_]emulated_tls.
+ // That would make it easy to generate the option only **once** if it was
+ // explicitly set to non-default value.
+ if (Opts.ExplicitEmulatedTLS) {
+ GenerateArg(
+ Args, Opts.EmulatedTLS ? OPT_femulated_tls : OPT_fno_emulated_tls, SA);
+ }
+
+ if (Opts.FPDenormalMode != llvm::DenormalMode::getIEEE())
+ GenerateArg(Args, OPT_fdenormal_fp_math_EQ, Opts.FPDenormalMode.str(), SA);
+
+ if (Opts.FP32DenormalMode != llvm::DenormalMode::getIEEE())
+ GenerateArg(Args, OPT_fdenormal_fp_math_f32_EQ, Opts.FP32DenormalMode.str(),
+ SA);
+
+ if (Opts.StructReturnConvention == CodeGenOptions::SRCK_OnStack) {
+ OptSpecifier Opt =
+ T.isPPC32() ? OPT_maix_struct_return : OPT_fpcc_struct_return;
+ GenerateArg(Args, Opt, SA);
+ } else if (Opts.StructReturnConvention == CodeGenOptions::SRCK_InRegs) {
+ OptSpecifier Opt =
+ T.isPPC32() ? OPT_msvr4_struct_return : OPT_freg_struct_return;
+ GenerateArg(Args, Opt, SA);
+ }
+
+ if (Opts.EnableAIXExtendedAltivecABI)
+ GenerateArg(Args, OPT_mabi_EQ_vec_extabi, SA);
+
+ if (!Opts.OptRecordPasses.empty())
+ GenerateArg(Args, OPT_opt_record_passes, Opts.OptRecordPasses, SA);
+
+ if (!Opts.OptRecordFormat.empty())
+ GenerateArg(Args, OPT_opt_record_format, Opts.OptRecordFormat, SA);
+
+ GenerateOptimizationRemark(Args, SA, OPT_Rpass_EQ, "pass",
+ Opts.OptimizationRemark);
+
+ GenerateOptimizationRemark(Args, SA, OPT_Rpass_missed_EQ, "pass-missed",
+ Opts.OptimizationRemarkMissed);
+
+ GenerateOptimizationRemark(Args, SA, OPT_Rpass_analysis_EQ, "pass-analysis",
+ Opts.OptimizationRemarkAnalysis);
+
+ GenerateArg(Args, OPT_fdiagnostics_hotness_threshold_EQ,
+ Opts.DiagnosticsHotnessThreshold
+ ? Twine(*Opts.DiagnosticsHotnessThreshold)
+ : "auto",
+ SA);
+
+ for (StringRef Sanitizer : serializeSanitizerKinds(Opts.SanitizeRecover))
+ GenerateArg(Args, OPT_fsanitize_recover_EQ, Sanitizer, SA);
+
+ for (StringRef Sanitizer : serializeSanitizerKinds(Opts.SanitizeTrap))
+ GenerateArg(Args, OPT_fsanitize_trap_EQ, Sanitizer, SA);
+
+ if (!Opts.EmitVersionIdentMetadata)
+ GenerateArg(Args, OPT_Qn, SA);
+
+ switch (Opts.FiniteLoops) {
+ case CodeGenOptions::FiniteLoopsKind::Language:
+ break;
+ case CodeGenOptions::FiniteLoopsKind::Always:
+ GenerateArg(Args, OPT_ffinite_loops, SA);
+ break;
+ case CodeGenOptions::FiniteLoopsKind::Never:
+ GenerateArg(Args, OPT_fno_finite_loops, SA);
+ break;
+ }
+}
+
bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
InputKind IK,
DiagnosticsEngine &Diags,
const llvm::Triple &T,
const std::string &OutputFile,
const LangOptions &LangOptsRef) {
- bool Success = true;
+ unsigned NumErrorsBefore = Diags.getNumErrors();
unsigned OptimizationLevel = getOptimizationLevel(Args, IK, Diags);
// TODO: This could be done in Driver
@@ -965,10 +1572,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING(Args, Diags, Success, ID, FLAGS, PARAM, \
- SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
- MERGER, TABLE_INDEX)
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
#include "clang/Driver/Options.inc"
#undef CODEGEN_OPTION_WITH_MARSHALLING
@@ -998,13 +1604,36 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.DirectAccessExternalData =
Args.hasArg(OPT_fdirect_access_external_data) ||
(!Args.hasArg(OPT_fno_direct_access_external_data) &&
- getLastArgIntValue(Args, OPT_pic_level, 0, Diags) == 0);
+ LangOpts->PICLevel == 0);
+
+ if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
+ unsigned Val =
+ llvm::StringSwitch<unsigned>(A->getValue())
+ .Case("line-tables-only", codegenoptions::DebugLineTablesOnly)
+ .Case("line-directives-only", codegenoptions::DebugDirectivesOnly)
+ .Case("constructor", codegenoptions::DebugInfoConstructor)
+ .Case("limited", codegenoptions::LimitedDebugInfo)
+ .Case("standalone", codegenoptions::FullDebugInfo)
+ .Case("unused-types", codegenoptions::UnusedTypeInfo)
+ .Default(~0U);
+ if (Val == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
+ << A->getValue();
+ else
+ Opts.setDebugInfo(static_cast<codegenoptions::DebugInfoKind>(Val));
+ }
// If -fuse-ctor-homing is set and limited debug info is already on, then use
- // constructor homing.
- if (Args.getLastArg(OPT_fuse_ctor_homing))
- if (Opts.getDebugInfo() == codegenoptions::LimitedDebugInfo)
+ // constructor homing, and vice versa for -fno-use-ctor-homing.
+ if (const Arg *A =
+ Args.getLastArg(OPT_fuse_ctor_homing, OPT_fno_use_ctor_homing)) {
+ if (A->getOption().matches(OPT_fuse_ctor_homing) &&
+ Opts.getDebugInfo() == codegenoptions::LimitedDebugInfo)
Opts.setDebugInfo(codegenoptions::DebugInfoConstructor);
+ if (A->getOption().matches(OPT_fno_use_ctor_homing) &&
+ Opts.getDebugInfo() == codegenoptions::DebugInfoConstructor)
+ Opts.setDebugInfo(codegenoptions::LimitedDebugInfo);
+ }
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
@@ -1012,9 +1641,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
{std::string(Split.first), std::string(Split.second)});
}
- for (const auto &Arg : Args.getAllArgValues(OPT_fprofile_prefix_map_EQ)) {
+ for (const auto &Arg : Args.getAllArgValues(OPT_fcoverage_prefix_map_EQ)) {
auto Split = StringRef(Arg).split('=');
- Opts.ProfilePrefixMap.insert(
+ Opts.CoveragePrefixMap.insert(
{std::string(Split.first), std::string(Split.second)});
}
@@ -1027,17 +1656,21 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
llvm::is_contained(DebugEntryValueArchs, T.getArch()))
Opts.EmitCallSiteInfo = true;
+ if (!Opts.EnableDIPreservationVerify && Opts.DIBugsReportFilePath.size()) {
+ Diags.Report(diag::warn_ignoring_verify_debuginfo_preserve_export)
+ << Opts.DIBugsReportFilePath;
+ Opts.DIBugsReportFilePath = "";
+ }
+
Opts.NewStructPathTBAA = !Args.hasArg(OPT_no_struct_path_tbaa) &&
Args.hasArg(OPT_new_struct_path_tbaa);
Opts.OptimizeSize = getOptimizationLevelSize(Args);
- Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
- Args.hasArg(OPT_ffreestanding));
+ Opts.SimplifyLibCalls = !LangOpts->NoBuiltin;
if (Opts.SimplifyLibCalls)
- getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
+ Opts.NoBuiltinFuncs = LangOpts->NoBuiltinFuncs;
Opts.UnrollLoops =
Args.hasFlag(OPT_funroll_loops, OPT_fno_unroll_loops,
(Opts.OptimizationLevel > 1));
-
Opts.BinutilsVersion =
std::string(Args.getLastArgValue(OPT_fbinutils_version_EQ));
@@ -1071,11 +1704,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
}
}
- // Basic Block Sections implies Function Sections.
- Opts.FunctionSections =
- Args.hasArg(OPT_ffunction_sections) ||
- (Opts.BBSections != "none" && Opts.BBSections != "labels");
-
Opts.PrepareForLTO = Args.hasArg(OPT_flto, OPT_flto_EQ);
Opts.PrepareForThinLTO = false;
if (Arg *A = Args.getLastArg(OPT_flto_EQ)) {
@@ -1108,6 +1736,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
} else if (Args.hasArg(OPT_fmemory_profile))
Opts.MemoryProfileOutput = MemProfileBasename;
+ memcpy(Opts.CoverageVersion, "408*", 4);
if (Opts.EmitGcovArcs || Opts.EmitGcovNotes) {
if (Args.hasArg(OPT_coverage_version_EQ)) {
StringRef CoverageVersion = Args.getLastArgValue(OPT_coverage_version_EQ);
@@ -1159,10 +1788,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.CFProtectionReturn = 1;
else if (Name == "branch")
Opts.CFProtectionBranch = 1;
- else if (Name != "none") {
+ else if (Name != "none")
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
- Success = false;
- }
}
for (auto *A :
@@ -1184,6 +1811,14 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.ExplicitEmulatedTLS = true;
}
+ if (Arg *A = Args.getLastArg(OPT_ftlsmodel_EQ)) {
+ if (T.isOSAIX()) {
+ StringRef Name = A->getValue();
+ if (Name != "global-dynamic")
+ Diags.Report(diag::err_aix_unsupported_tls_model) << Name;
+ }
+ }
+
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
@@ -1220,10 +1855,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
}
}
- if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility) ||
- !Args.hasArg(OPT_fvisibility)))
- Opts.IgnoreXCOFFVisibility = 1;
-
if (Arg *A =
Args.getLastArg(OPT_mabi_EQ_vec_default, OPT_mabi_EQ_vec_extabi)) {
if (!T.isOSAIX())
@@ -1231,13 +1862,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
<< A->getSpelling() << T.str();
const Option &O = A->getOption();
- if (O.matches(OPT_mabi_EQ_vec_default))
- Diags.Report(diag::err_aix_default_altivec_abi)
- << A->getSpelling() << T.str();
- else {
- assert(O.matches(OPT_mabi_EQ_vec_extabi));
- Opts.EnableAIXExtendedAltivecABI = 1;
- }
+ Opts.EnableAIXExtendedAltivecABI = O.matches(OPT_mabi_EQ_vec_extabi);
}
bool NeedLocTracking = false;
@@ -1255,23 +1880,18 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
NeedLocTracking = true;
}
- if (Arg *A = Args.getLastArg(OPT_Rpass_EQ)) {
- Opts.OptimizationRemarkPattern =
- GenerateOptimizationRemarkRegex(Diags, Args, A);
- NeedLocTracking = true;
- }
+ Opts.OptimizationRemark =
+ ParseOptimizationRemark(Diags, Args, OPT_Rpass_EQ, "pass");
- if (Arg *A = Args.getLastArg(OPT_Rpass_missed_EQ)) {
- Opts.OptimizationRemarkMissedPattern =
- GenerateOptimizationRemarkRegex(Diags, Args, A);
- NeedLocTracking = true;
- }
+ Opts.OptimizationRemarkMissed =
+ ParseOptimizationRemark(Diags, Args, OPT_Rpass_missed_EQ, "pass-missed");
- if (Arg *A = Args.getLastArg(OPT_Rpass_analysis_EQ)) {
- Opts.OptimizationRemarkAnalysisPattern =
- GenerateOptimizationRemarkRegex(Diags, Args, A);
- NeedLocTracking = true;
- }
+ Opts.OptimizationRemarkAnalysis = ParseOptimizationRemark(
+ Diags, Args, OPT_Rpass_analysis_EQ, "pass-analysis");
+
+ NeedLocTracking |= Opts.OptimizationRemark.hasValidPattern() ||
+ Opts.OptimizationRemarkMissed.hasValidPattern() ||
+ Opts.OptimizationRemarkAnalysis.hasValidPattern();
bool UsingSampleProfile = !Opts.SampleProfileFile.empty();
bool UsingProfile = UsingSampleProfile ||
@@ -1308,6 +1928,9 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (UsingSampleProfile)
NeedLocTracking = true;
+ if (!Opts.StackUsageOutput.empty())
+ NeedLocTracking = true;
+
// If the user requested a flag that requires source locations available in
// the backend, make sure that the backend tracks source location information.
if (NeedLocTracking && Opts.getDebugInfo() == codegenoptions::NoDebugInfo)
@@ -1324,55 +1947,122 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.EmitVersionIdentMetadata = Args.hasFlag(OPT_Qy, OPT_Qn, true);
- return Success;
+ if (Args.hasArg(options::OPT_ffinite_loops))
+ Opts.FiniteLoops = CodeGenOptions::FiniteLoopsKind::Always;
+ else if (Args.hasArg(options::OPT_fno_finite_loops))
+ Opts.FiniteLoops = CodeGenOptions::FiniteLoopsKind::Never;
+
+ Opts.EmitIEEENaNCompliantInsts =
+ Args.hasFlag(options::OPT_mamdgpu_ieee, options::OPT_mno_amdgpu_ieee);
+ if (!Opts.EmitIEEENaNCompliantInsts && !LangOptsRef.NoHonorNaNs)
+ Diags.Report(diag::err_drv_amdgpu_ieee_without_no_honor_nans);
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
-static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
- ArgList &Args) {
+static void
+GenerateDependencyOutputArgs(const DependencyOutputOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ const DependencyOutputOptions &DependencyOutputOpts = Opts;
+#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING
+
+ if (Opts.ShowIncludesDest != ShowIncludesDestination::None)
+ GenerateArg(Args, OPT_show_includes, SA);
+
+ for (const auto &Dep : Opts.ExtraDeps) {
+ switch (Dep.second) {
+ case EDK_SanitizeIgnorelist:
+ // Sanitizer ignorelist arguments are generated from LanguageOptions.
+ continue;
+ case EDK_ModuleFile:
+ // Module file arguments are generated from FrontendOptions and
+ // HeaderSearchOptions.
+ continue;
+ case EDK_ProfileList:
+ // Profile list arguments are generated from LanguageOptions via the
+ // marshalling infrastructure.
+ continue;
+ case EDK_DepFileEntry:
+ GenerateArg(Args, OPT_fdepfile_entry, Dep.first, SA);
+ break;
+ }
+ }
+}
+
+static bool ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
+ ArgList &Args, DiagnosticsEngine &Diags,
+ frontend::ActionKind Action,
+ bool ShowLineMarkers) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ DependencyOutputOptions &DependencyOutputOpts = Opts;
+#define DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef DEPENDENCY_OUTPUT_OPTION_WITH_MARSHALLING
+
if (Args.hasArg(OPT_show_includes)) {
// Writing both /showIncludes and preprocessor output to stdout
// would produce interleaved output, so use stderr for /showIncludes.
// This behaves the same as cl.exe, when /E, /EP or /P are passed.
- if (Args.hasArg(options::OPT_E) || Args.hasArg(options::OPT_P))
+ if (Action == frontend::PrintPreprocessedInput || !ShowLineMarkers)
Opts.ShowIncludesDest = ShowIncludesDestination::Stderr;
else
Opts.ShowIncludesDest = ShowIncludesDestination::Stdout;
} else {
Opts.ShowIncludesDest = ShowIncludesDestination::None;
}
- // Add sanitizer blacklists as extra dependencies.
+
+ // Add sanitizer ignorelists as extra dependencies.
// They won't be discovered by the regular preprocessor, so
// we let make / ninja to know about this implicit dependency.
- if (!Args.hasArg(OPT_fno_sanitize_blacklist)) {
- for (const auto *A : Args.filtered(OPT_fsanitize_blacklist)) {
+ if (!Args.hasArg(OPT_fno_sanitize_ignorelist)) {
+ for (const auto *A : Args.filtered(OPT_fsanitize_ignorelist_EQ)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(std::string(Val));
+ Opts.ExtraDeps.emplace_back(std::string(Val), EDK_SanitizeIgnorelist);
}
if (Opts.IncludeSystemHeaders) {
- for (const auto *A : Args.filtered(OPT_fsanitize_system_blacklist)) {
+ for (const auto *A : Args.filtered(OPT_fsanitize_system_ignorelist_EQ)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(std::string(Val));
+ Opts.ExtraDeps.emplace_back(std::string(Val), EDK_SanitizeIgnorelist);
}
}
}
// -fprofile-list= dependencies.
for (const auto &Filename : Args.getAllArgValues(OPT_fprofile_list_EQ))
- Opts.ExtraDeps.push_back(Filename);
+ Opts.ExtraDeps.emplace_back(Filename, EDK_ProfileList);
// Propagate the extra dependencies.
- for (const auto *A : Args.filtered(OPT_fdepfile_entry)) {
- Opts.ExtraDeps.push_back(A->getValue());
- }
+ for (const auto *A : Args.filtered(OPT_fdepfile_entry))
+ Opts.ExtraDeps.emplace_back(A->getValue(), EDK_DepFileEntry);
// Only the -fmodule-file=<file> form.
for (const auto *A : Args.filtered(OPT_fmodule_file)) {
StringRef Val = A->getValue();
if (Val.find('=') == StringRef::npos)
- Opts.ExtraDeps.push_back(std::string(Val));
+ Opts.ExtraDeps.emplace_back(std::string(Val), EDK_ModuleFile);
}
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
@@ -1426,23 +2116,143 @@ static bool checkVerifyPrefixes(const std::vector<std::string> &VerifyPrefixes,
return Success;
}
-bool CompilerInvocation::parseSimpleArgs(const ArgList &Args,
- DiagnosticsEngine &Diags) {
- bool Success = true;
+static void GenerateFileSystemArgs(const FileSystemOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ const FileSystemOptions &FileSystemOpts = Opts;
-#define OPTION_WITH_MARSHALLING( \
+#define FILE_SYSTEM_OPTION_WITH_MARSHALLING( \
PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING(Args, Diags, Success, ID, FLAGS, PARAM, \
- SHOULD_PARSE, this->KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
- MERGER, TABLE_INDEX)
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
#include "clang/Driver/Options.inc"
-#undef OPTION_WITH_MARSHALLING
+#undef FILE_SYSTEM_OPTION_WITH_MARSHALLING
+}
- return Success;
+static bool ParseFileSystemArgs(FileSystemOptions &Opts, const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ FileSystemOptions &FileSystemOpts = Opts;
+
+#define FILE_SYSTEM_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef FILE_SYSTEM_OPTION_WITH_MARSHALLING
+
+ return Diags.getNumErrors() == NumErrorsBefore;
+}
+
+static void GenerateMigratorArgs(const MigratorOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ const MigratorOptions &MigratorOpts = Opts;
+#define MIGRATOR_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef MIGRATOR_OPTION_WITH_MARSHALLING
+}
+
+static bool ParseMigratorArgs(MigratorOptions &Opts, const ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ MigratorOptions &MigratorOpts = Opts;
+
+#define MIGRATOR_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef MIGRATOR_OPTION_WITH_MARSHALLING
+
+ return Diags.getNumErrors() == NumErrorsBefore;
+}
+
+void CompilerInvocation::GenerateDiagnosticArgs(
+ const DiagnosticOptions &Opts, SmallVectorImpl<const char *> &Args,
+ StringAllocator SA, bool DefaultDiagColor) {
+ const DiagnosticOptions *DiagnosticOpts = &Opts;
+#define DIAG_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef DIAG_OPTION_WITH_MARSHALLING
+
+ if (!Opts.DiagnosticSerializationFile.empty())
+ GenerateArg(Args, OPT_diagnostic_serialized_file,
+ Opts.DiagnosticSerializationFile, SA);
+
+ if (Opts.ShowColors)
+ GenerateArg(Args, OPT_fcolor_diagnostics, SA);
+
+ if (Opts.VerifyDiagnostics &&
+ llvm::is_contained(Opts.VerifyPrefixes, "expected"))
+ GenerateArg(Args, OPT_verify, SA);
+
+ for (const auto &Prefix : Opts.VerifyPrefixes)
+ if (Prefix != "expected")
+ GenerateArg(Args, OPT_verify_EQ, Prefix, SA);
+
+ DiagnosticLevelMask VIU = Opts.getVerifyIgnoreUnexpected();
+ if (VIU == DiagnosticLevelMask::None) {
+ // This is the default, don't generate anything.
+ } else if (VIU == DiagnosticLevelMask::All) {
+ GenerateArg(Args, OPT_verify_ignore_unexpected, SA);
+ } else {
+ if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Note) != 0)
+ GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "note", SA);
+ if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Remark) != 0)
+ GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "remark", SA);
+ if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Warning) != 0)
+ GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "warning", SA);
+ if (static_cast<unsigned>(VIU & DiagnosticLevelMask::Error) != 0)
+ GenerateArg(Args, OPT_verify_ignore_unexpected_EQ, "error", SA);
+ }
+
+ for (const auto &Warning : Opts.Warnings) {
+ // This option is automatically generated from UndefPrefixes.
+ if (Warning == "undef-prefix")
+ continue;
+ Args.push_back(SA(StringRef("-W") + Warning));
+ }
+
+ for (const auto &Remark : Opts.Remarks) {
+ // These arguments are generated from OptimizationRemark fields of
+ // CodeGenOptions.
+ StringRef IgnoredRemarks[] = {"pass", "no-pass",
+ "pass-analysis", "no-pass-analysis",
+ "pass-missed", "no-pass-missed"};
+ if (llvm::is_contained(IgnoredRemarks, Remark))
+ continue;
+
+ Args.push_back(SA(StringRef("-R") + Remark));
+ }
}
bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
@@ -1455,20 +2265,20 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Diags = &*IgnoringDiags;
}
+ unsigned NumErrorsBefore = Diags->getNumErrors();
+
// The key paths of diagnostic options defined in Options.td start with
// "DiagnosticOpts->". Let's provide the expected variable name and type.
DiagnosticOptions *DiagnosticOpts = &Opts;
- bool Success = true;
#define DIAG_OPTION_WITH_MARSHALLING( \
PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING(Args, *Diags, Success, ID, FLAGS, PARAM, \
- SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
- MERGER, TABLE_INDEX)
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, *Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
#include "clang/Driver/Options.inc"
#undef DIAG_OPTION_WITH_MARSHALLING
@@ -1479,24 +2289,20 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
Opts.DiagnosticSerializationFile = A->getValue();
Opts.ShowColors = parseShowColorsArgs(Args, DefaultDiagColor);
- if (Args.getLastArgValue(OPT_fdiagnostics_format) == "msvc-fallback")
- Opts.CLFallbackMode = true;
-
Opts.VerifyDiagnostics = Args.hasArg(OPT_verify) || Args.hasArg(OPT_verify_EQ);
+ Opts.VerifyPrefixes = Args.getAllArgValues(OPT_verify_EQ);
if (Args.hasArg(OPT_verify))
Opts.VerifyPrefixes.push_back("expected");
// Keep VerifyPrefixes in its original order for the sake of diagnostics, and
// then sort it to prepare for fast lookup using std::binary_search.
- if (!checkVerifyPrefixes(Opts.VerifyPrefixes, *Diags)) {
+ if (!checkVerifyPrefixes(Opts.VerifyPrefixes, *Diags))
Opts.VerifyDiagnostics = false;
- Success = false;
- }
else
llvm::sort(Opts.VerifyPrefixes);
DiagnosticLevelMask DiagMask = DiagnosticLevelMask::None;
- Success &= parseDiagnosticLevelMask("-verify-ignore-unexpected=",
- Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ),
- *Diags, DiagMask);
+ parseDiagnosticLevelMask(
+ "-verify-ignore-unexpected=",
+ Args.getAllArgValues(OPT_verify_ignore_unexpected_EQ), *Diags, DiagMask);
if (Args.hasArg(OPT_verify_ignore_unexpected))
DiagMask = DiagnosticLevelMask::All;
Opts.setVerifyIgnoreUnexpected(DiagMask);
@@ -1509,7 +2315,7 @@ bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
addDiagnosticArgs(Args, OPT_W_Group, OPT_W_value_Group, Opts.Warnings);
addDiagnosticArgs(Args, OPT_R_Group, OPT_R_value_Group, Opts.Remarks);
- return Success;
+ return Diags->getNumErrors() == NumErrorsBefore;
}
/// Parse the argument to the -ftest-module-file-extension
@@ -1536,18 +2342,264 @@ static bool parseTestModuleFileExtensionArg(StringRef Arg,
return false;
}
-static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
- DiagnosticsEngine &Diags,
- bool &IsHeaderFile) {
+/// Return a table that associates command line option specifiers with the
+/// frontend action. Note: The pair {frontend::PluginAction, OPT_plugin} is
+/// intentionally missing, as this case is handled separately from other
+/// frontend options.
+static const auto &getFrontendActionTable() {
+ static const std::pair<frontend::ActionKind, unsigned> Table[] = {
+ {frontend::ASTDeclList, OPT_ast_list},
+
+ {frontend::ASTDump, OPT_ast_dump_all_EQ},
+ {frontend::ASTDump, OPT_ast_dump_all},
+ {frontend::ASTDump, OPT_ast_dump_EQ},
+ {frontend::ASTDump, OPT_ast_dump},
+ {frontend::ASTDump, OPT_ast_dump_lookups},
+ {frontend::ASTDump, OPT_ast_dump_decl_types},
+
+ {frontend::ASTPrint, OPT_ast_print},
+ {frontend::ASTView, OPT_ast_view},
+ {frontend::DumpCompilerOptions, OPT_compiler_options_dump},
+ {frontend::DumpRawTokens, OPT_dump_raw_tokens},
+ {frontend::DumpTokens, OPT_dump_tokens},
+ {frontend::EmitAssembly, OPT_S},
+ {frontend::EmitBC, OPT_emit_llvm_bc},
+ {frontend::EmitHTML, OPT_emit_html},
+ {frontend::EmitLLVM, OPT_emit_llvm},
+ {frontend::EmitLLVMOnly, OPT_emit_llvm_only},
+ {frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
+ {frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
+ {frontend::EmitObj, OPT_emit_obj},
+
+ {frontend::FixIt, OPT_fixit_EQ},
+ {frontend::FixIt, OPT_fixit},
+
+ {frontend::GenerateModule, OPT_emit_module},
+ {frontend::GenerateModuleInterface, OPT_emit_module_interface},
+ {frontend::GenerateHeaderModule, OPT_emit_header_module},
+ {frontend::GeneratePCH, OPT_emit_pch},
+ {frontend::GenerateInterfaceStubs, OPT_emit_interface_stubs},
+ {frontend::InitOnly, OPT_init_only},
+ {frontend::ParseSyntaxOnly, OPT_fsyntax_only},
+ {frontend::ModuleFileInfo, OPT_module_file_info},
+ {frontend::VerifyPCH, OPT_verify_pch},
+ {frontend::PrintPreamble, OPT_print_preamble},
+ {frontend::PrintPreprocessedInput, OPT_E},
+ {frontend::TemplightDump, OPT_templight_dump},
+ {frontend::RewriteMacros, OPT_rewrite_macros},
+ {frontend::RewriteObjC, OPT_rewrite_objc},
+ {frontend::RewriteTest, OPT_rewrite_test},
+ {frontend::RunAnalysis, OPT_analyze},
+ {frontend::MigrateSource, OPT_migrate},
+ {frontend::RunPreprocessorOnly, OPT_Eonly},
+ {frontend::PrintDependencyDirectivesSourceMinimizerOutput,
+ OPT_print_dependency_directives_minimized_source},
+ };
+
+ return Table;
+}
+
+/// Maps command line option to frontend action.
+static Optional<frontend::ActionKind> getFrontendAction(OptSpecifier &Opt) {
+ for (const auto &ActionOpt : getFrontendActionTable())
+ if (ActionOpt.second == Opt.getID())
+ return ActionOpt.first;
+
+ return None;
+}
+
+/// Maps frontend action to command line option.
+static Optional<OptSpecifier>
+getProgramActionOpt(frontend::ActionKind ProgramAction) {
+ for (const auto &ActionOpt : getFrontendActionTable())
+ if (ActionOpt.first == ProgramAction)
+ return OptSpecifier(ActionOpt.second);
+
+ return None;
+}
+
+static void GenerateFrontendArgs(const FrontendOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA,
+ bool IsHeader) {
+ const FrontendOptions &FrontendOpts = Opts;
+#define FRONTEND_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef FRONTEND_OPTION_WITH_MARSHALLING
+
+ Optional<OptSpecifier> ProgramActionOpt =
+ getProgramActionOpt(Opts.ProgramAction);
+
+ // Generating a simple flag covers most frontend actions.
+ std::function<void()> GenerateProgramAction = [&]() {
+ GenerateArg(Args, *ProgramActionOpt, SA);
+ };
+
+ if (!ProgramActionOpt) {
+ // PluginAction is the only program action handled separately.
+ assert(Opts.ProgramAction == frontend::PluginAction &&
+ "Frontend action without option.");
+ GenerateProgramAction = [&]() {
+ GenerateArg(Args, OPT_plugin, Opts.ActionName, SA);
+ };
+ }
+
+ // FIXME: Simplify the complex 'AST dump' command line.
+ if (Opts.ProgramAction == frontend::ASTDump) {
+ GenerateProgramAction = [&]() {
+ // ASTDumpLookups, ASTDumpDeclTypes and ASTDumpFilter are generated via
+ // marshalling infrastructure.
+
+ if (Opts.ASTDumpFormat != ADOF_Default) {
+ StringRef Format;
+ switch (Opts.ASTDumpFormat) {
+ case ADOF_Default:
+ llvm_unreachable("Default AST dump format.");
+ case ADOF_JSON:
+ Format = "json";
+ break;
+ }
+
+ if (Opts.ASTDumpAll)
+ GenerateArg(Args, OPT_ast_dump_all_EQ, Format, SA);
+ if (Opts.ASTDumpDecls)
+ GenerateArg(Args, OPT_ast_dump_EQ, Format, SA);
+ } else {
+ if (Opts.ASTDumpAll)
+ GenerateArg(Args, OPT_ast_dump_all, SA);
+ if (Opts.ASTDumpDecls)
+ GenerateArg(Args, OPT_ast_dump, SA);
+ }
+ };
+ }
+
+ if (Opts.ProgramAction == frontend::FixIt && !Opts.FixItSuffix.empty()) {
+ GenerateProgramAction = [&]() {
+ GenerateArg(Args, OPT_fixit_EQ, Opts.FixItSuffix, SA);
+ };
+ }
+
+ GenerateProgramAction();
+
+ for (const auto &PluginArgs : Opts.PluginArgs) {
+ Option Opt = getDriverOptTable().getOption(OPT_plugin_arg);
+ const char *Spelling =
+ SA(Opt.getPrefix() + Opt.getName() + PluginArgs.first);
+ for (const auto &PluginArg : PluginArgs.second)
+ denormalizeString(Args, Spelling, SA, Opt.getKind(), 0, PluginArg);
+ }
+
+ for (const auto &Ext : Opts.ModuleFileExtensions)
+ if (auto *TestExt = dyn_cast_or_null<TestModuleFileExtension>(Ext.get()))
+ GenerateArg(Args, OPT_ftest_module_file_extension_EQ, TestExt->str(), SA);
+
+ if (!Opts.CodeCompletionAt.FileName.empty())
+ GenerateArg(Args, OPT_code_completion_at, Opts.CodeCompletionAt.ToString(),
+ SA);
+
+ for (const auto &Plugin : Opts.Plugins)
+ GenerateArg(Args, OPT_load, Plugin, SA);
+
+ // ASTDumpDecls and ASTDumpAll already handled with ProgramAction.
+
+ for (const auto &ModuleFile : Opts.ModuleFiles)
+ GenerateArg(Args, OPT_fmodule_file, ModuleFile, SA);
+
+ if (Opts.AuxTargetCPU.hasValue())
+ GenerateArg(Args, OPT_aux_target_cpu, *Opts.AuxTargetCPU, SA);
+
+ if (Opts.AuxTargetFeatures.hasValue())
+ for (const auto &Feature : *Opts.AuxTargetFeatures)
+ GenerateArg(Args, OPT_aux_target_feature, Feature, SA);
+
+ {
+ StringRef Preprocessed = Opts.DashX.isPreprocessed() ? "-cpp-output" : "";
+ StringRef ModuleMap =
+ Opts.DashX.getFormat() == InputKind::ModuleMap ? "-module-map" : "";
+ StringRef Header = IsHeader ? "-header" : "";
+
+ StringRef Lang;
+ switch (Opts.DashX.getLanguage()) {
+ case Language::C:
+ Lang = "c";
+ break;
+ case Language::OpenCL:
+ Lang = "cl";
+ break;
+ case Language::OpenCLCXX:
+ Lang = "clcpp";
+ break;
+ case Language::CUDA:
+ Lang = "cuda";
+ break;
+ case Language::HIP:
+ Lang = "hip";
+ break;
+ case Language::CXX:
+ Lang = "c++";
+ break;
+ case Language::ObjC:
+ Lang = "objective-c";
+ break;
+ case Language::ObjCXX:
+ Lang = "objective-c++";
+ break;
+ case Language::RenderScript:
+ Lang = "renderscript";
+ break;
+ case Language::Asm:
+ Lang = "assembler-with-cpp";
+ break;
+ case Language::Unknown:
+ assert(Opts.DashX.getFormat() == InputKind::Precompiled &&
+ "Generating -x argument for unknown language (not precompiled).");
+ Lang = "ast";
+ break;
+ case Language::LLVM_IR:
+ Lang = "ir";
+ break;
+ }
+
+ GenerateArg(Args, OPT_x, Lang + Header + ModuleMap + Preprocessed, SA);
+ }
+
+ // OPT_INPUT has a unique class, generate it directly.
+ for (const auto &Input : Opts.Inputs)
+ Args.push_back(SA(Input.getFile()));
+}
+
+static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags, bool &IsHeaderFile) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ FrontendOptions &FrontendOpts = Opts;
+
+#define FRONTEND_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef FRONTEND_OPTION_WITH_MARSHALLING
+
Opts.ProgramAction = frontend::ParseSyntaxOnly;
if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
- switch (A->getOption().getID()) {
- default:
- llvm_unreachable("Invalid option in group!");
- case OPT_ast_list:
- Opts.ProgramAction = frontend::ASTDeclList; break;
- case OPT_ast_dump_all_EQ:
- case OPT_ast_dump_EQ: {
+ OptSpecifier Opt = OptSpecifier(A->getOption().getID());
+ Optional<frontend::ActionKind> ProgramAction = getFrontendAction(Opt);
+ assert(ProgramAction && "Option specifier not in Action_Group.");
+
+ if (ProgramAction == frontend::ASTDump &&
+ (Opt == OPT_ast_dump_all_EQ || Opt == OPT_ast_dump_EQ)) {
unsigned Val = llvm::StringSwitch<unsigned>(A->getValue())
.CaseLower("default", ADOF_Default)
.CaseLower("json", ADOF_JSON)
@@ -1560,108 +2612,39 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
<< A->getAsString(Args) << A->getValue();
Opts.ASTDumpFormat = ADOF_Default;
}
- LLVM_FALLTHROUGH;
}
- case OPT_ast_dump:
- case OPT_ast_dump_all:
- case OPT_ast_dump_lookups:
- case OPT_ast_dump_decl_types:
- Opts.ProgramAction = frontend::ASTDump; break;
- case OPT_ast_print:
- Opts.ProgramAction = frontend::ASTPrint; break;
- case OPT_ast_view:
- Opts.ProgramAction = frontend::ASTView; break;
- case OPT_compiler_options_dump:
- Opts.ProgramAction = frontend::DumpCompilerOptions; break;
- case OPT_dump_raw_tokens:
- Opts.ProgramAction = frontend::DumpRawTokens; break;
- case OPT_dump_tokens:
- Opts.ProgramAction = frontend::DumpTokens; break;
- case OPT_S:
- Opts.ProgramAction = frontend::EmitAssembly; break;
- case OPT_emit_llvm_bc:
- Opts.ProgramAction = frontend::EmitBC; break;
- case OPT_emit_html:
- Opts.ProgramAction = frontend::EmitHTML; break;
- case OPT_emit_llvm:
- Opts.ProgramAction = frontend::EmitLLVM; break;
- case OPT_emit_llvm_only:
- Opts.ProgramAction = frontend::EmitLLVMOnly; break;
- case OPT_emit_codegen_only:
- Opts.ProgramAction = frontend::EmitCodeGenOnly; break;
- case OPT_emit_obj:
- Opts.ProgramAction = frontend::EmitObj; break;
- case OPT_fixit_EQ:
+
+ if (ProgramAction == frontend::FixIt && Opt == OPT_fixit_EQ)
Opts.FixItSuffix = A->getValue();
- LLVM_FALLTHROUGH;
- case OPT_fixit:
- Opts.ProgramAction = frontend::FixIt; break;
- case OPT_emit_module:
- Opts.ProgramAction = frontend::GenerateModule; break;
- case OPT_emit_module_interface:
- Opts.ProgramAction = frontend::GenerateModuleInterface; break;
- case OPT_emit_header_module:
- Opts.ProgramAction = frontend::GenerateHeaderModule; break;
- case OPT_emit_pch:
- Opts.ProgramAction = frontend::GeneratePCH; break;
- case OPT_emit_interface_stubs: {
+
+ if (ProgramAction == frontend::GenerateInterfaceStubs) {
StringRef ArgStr =
Args.hasArg(OPT_interface_stub_version_EQ)
? Args.getLastArgValue(OPT_interface_stub_version_EQ)
- : "experimental-ifs-v2";
+ : "ifs-v1";
if (ArgStr == "experimental-yaml-elf-v1" ||
- ArgStr == "experimental-ifs-v1" ||
+ ArgStr == "experimental-ifs-v1" || ArgStr == "experimental-ifs-v2" ||
ArgStr == "experimental-tapi-elf-v1") {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() +
" is deprecated.";
Diags.Report(diag::err_drv_invalid_value)
<< "Must specify a valid interface stub format type, ie: "
- "-interface-stub-version=experimental-ifs-v2"
+ "-interface-stub-version=ifs-v1"
<< ErrorMessage;
- } else if (!ArgStr.startswith("experimental-ifs-")) {
+ ProgramAction = frontend::ParseSyntaxOnly;
+ } else if (!ArgStr.startswith("ifs-")) {
std::string ErrorMessage =
"Invalid interface stub format: " + ArgStr.str() + ".";
Diags.Report(diag::err_drv_invalid_value)
<< "Must specify a valid interface stub format type, ie: "
- "-interface-stub-version=experimental-ifs-v2"
+ "-interface-stub-version=ifs-v1"
<< ErrorMessage;
- } else {
- Opts.ProgramAction = frontend::GenerateInterfaceStubs;
+ ProgramAction = frontend::ParseSyntaxOnly;
}
- break;
- }
- case OPT_init_only:
- Opts.ProgramAction = frontend::InitOnly; break;
- case OPT_fsyntax_only:
- Opts.ProgramAction = frontend::ParseSyntaxOnly; break;
- case OPT_module_file_info:
- Opts.ProgramAction = frontend::ModuleFileInfo; break;
- case OPT_verify_pch:
- Opts.ProgramAction = frontend::VerifyPCH; break;
- case OPT_print_preamble:
- Opts.ProgramAction = frontend::PrintPreamble; break;
- case OPT_E:
- Opts.ProgramAction = frontend::PrintPreprocessedInput; break;
- case OPT_templight_dump:
- Opts.ProgramAction = frontend::TemplightDump; break;
- case OPT_rewrite_macros:
- Opts.ProgramAction = frontend::RewriteMacros; break;
- case OPT_rewrite_objc:
- Opts.ProgramAction = frontend::RewriteObjC; break;
- case OPT_rewrite_test:
- Opts.ProgramAction = frontend::RewriteTest; break;
- case OPT_analyze:
- Opts.ProgramAction = frontend::RunAnalysis; break;
- case OPT_migrate:
- Opts.ProgramAction = frontend::MigrateSource; break;
- case OPT_Eonly:
- Opts.ProgramAction = frontend::RunPreprocessorOnly; break;
- case OPT_print_dependency_directives_minimized_source:
- Opts.ProgramAction =
- frontend::PrintDependencyDirectivesSourceMinimizerOutput;
- break;
}
+
+ Opts.ProgramAction = *ProgramAction;
}
if (const Arg* A = Args.getLastArg(OPT_plugin)) {
@@ -1741,6 +2724,7 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("c", Language::C)
.Case("cl", Language::OpenCL)
+ .Case("clcpp", Language::OpenCLCXX)
.Case("cuda", Language::CUDA)
.Case("hip", Language::HIP)
.Case("c++", Language::CXX)
@@ -1807,7 +2791,9 @@ static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.Inputs.emplace_back(std::move(Inputs[i]), IK, IsSystem);
}
- return DashX;
+ Opts.DashX = DashX;
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
@@ -1817,8 +2803,150 @@ std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
return Driver::GetResourcesPath(ClangExecutable, CLANG_RESOURCE_DIR);
}
-static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
+static void GenerateHeaderSearchArgs(HeaderSearchOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ const HeaderSearchOptions *HeaderSearchOpts = &Opts;
+#define HEADER_SEARCH_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef HEADER_SEARCH_OPTION_WITH_MARSHALLING
+
+ if (Opts.UseLibcxx)
+ GenerateArg(Args, OPT_stdlib_EQ, "libc++", SA);
+
+ if (!Opts.ModuleCachePath.empty())
+ GenerateArg(Args, OPT_fmodules_cache_path, Opts.ModuleCachePath, SA);
+
+ for (const auto &File : Opts.PrebuiltModuleFiles)
+ GenerateArg(Args, OPT_fmodule_file, File.first + "=" + File.second, SA);
+
+ for (const auto &Path : Opts.PrebuiltModulePaths)
+ GenerateArg(Args, OPT_fprebuilt_module_path, Path, SA);
+
+ for (const auto &Macro : Opts.ModulesIgnoreMacros)
+ GenerateArg(Args, OPT_fmodules_ignore_macro, Macro.val(), SA);
+
+ auto Matches = [](const HeaderSearchOptions::Entry &Entry,
+ llvm::ArrayRef<frontend::IncludeDirGroup> Groups,
+ llvm::Optional<bool> IsFramework,
+ llvm::Optional<bool> IgnoreSysRoot) {
+ return llvm::find(Groups, Entry.Group) != Groups.end() &&
+ (!IsFramework || (Entry.IsFramework == *IsFramework)) &&
+ (!IgnoreSysRoot || (Entry.IgnoreSysRoot == *IgnoreSysRoot));
+ };
+
+ auto It = Opts.UserEntries.begin();
+ auto End = Opts.UserEntries.end();
+
+ // Add -I..., -F..., and -index-header-map options in order.
+ for (; It < End &&
+ Matches(*It, {frontend::IndexHeaderMap, frontend::Angled}, None, true);
+ ++It) {
+ OptSpecifier Opt = [It, Matches]() {
+ if (Matches(*It, frontend::IndexHeaderMap, true, true))
+ return OPT_F;
+ if (Matches(*It, frontend::IndexHeaderMap, false, true))
+ return OPT_I;
+ if (Matches(*It, frontend::Angled, true, true))
+ return OPT_F;
+ if (Matches(*It, frontend::Angled, false, true))
+ return OPT_I;
+ llvm_unreachable("Unexpected HeaderSearchOptions::Entry.");
+ }();
+
+ if (It->Group == frontend::IndexHeaderMap)
+ GenerateArg(Args, OPT_index_header_map, SA);
+ GenerateArg(Args, Opt, It->Path, SA);
+ };
+
+ // Note: some paths that came from "[-iprefix=xx] -iwithprefixbefore=yy" may
+ // have already been generated as "-I[xx]yy". If that's the case, their
+ // position on command line was such that this has no semantic impact on
+ // include paths.
+ for (; It < End &&
+ Matches(*It, {frontend::After, frontend::Angled}, false, true);
+ ++It) {
+ OptSpecifier Opt =
+ It->Group == frontend::After ? OPT_iwithprefix : OPT_iwithprefixbefore;
+ GenerateArg(Args, Opt, It->Path, SA);
+ }
+
+ // Note: Some paths that came from "-idirafter=xxyy" may have already been
+ // generated as "-iwithprefix=xxyy". If that's the case, their position on
+ // command line was such that this has no semantic impact on include paths.
+ for (; It < End && Matches(*It, {frontend::After}, false, true); ++It)
+ GenerateArg(Args, OPT_idirafter, It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::Quoted}, false, true); ++It)
+ GenerateArg(Args, OPT_iquote, It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::System}, false, None); ++It)
+ GenerateArg(Args, It->IgnoreSysRoot ? OPT_isystem : OPT_iwithsysroot,
+ It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::System}, true, true); ++It)
+ GenerateArg(Args, OPT_iframework, It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::System}, true, false); ++It)
+ GenerateArg(Args, OPT_iframeworkwithsysroot, It->Path, SA);
+
+ // Add the paths for the various language specific isystem flags.
+ for (; It < End && Matches(*It, {frontend::CSystem}, false, true); ++It)
+ GenerateArg(Args, OPT_c_isystem, It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::CXXSystem}, false, true); ++It)
+ GenerateArg(Args, OPT_cxx_isystem, It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::ObjCSystem}, false, true); ++It)
+ GenerateArg(Args, OPT_objc_isystem, It->Path, SA);
+ for (; It < End && Matches(*It, {frontend::ObjCXXSystem}, false, true); ++It)
+ GenerateArg(Args, OPT_objcxx_isystem, It->Path, SA);
+
+ // Add the internal paths from a driver that detects standard include paths.
+ // Note: Some paths that came from "-internal-isystem" arguments may have
+ // already been generated as "-isystem". If that's the case, their position on
+ // command line was such that this has no semantic impact on include paths.
+ for (; It < End &&
+ Matches(*It, {frontend::System, frontend::ExternCSystem}, false, true);
+ ++It) {
+ OptSpecifier Opt = It->Group == frontend::System
+ ? OPT_internal_isystem
+ : OPT_internal_externc_isystem;
+ GenerateArg(Args, Opt, It->Path, SA);
+ }
+
+ assert(It == End && "Unhandled HeaderSearchOption::Entry.");
+
+ // Add the path prefixes which are implicitly treated as being system headers.
+ for (const auto &P : Opts.SystemHeaderPrefixes) {
+ OptSpecifier Opt = P.IsSystemHeader ? OPT_system_header_prefix
+ : OPT_no_system_header_prefix;
+ GenerateArg(Args, Opt, P.Prefix, SA);
+ }
+
+ for (const std::string &F : Opts.VFSOverlayFiles)
+ GenerateArg(Args, OPT_ivfsoverlay, F, SA);
+}
+
+static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags,
const std::string &WorkingDir) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ HeaderSearchOptions *HeaderSearchOpts = &Opts;
+
+#define HEADER_SEARCH_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef HEADER_SEARCH_OPTION_WITH_MARSHALLING
+
if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
Opts.UseLibcxx = (strcmp(A->getValue(), "libc++") == 0);
@@ -1932,6 +3060,8 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
for (const auto *A : Args.filtered(OPT_ivfsoverlay))
Opts.AddVFSOverlayFile(A->getValue());
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
@@ -1958,7 +3088,10 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
case Language::LLVM_IR:
llvm_unreachable("Invalid input kind!");
case Language::OpenCL:
- LangStd = LangStandard::lang_opencl10;
+ LangStd = LangStandard::lang_opencl12;
+ break;
+ case Language::OpenCLCXX:
+ LangStd = LangStandard::lang_openclcpp;
break;
case Language::CUDA:
LangStd = LangStandard::lang_cuda;
@@ -2017,6 +3150,8 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.HexFloats = Std.hasHexFloats();
Opts.ImplicitInt = Std.hasImplicitInt();
+ Opts.CPlusPlusModules = Opts.CPlusPlus20;
+
// Set OpenCL Version.
Opts.OpenCL = Std.isOpenCL();
if (LangStd == LangStandard::lang_opencl10)
@@ -2038,6 +3173,9 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
Opts.ZVector = 0;
Opts.setDefaultFPContractMode(LangOptions::FPM_On);
Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
+ Opts.OpenCLPipe = Opts.OpenCLCPlusPlus || Opts.OpenCLVersion == 200;
+ Opts.OpenCLGenericAddressSpace =
+ Opts.OpenCLCPlusPlus || Opts.OpenCLVersion == 200;
// Include default header file for OpenCL.
if (Opts.IncludeDefaultHeader) {
@@ -2074,18 +3212,6 @@ void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
// OpenCL has half keyword
Opts.Half = Opts.OpenCL;
-
- // C++ has wchar_t keyword.
- Opts.WChar = Opts.CPlusPlus;
-
- Opts.CXXOperatorNames = Opts.CPlusPlus;
-
- Opts.AlignedAllocation = Opts.CPlusPlus17;
-
- Opts.DollarIdents = !Opts.AsmPreprocessor;
-
- // Enable [[]] attributes in C++11 and C2x by default.
- Opts.DoubleSquareBracketAttributes = Opts.CPlusPlus11 || Opts.C2x;
}
/// Check if input file kind and language standard are compatible.
@@ -2102,7 +3228,11 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
return S.getLanguage() == Language::C;
case Language::OpenCL:
- return S.getLanguage() == Language::OpenCL;
+ return S.getLanguage() == Language::OpenCL ||
+ S.getLanguage() == Language::OpenCLCXX;
+
+ case Language::OpenCLCXX:
+ return S.getLanguage() == Language::OpenCLCXX;
case Language::CXX:
case Language::ObjCXX:
@@ -2139,6 +3269,8 @@ static const StringRef GetInputKindName(InputKind IK) {
return "Objective-C++";
case Language::OpenCL:
return "OpenCL";
+ case Language::OpenCLCXX:
+ return "C++ for OpenCL";
case Language::CUDA:
return "CUDA";
case Language::RenderScript:
@@ -2157,19 +3289,273 @@ static const StringRef GetInputKindName(InputKind IK) {
llvm_unreachable("unknown input language");
}
-static void GenerateLangArgs(const LangOptions &Opts,
- SmallVectorImpl<const char *> &Args,
- CompilerInvocation::StringAllocator SA) {
+void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ StringAllocator SA,
+ const llvm::Triple &T, InputKind IK) {
+ if (IK.getFormat() == InputKind::Precompiled ||
+ IK.getLanguage() == Language::LLVM_IR) {
+ if (Opts.ObjCAutoRefCount)
+ GenerateArg(Args, OPT_fobjc_arc, SA);
+ if (Opts.PICLevel != 0)
+ GenerateArg(Args, OPT_pic_level, Twine(Opts.PICLevel), SA);
+ if (Opts.PIE)
+ GenerateArg(Args, OPT_pic_is_pie, SA);
+ for (StringRef Sanitizer : serializeSanitizerKinds(Opts.Sanitize))
+ GenerateArg(Args, OPT_fsanitize_EQ, Sanitizer, SA);
+
+ return;
+ }
+
+ OptSpecifier StdOpt;
+ switch (Opts.LangStd) {
+ case LangStandard::lang_opencl10:
+ case LangStandard::lang_opencl11:
+ case LangStandard::lang_opencl12:
+ case LangStandard::lang_opencl20:
+ case LangStandard::lang_opencl30:
+ case LangStandard::lang_openclcpp:
+ StdOpt = OPT_cl_std_EQ;
+ break;
+ default:
+ StdOpt = OPT_std_EQ;
+ break;
+ }
+
+ auto LangStandard = LangStandard::getLangStandardForKind(Opts.LangStd);
+ GenerateArg(Args, StdOpt, LangStandard.getName(), SA);
+
if (Opts.IncludeDefaultHeader)
- Args.push_back(SA(GetOptName(OPT_finclude_default_header)));
+ GenerateArg(Args, OPT_finclude_default_header, SA);
if (Opts.DeclareOpenCLBuiltins)
- Args.push_back(SA(GetOptName(OPT_fdeclare_opencl_builtins)));
+ GenerateArg(Args, OPT_fdeclare_opencl_builtins, SA);
+
+ const LangOptions *LangOpts = &Opts;
+
+#define LANG_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef LANG_OPTION_WITH_MARSHALLING
+
+ // The '-fcf-protection=' option is generated by CodeGenOpts generator.
+
+ if (Opts.ObjC) {
+ GenerateArg(Args, OPT_fobjc_runtime_EQ, Opts.ObjCRuntime.getAsString(), SA);
+
+ if (Opts.GC == LangOptions::GCOnly)
+ GenerateArg(Args, OPT_fobjc_gc_only, SA);
+ else if (Opts.GC == LangOptions::HybridGC)
+ GenerateArg(Args, OPT_fobjc_gc, SA);
+ else if (Opts.ObjCAutoRefCount == 1)
+ GenerateArg(Args, OPT_fobjc_arc, SA);
+
+ if (Opts.ObjCWeakRuntime)
+ GenerateArg(Args, OPT_fobjc_runtime_has_weak, SA);
+
+ if (Opts.ObjCWeak)
+ GenerateArg(Args, OPT_fobjc_weak, SA);
+
+ if (Opts.ObjCSubscriptingLegacyRuntime)
+ GenerateArg(Args, OPT_fobjc_subscripting_legacy_runtime, SA);
+ }
+
+ if (Opts.GNUCVersion != 0) {
+ unsigned Major = Opts.GNUCVersion / 100 / 100;
+ unsigned Minor = (Opts.GNUCVersion / 100) % 100;
+ unsigned Patch = Opts.GNUCVersion % 100;
+ GenerateArg(Args, OPT_fgnuc_version_EQ,
+ Twine(Major) + "." + Twine(Minor) + "." + Twine(Patch), SA);
+ }
+
+ if (Opts.IgnoreXCOFFVisibility)
+ GenerateArg(Args, OPT_mignore_xcoff_visibility, SA);
+
+ if (Opts.SignedOverflowBehavior == LangOptions::SOB_Trapping) {
+ GenerateArg(Args, OPT_ftrapv, SA);
+ GenerateArg(Args, OPT_ftrapv_handler, Opts.OverflowHandler, SA);
+ } else if (Opts.SignedOverflowBehavior == LangOptions::SOB_Defined) {
+ GenerateArg(Args, OPT_fwrapv, SA);
+ }
+
+ if (Opts.MSCompatibilityVersion != 0) {
+ unsigned Major = Opts.MSCompatibilityVersion / 10000000;
+ unsigned Minor = (Opts.MSCompatibilityVersion / 100000) % 100;
+ unsigned Subminor = Opts.MSCompatibilityVersion % 100000;
+ GenerateArg(Args, OPT_fms_compatibility_version,
+ Twine(Major) + "." + Twine(Minor) + "." + Twine(Subminor), SA);
+ }
+
+ if ((!Opts.GNUMode && !Opts.MSVCCompat && !Opts.CPlusPlus17) || T.isOSzOS()) {
+ if (!Opts.Trigraphs)
+ GenerateArg(Args, OPT_fno_trigraphs, SA);
+ } else {
+ if (Opts.Trigraphs)
+ GenerateArg(Args, OPT_ftrigraphs, SA);
+ }
+
+ if (Opts.Blocks && !(Opts.OpenCL && Opts.OpenCLVersion == 200))
+ GenerateArg(Args, OPT_fblocks, SA);
+
+ if (Opts.ConvergentFunctions &&
+ !(Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) || Opts.SYCLIsDevice))
+ GenerateArg(Args, OPT_fconvergent_functions, SA);
+
+ if (Opts.NoBuiltin && !Opts.Freestanding)
+ GenerateArg(Args, OPT_fno_builtin, SA);
+
+ if (!Opts.NoBuiltin)
+ for (const auto &Func : Opts.NoBuiltinFuncs)
+ GenerateArg(Args, OPT_fno_builtin_, Func, SA);
+
+ if (Opts.LongDoubleSize == 128)
+ GenerateArg(Args, OPT_mlong_double_128, SA);
+ else if (Opts.LongDoubleSize == 64)
+ GenerateArg(Args, OPT_mlong_double_64, SA);
+
+ // Not generating '-mrtd', it's just an alias for '-fdefault-calling-conv='.
+
+ // OpenMP was requested via '-fopenmp', not implied by '-fopenmp-simd' or
+ // '-fopenmp-targets='.
+ if (Opts.OpenMP && !Opts.OpenMPSimd) {
+ GenerateArg(Args, OPT_fopenmp, SA);
+
+ if (Opts.OpenMP != 50)
+ GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
+
+ if (!Opts.OpenMPUseTLS)
+ GenerateArg(Args, OPT_fnoopenmp_use_tls, SA);
+
+ if (Opts.OpenMPIsDevice)
+ GenerateArg(Args, OPT_fopenmp_is_device, SA);
+
+ if (Opts.OpenMPIRBuilder)
+ GenerateArg(Args, OPT_fopenmp_enable_irbuilder, SA);
+ }
+
+ if (Opts.OpenMPSimd) {
+ GenerateArg(Args, OPT_fopenmp_simd, SA);
+
+ if (Opts.OpenMP != 50)
+ GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
+ }
+
+ if (Opts.OpenMPCUDANumSMs != 0)
+ GenerateArg(Args, OPT_fopenmp_cuda_number_of_sm_EQ,
+ Twine(Opts.OpenMPCUDANumSMs), SA);
+
+ if (Opts.OpenMPCUDABlocksPerSM != 0)
+ GenerateArg(Args, OPT_fopenmp_cuda_blocks_per_sm_EQ,
+ Twine(Opts.OpenMPCUDABlocksPerSM), SA);
+
+ if (Opts.OpenMPCUDAReductionBufNum != 1024)
+ GenerateArg(Args, OPT_fopenmp_cuda_teams_reduction_recs_num_EQ,
+ Twine(Opts.OpenMPCUDAReductionBufNum), SA);
+
+ if (!Opts.OMPTargetTriples.empty()) {
+ std::string Targets;
+ llvm::raw_string_ostream OS(Targets);
+ llvm::interleave(
+ Opts.OMPTargetTriples, OS,
+ [&OS](const llvm::Triple &T) { OS << T.str(); }, ",");
+ GenerateArg(Args, OPT_fopenmp_targets_EQ, OS.str(), SA);
+ }
+
+ if (!Opts.OMPHostIRFile.empty())
+ GenerateArg(Args, OPT_fopenmp_host_ir_file_path, Opts.OMPHostIRFile, SA);
+
+ if (Opts.OpenMPCUDAMode)
+ GenerateArg(Args, OPT_fopenmp_cuda_mode, SA);
+
+ if (Opts.OpenMPCUDAForceFullRuntime)
+ GenerateArg(Args, OPT_fopenmp_cuda_force_full_runtime, SA);
+
+ // The arguments used to set Optimize, OptimizeSize and NoInlineDefine are
+ // generated from CodeGenOptions.
+
+ if (Opts.DefaultFPContractMode == LangOptions::FPM_Fast)
+ GenerateArg(Args, OPT_ffp_contract, "fast", SA);
+ else if (Opts.DefaultFPContractMode == LangOptions::FPM_On)
+ GenerateArg(Args, OPT_ffp_contract, "on", SA);
+ else if (Opts.DefaultFPContractMode == LangOptions::FPM_Off)
+ GenerateArg(Args, OPT_ffp_contract, "off", SA);
+ else if (Opts.DefaultFPContractMode == LangOptions::FPM_FastHonorPragmas)
+ GenerateArg(Args, OPT_ffp_contract, "fast-honor-pragmas", SA);
+
+ for (StringRef Sanitizer : serializeSanitizerKinds(Opts.Sanitize))
+ GenerateArg(Args, OPT_fsanitize_EQ, Sanitizer, SA);
+
+ // Conflating '-fsanitize-system-ignorelist' and '-fsanitize-ignorelist'.
+ for (const std::string &F : Opts.NoSanitizeFiles)
+ GenerateArg(Args, OPT_fsanitize_ignorelist_EQ, F, SA);
+
+ if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver3_8)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "3.8", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver4)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "4.0", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver6)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "6.0", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver7)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "7.0", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver9)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "9.0", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver11)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "11.0", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver12)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "12.0", SA);
+
+ if (Opts.getSignReturnAddressScope() ==
+ LangOptions::SignReturnAddressScopeKind::All)
+ GenerateArg(Args, OPT_msign_return_address_EQ, "all", SA);
+ else if (Opts.getSignReturnAddressScope() ==
+ LangOptions::SignReturnAddressScopeKind::NonLeaf)
+ GenerateArg(Args, OPT_msign_return_address_EQ, "non-leaf", SA);
+
+ if (Opts.getSignReturnAddressKey() ==
+ LangOptions::SignReturnAddressKeyKind::BKey)
+ GenerateArg(Args, OPT_msign_return_address_key_EQ, "b_key", SA);
+
+ if (Opts.CXXABI)
+ GenerateArg(Args, OPT_fcxx_abi_EQ, TargetCXXABI::getSpelling(*Opts.CXXABI),
+ SA);
+
+ if (Opts.RelativeCXXABIVTables)
+ GenerateArg(Args, OPT_fexperimental_relative_cxx_abi_vtables, SA);
+ else
+ GenerateArg(Args, OPT_fno_experimental_relative_cxx_abi_vtables, SA);
}
-void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
+bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
InputKind IK, const llvm::Triple &T,
std::vector<std::string> &Includes,
DiagnosticsEngine &Diags) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ if (IK.getFormat() == InputKind::Precompiled ||
+ IK.getLanguage() == Language::LLVM_IR) {
+ // ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
+ // PassManager in BackendUtil.cpp. They need to be initialized no matter
+ // what the input type is.
+ if (Args.hasArg(OPT_fobjc_arc))
+ Opts.ObjCAutoRefCount = 1;
+ // PICLevel and PIELevel are needed during code generation and this should
+ // be set regardless of the input type.
+ Opts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
+ Opts.PIE = Args.hasArg(OPT_pic_is_pie);
+ parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
+ Diags, Opts.Sanitize);
+
+ return Diags.getNumErrors() == NumErrorsBefore;
+ }
+
+ // Other LangOpts are only initialized when the input is not AST or LLVM IR.
+ // FIXME: Should we really be parsing this for an Language::Asm input?
+
// FIXME: Cleanup per-file based stuff.
LangStandard::Kind LangStd = LangStandard::lang_unspecified;
if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
@@ -2242,17 +3628,15 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// The key paths of codegen options defined in Options.td start with
// "LangOpts->". Let's provide the expected variable name and type.
LangOptions *LangOpts = &Opts;
- bool Success = true;
#define LANG_OPTION_WITH_MARSHALLING( \
PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
MERGER, EXTRACTOR, TABLE_INDEX) \
- PARSE_OPTION_WITH_MARSHALLING(Args, Diags, Success, ID, FLAGS, PARAM, \
- SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, \
- MERGER, TABLE_INDEX)
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
#include "clang/Driver/Options.inc"
#undef LANG_OPTION_WITH_MARSHALLING
@@ -2263,8 +3647,15 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
}
}
- if (Args.hasArg(OPT_fno_operator_names))
- Opts.CXXOperatorNames = 0;
+ if ((Args.hasArg(OPT_fsycl_is_device) || Args.hasArg(OPT_fsycl_is_host)) &&
+ !Args.hasArg(OPT_sycl_std_EQ)) {
+ // If the user supplied -fsycl-is-device or -fsycl-is-host, but failed to
+ // provide -sycl-std=, we want to default it to whatever the default SYCL
+ // version is. I could not find a way to express this with the options
+ // tablegen because we still want this value to be SYCL_None when the user
+ // is not in device or host mode.
+ Opts.setSYCLVersion(LangOptions::SYCL_Default);
+ }
if (Opts.ObjC) {
if (Arg *arg = Args.getLastArg(OPT_fobjc_runtime_EQ)) {
@@ -2328,6 +3719,30 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.GNUCVersion = Major * 100 * 100 + Minor * 100 + Patch;
}
+ // In AIX OS, the -mignore-xcoff-visibility is enable by default if there is
+ // no -fvisibility=* option.
+ // This is the reason why '-fvisibility' needs to be always generated:
+ // its absence implies '-mignore-xcoff-visibility'.
+ //
+ // Suppose the original cc1 command line does contain '-fvisibility default':
+ // '-mignore-xcoff-visibility' should not be implied.
+ // * If '-fvisibility' is not generated (as most options with default values
+ // don't), its absence would imply '-mignore-xcoff-visibility'. This changes
+ // the command line semantics.
+ // * If '-fvisibility' is generated regardless of its presence and value,
+ // '-mignore-xcoff-visibility' won't be implied and the command line
+ // semantics are kept intact.
+ //
+ // When the original cc1 command line does **not** contain '-fvisibility',
+ // '-mignore-xcoff-visibility' is implied. The generated command line will
+ // contain both '-fvisibility default' and '-mignore-xcoff-visibility' and
+ // subsequent calls to `CreateFromArgs`/`generateCC1CommandLine` will always
+ // produce the same arguments.
+
+ if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility) ||
+ !Args.hasArg(OPT_fvisibility)))
+ Opts.IgnoreXCOFFVisibility = 1;
+
if (Args.hasArg(OPT_ftrapv)) {
Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
// Set the handler, if one is specified.
@@ -2337,8 +3752,6 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
else if (Args.hasArg(OPT_fwrapv))
Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
- Opts.MicrosoftExt = Opts.MSVCCompat || Args.hasArg(OPT_fms_extensions);
- Opts.AsmBlocks = Args.hasArg(OPT_fasm_blocks) || Opts.MicrosoftExt;
Opts.MSCompatibilityVersion = 0;
if (const Arg *A = Args.getLastArg(OPT_fms_compatibility_version)) {
VersionTuple VT;
@@ -2359,89 +3772,23 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.Trigraphs =
Args.hasFlag(OPT_ftrigraphs, OPT_fno_trigraphs, Opts.Trigraphs);
- Opts.DollarIdents = Args.hasFlag(OPT_fdollars_in_identifiers,
- OPT_fno_dollars_in_identifiers,
- Opts.DollarIdents);
-
- // -ffixed-point
- Opts.FixedPoint =
- Args.hasFlag(OPT_ffixed_point, OPT_fno_fixed_point, /*Default=*/false) &&
- !Opts.CPlusPlus;
- Opts.PaddingOnUnsignedFixedPoint =
- Args.hasFlag(OPT_fpadding_on_unsigned_fixed_point,
- OPT_fno_padding_on_unsigned_fixed_point,
- /*Default=*/false) &&
- Opts.FixedPoint;
-
- Opts.RTTI = Opts.CPlusPlus && !Args.hasArg(OPT_fno_rtti);
- Opts.RTTIData = Opts.RTTI && !Args.hasArg(OPT_fno_rtti_data);
Opts.Blocks = Args.hasArg(OPT_fblocks) || (Opts.OpenCL
&& Opts.OpenCLVersion == 200);
- Opts.Coroutines = Opts.CPlusPlus20 || Args.hasArg(OPT_fcoroutines_ts);
Opts.ConvergentFunctions = Opts.OpenCL || (Opts.CUDA && Opts.CUDAIsDevice) ||
Opts.SYCLIsDevice ||
Args.hasArg(OPT_fconvergent_functions);
- Opts.DoubleSquareBracketAttributes =
- Args.hasFlag(OPT_fdouble_square_bracket_attributes,
- OPT_fno_double_square_bracket_attributes,
- Opts.DoubleSquareBracketAttributes);
-
- Opts.CPlusPlusModules = Opts.CPlusPlus20;
- Opts.Modules =
- Args.hasArg(OPT_fmodules) || Opts.ModulesTS || Opts.CPlusPlusModules;
- Opts.ModulesDeclUse =
- Args.hasArg(OPT_fmodules_decluse) || Opts.ModulesStrictDeclUse;
- // FIXME: We only need this in C++ modules / Modules TS if we might textually
- // enter a different module (eg, when building a header unit).
- Opts.ModulesLocalVisibility =
- Args.hasArg(OPT_fmodules_local_submodule_visibility) || Opts.ModulesTS ||
- Opts.CPlusPlusModules;
- Opts.ModulesSearchAll = Opts.Modules &&
- !Args.hasArg(OPT_fno_modules_search_all) &&
- Args.hasArg(OPT_fmodules_search_all);
- Opts.CharIsSigned = Opts.OpenCL || !Args.hasArg(OPT_fno_signed_char);
- Opts.WChar = Opts.CPlusPlus && !Args.hasArg(OPT_fno_wchar);
- Opts.Char8 = Args.hasFlag(OPT_fchar8__t, OPT_fno_char8__t, Opts.CPlusPlus20);
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
if (!Opts.NoBuiltin)
getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
- Opts.AlignedAllocation =
- Args.hasFlag(OPT_faligned_allocation, OPT_fno_aligned_allocation,
- Opts.AlignedAllocation);
- Opts.AlignedAllocationUnavailable =
- Opts.AlignedAllocation && Args.hasArg(OPT_aligned_alloc_unavailable);
- if (Args.hasArg(OPT_fconcepts_ts))
- Diags.Report(diag::warn_fe_concepts_ts_flag);
- Opts.MathErrno = !Opts.OpenCL && Args.hasArg(OPT_fmath_errno);
Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
? 128
: Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
- Opts.EnableAIXExtendedAltivecABI = Args.hasArg(OPT_mabi_EQ_vec_extabi);
- Opts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
- Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
- || Args.hasArg(OPT_fdump_record_layouts);
if (Opts.FastRelaxedMath)
Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
- Opts.XLPragmaPack = Args.hasArg(OPT_fxl_pragma_pack);
llvm::sort(Opts.ModuleFeatures);
- Opts.ArmSveVectorBits =
- getLastArgIntValue(Args, options::OPT_msve_vector_bits_EQ, 0, Diags);
-
- // __declspec is enabled by default for the PS4 by the driver, and also
- // enabled for Microsoft Extensions or Borland Extensions, here.
- //
- // FIXME: __declspec is also currently enabled for CUDA, but isn't really a
- // CUDA extension. However, it is required for supporting
- // __clang_cuda_builtin_vars.h, which uses __declspec(property). Once that has
- // been rewritten in terms of something more generic, remove the Opts.CUDA
- // term here.
- Opts.DeclSpecKeyword =
- Args.hasFlag(OPT_fdeclspec, OPT_fno_declspec,
- (Opts.MicrosoftExt || Opts.Borland || Opts.CUDA));
-
// -mrtd option
if (Arg *A = Args.getLastArg(OPT_mrtd)) {
if (Opts.getDefaultCallingConv() != LangOptions::DCC_None)
@@ -2456,6 +3803,8 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
}
}
+ // Check if -fopenmp is specified and set default version to 5.0.
+ Opts.OpenMP = Args.hasArg(OPT_fopenmp) ? 50 : 0;
// Check if -fopenmp-simd is specified.
bool IsSimdSpecified =
Args.hasFlag(options::OPT_fopenmp_simd, options::OPT_fno_openmp_simd,
@@ -2470,6 +3819,8 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
bool IsTargetSpecified =
Opts.OpenMPIsDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
+ Opts.ConvergentFunctions = Opts.ConvergentFunctions || Opts.OpenMPIsDevice;
+
if (Opts.OpenMP || Opts.OpenMPSimd) {
if (int Version = getLastArgIntValue(
Args, OPT_fopenmp_version_EQ,
@@ -2553,22 +3904,11 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.OpenMPCUDAMode = Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_mode);
- // Set CUDA support for parallel execution of target regions for OpenMP target
- // NVPTX/AMDGCN if specified in options.
- Opts.OpenMPCUDATargetParallel =
- Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
- Args.hasArg(options::OPT_fopenmp_cuda_parallel_target_regions);
-
// Set CUDA mode for OpenMP target NVPTX/AMDGCN if specified in options
Opts.OpenMPCUDAForceFullRuntime =
Opts.OpenMPIsDevice && (T.isNVPTX() || T.isAMDGCN()) &&
Args.hasArg(options::OPT_fopenmp_cuda_force_full_runtime);
- // Record whether the __DEPRECATED define was requested.
- Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
- OPT_fno_deprecated_macro,
- Opts.Deprecated);
-
// FIXME: Eliminate this dependency.
unsigned Opt = getOptimizationLevel(Args, IK, Diags),
OptSize = getOptimizationLevelSize(Args);
@@ -2599,28 +3939,15 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
- LangOptions::FPExceptionModeKind FPEB = LangOptions::FPE_Ignore;
- if (Arg *A = Args.getLastArg(OPT_ffp_exception_behavior_EQ)) {
- StringRef Val = A->getValue();
- if (Val.equals("ignore"))
- FPEB = LangOptions::FPE_Ignore;
- else if (Val.equals("maytrap"))
- FPEB = LangOptions::FPE_MayTrap;
- else if (Val.equals("strict"))
- FPEB = LangOptions::FPE_Strict;
- else
- Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
- }
- Opts.setFPExceptionMode(FPEB);
-
// Parse -fsanitize= arguments.
parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
Diags, Opts.Sanitize);
- std::vector<std::string> systemBlacklists =
- Args.getAllArgValues(OPT_fsanitize_system_blacklist);
- Opts.SanitizerBlacklistFiles.insert(Opts.SanitizerBlacklistFiles.end(),
- systemBlacklists.begin(),
- systemBlacklists.end());
+ Opts.NoSanitizeFiles = Args.getAllArgValues(OPT_fsanitize_ignorelist_EQ);
+ std::vector<std::string> systemIgnorelists =
+ Args.getAllArgValues(OPT_fsanitize_system_ignorelist_EQ);
+ Opts.NoSanitizeFiles.insert(Opts.NoSanitizeFiles.end(),
+ systemIgnorelists.begin(),
+ systemIgnorelists.end());
if (Arg *A = Args.getLastArg(OPT_fclang_abi_compat_EQ)) {
Opts.setClangABICompat(LangOptions::ClangABI::Latest);
@@ -2651,6 +3978,8 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.setClangABICompat(LangOptions::ClangABI::Ver9);
else if (Major <= 11)
Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
+ else if (Major <= 12)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver12);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -2660,13 +3989,13 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_msign_return_address_EQ)) {
StringRef SignScope = A->getValue();
- if (SignScope.equals_lower("none"))
+ if (SignScope.equals_insensitive("none"))
Opts.setSignReturnAddressScope(
LangOptions::SignReturnAddressScopeKind::None);
- else if (SignScope.equals_lower("all"))
+ else if (SignScope.equals_insensitive("all"))
Opts.setSignReturnAddressScope(
LangOptions::SignReturnAddressScopeKind::All);
- else if (SignScope.equals_lower("non-leaf"))
+ else if (SignScope.equals_insensitive("non-leaf"))
Opts.setSignReturnAddressScope(
LangOptions::SignReturnAddressScopeKind::NonLeaf);
else
@@ -2676,10 +4005,10 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_msign_return_address_key_EQ)) {
StringRef SignKey = A->getValue();
if (!SignScope.empty() && !SignKey.empty()) {
- if (SignKey.equals_lower("a_key"))
+ if (SignKey.equals_insensitive("a_key"))
Opts.setSignReturnAddressKey(
LangOptions::SignReturnAddressKeyKind::AKey);
- else if (SignKey.equals_lower("b_key"))
+ else if (SignKey.equals_insensitive("b_key"))
Opts.setSignReturnAddressKey(
LangOptions::SignReturnAddressKeyKind::BKey);
else
@@ -2689,15 +4018,26 @@ void CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
}
}
- std::string ThreadModel =
- std::string(Args.getLastArgValue(OPT_mthread_model, "posix"));
- if (ThreadModel != "posix" && ThreadModel != "single")
- Diags.Report(diag::err_drv_invalid_value)
- << Args.getLastArg(OPT_mthread_model)->getAsString(Args) << ThreadModel;
- Opts.setThreadModel(
- llvm::StringSwitch<LangOptions::ThreadModelKind>(ThreadModel)
- .Case("posix", LangOptions::ThreadModelKind::POSIX)
- .Case("single", LangOptions::ThreadModelKind::Single));
+ // The value can be empty, which indicates the system default should be used.
+ StringRef CXXABI = Args.getLastArgValue(OPT_fcxx_abi_EQ);
+ if (!CXXABI.empty()) {
+ if (!TargetCXXABI::isABI(CXXABI)) {
+ Diags.Report(diag::err_invalid_cxx_abi) << CXXABI;
+ } else {
+ auto Kind = TargetCXXABI::getKind(CXXABI);
+ if (!TargetCXXABI::isSupportedCXXABI(T, Kind))
+ Diags.Report(diag::err_unsupported_cxx_abi) << CXXABI << T.str();
+ else
+ Opts.CXXABI = Kind;
+ }
+ }
+
+ Opts.RelativeCXXABIVTables =
+ Args.hasFlag(options::OPT_fexperimental_relative_cxx_abi_vtables,
+ options::OPT_fno_experimental_relative_cxx_abi_vtables,
+ TargetCXXABI::usesRelativeVTables(T));
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
@@ -2744,9 +4084,96 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
llvm_unreachable("invalid frontend action");
}
-static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
+static void GeneratePreprocessorArgs(PreprocessorOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA,
+ const LangOptions &LangOpts,
+ const FrontendOptions &FrontendOpts,
+ const CodeGenOptions &CodeGenOpts) {
+ PreprocessorOptions *PreprocessorOpts = &Opts;
+
+#define PREPROCESSOR_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef PREPROCESSOR_OPTION_WITH_MARSHALLING
+
+ if (Opts.PCHWithHdrStop && !Opts.PCHWithHdrStopCreate)
+ GenerateArg(Args, OPT_pch_through_hdrstop_use, SA);
+
+ for (const auto &D : Opts.DeserializedPCHDeclsToErrorOn)
+ GenerateArg(Args, OPT_error_on_deserialized_pch_decl, D, SA);
+
+ for (const auto &MP : Opts.MacroPrefixMap)
+ GenerateArg(Args, OPT_fmacro_prefix_map_EQ, MP.first + "=" + MP.second, SA);
+
+ if (Opts.PrecompiledPreambleBytes != std::make_pair(0u, false))
+ GenerateArg(Args, OPT_preamble_bytes_EQ,
+ Twine(Opts.PrecompiledPreambleBytes.first) + "," +
+ (Opts.PrecompiledPreambleBytes.second ? "1" : "0"),
+ SA);
+
+ for (const auto &M : Opts.Macros) {
+ // Don't generate __CET__ macro definitions. They are implied by the
+ // -fcf-protection option that is generated elsewhere.
+ if (M.first == "__CET__=1" && !M.second &&
+ !CodeGenOpts.CFProtectionReturn && CodeGenOpts.CFProtectionBranch)
+ continue;
+ if (M.first == "__CET__=2" && !M.second && CodeGenOpts.CFProtectionReturn &&
+ !CodeGenOpts.CFProtectionBranch)
+ continue;
+ if (M.first == "__CET__=3" && !M.second && CodeGenOpts.CFProtectionReturn &&
+ CodeGenOpts.CFProtectionBranch)
+ continue;
+
+ GenerateArg(Args, M.second ? OPT_U : OPT_D, M.first, SA);
+ }
+
+ for (const auto &I : Opts.Includes) {
+ // Don't generate OpenCL includes. They are implied by other flags that are
+ // generated elsewhere.
+ if (LangOpts.OpenCL && LangOpts.IncludeDefaultHeader &&
+ ((LangOpts.DeclareOpenCLBuiltins && I == "opencl-c-base.h") ||
+ I == "opencl-c.h"))
+ continue;
+
+ GenerateArg(Args, OPT_include, I, SA);
+ }
+
+ for (const auto &CI : Opts.ChainedIncludes)
+ GenerateArg(Args, OPT_chain_include, CI, SA);
+
+ for (const auto &RF : Opts.RemappedFiles)
+ GenerateArg(Args, OPT_remap_file, RF.first + ";" + RF.second, SA);
+
+ // Don't handle LexEditorPlaceholders. It is implied by the action that is
+ // generated elsewhere.
+}
+
+static bool ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags,
- frontend::ActionKind Action) {
+ frontend::ActionKind Action,
+ const FrontendOptions &FrontendOpts) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ PreprocessorOptions *PreprocessorOpts = &Opts;
+
+#define PREPROCESSOR_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef PREPROCESSOR_OPTION_WITH_MARSHALLING
+
Opts.PCHWithHdrStop = Args.hasArg(OPT_pch_through_hdrstop_create) ||
Args.hasArg(OPT_pch_through_hdrstop_use);
@@ -2817,21 +4244,94 @@ static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
// "editor placeholder in source file" error in PP only mode.
if (isStrictlyPreprocessorAction(Action))
Opts.LexEditorPlaceholders = false;
+
+ return Diags.getNumErrors() == NumErrorsBefore;
+}
+
+static void GeneratePreprocessorOutputArgs(
+ const PreprocessorOutputOptions &Opts, SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA, frontend::ActionKind Action) {
+ const PreprocessorOutputOptions &PreprocessorOutputOpts = Opts;
+
+#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING
+
+ bool Generate_dM = isStrictlyPreprocessorAction(Action) && !Opts.ShowCPP;
+ if (Generate_dM)
+ GenerateArg(Args, OPT_dM, SA);
+ if (!Generate_dM && Opts.ShowMacros)
+ GenerateArg(Args, OPT_dD, SA);
}
-static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
- ArgList &Args,
+static bool ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
+ ArgList &Args, DiagnosticsEngine &Diags,
frontend::ActionKind Action) {
- if (isStrictlyPreprocessorAction(Action))
- Opts.ShowCPP = !Args.hasArg(OPT_dM);
- else
- Opts.ShowCPP = 0;
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ PreprocessorOutputOptions &PreprocessorOutputOpts = Opts;
+
+#define PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef PREPROCESSOR_OUTPUT_OPTION_WITH_MARSHALLING
+ Opts.ShowCPP = isStrictlyPreprocessorAction(Action) && !Args.hasArg(OPT_dM);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
-static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
+static void GenerateTargetArgs(const TargetOptions &Opts,
+ SmallVectorImpl<const char *> &Args,
+ CompilerInvocation::StringAllocator SA) {
+ const TargetOptions *TargetOpts = &Opts;
+#define TARGET_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ GENERATE_OPTION_WITH_MARSHALLING( \
+ Args, SA, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef TARGET_OPTION_WITH_MARSHALLING
+
+ if (!Opts.SDKVersion.empty())
+ GenerateArg(Args, OPT_target_sdk_version_EQ, Opts.SDKVersion.getAsString(),
+ SA);
+}
+
+static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
DiagnosticsEngine &Diags) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
+
+ TargetOptions *TargetOpts = &Opts;
+
+#define TARGET_OPTION_WITH_MARSHALLING( \
+ PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
+ DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
+ MERGER, EXTRACTOR, TABLE_INDEX) \
+ PARSE_OPTION_WITH_MARSHALLING( \
+ Args, Diags, ID, FLAGS, PARAM, SHOULD_PARSE, KEYPATH, DEFAULT_VALUE, \
+ IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, MERGER, TABLE_INDEX)
+#include "clang/Driver/Options.inc"
+#undef TARGET_OPTION_WITH_MARSHALLING
+
if (Arg *A = Args.getLastArg(options::OPT_target_sdk_version_EQ)) {
llvm::VersionTuple Version;
if (Version.tryParse(A->getValue()))
@@ -2840,13 +4340,14 @@ static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
else
Opts.SDKVersion = Version;
}
+
+ return Diags.getNumErrors() == NumErrorsBefore;
}
-bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
- ArrayRef<const char *> CommandLineArgs,
- DiagnosticsEngine &Diags,
- const char *Argv0) {
- bool Success = true;
+bool CompilerInvocation::CreateFromArgsImpl(
+ CompilerInvocation &Res, ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags, const char *Argv0) {
+ unsigned NumErrorsBefore = Diags.getNumErrors();
// Parse the arguments.
const OptTable &Opts = getDriverOptTable();
@@ -2857,11 +4358,9 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
LangOptions &LangOpts = *Res.getLangOpts();
// Check for missing argument error.
- if (MissingArgCount) {
+ if (MissingArgCount)
Diags.Report(diag::err_drv_missing_argument)
<< Args.getArgString(MissingArgIndex) << MissingArgCount;
- Success = false;
- }
// Issue errors on unknown arguments.
for (const auto *A : Args.filtered(OPT_UNKNOWN)) {
@@ -2872,54 +4371,25 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
else
Diags.Report(diag::err_drv_unknown_argument_with_suggestion)
<< ArgString << Nearest;
- Success = false;
}
- Success &= Res.parseSimpleArgs(Args, Diags);
-
- Success &= ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
- ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args);
- if (!Res.getDependencyOutputOpts().OutputFile.empty() &&
- Res.getDependencyOutputOpts().Targets.empty()) {
- Diags.Report(diag::err_fe_dependency_file_requires_MT);
- Success = false;
- }
- Success &= ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
- /*DefaultDiagColor=*/false);
+ ParseFileSystemArgs(Res.getFileSystemOpts(), Args, Diags);
+ ParseMigratorArgs(Res.getMigratorOpts(), Args, Diags);
+ ParseAnalyzerArgs(*Res.getAnalyzerOpts(), Args, Diags);
+ ParseDiagnosticArgs(Res.getDiagnosticOpts(), Args, &Diags,
+ /*DefaultDiagColor=*/false);
+ ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags, LangOpts.IsHeaderFile);
// FIXME: We shouldn't have to pass the DashX option around here
- InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), Args, Diags,
- LangOpts.IsHeaderFile);
+ InputKind DashX = Res.getFrontendOpts().DashX;
ParseTargetArgs(Res.getTargetOpts(), Args, Diags);
llvm::Triple T(Res.getTargetOpts().Triple);
- ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args,
+ ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), Args, Diags,
Res.getFileSystemOpts().WorkingDir);
- if (DashX.getFormat() == InputKind::Precompiled ||
- DashX.getLanguage() == Language::LLVM_IR) {
- // ObjCAAutoRefCount and Sanitize LangOpts are used to setup the
- // PassManager in BackendUtil.cpp. They need to be initializd no matter
- // what the input type is.
- if (Args.hasArg(OPT_fobjc_arc))
- LangOpts.ObjCAutoRefCount = 1;
- // PIClevel and PIELevel are needed during code generation and this should be
- // set regardless of the input type.
- LangOpts.PICLevel = getLastArgIntValue(Args, OPT_pic_level, 0, Diags);
- LangOpts.PIE = Args.hasArg(OPT_pic_is_pie);
- parseSanitizerKinds("-fsanitize=", Args.getAllArgValues(OPT_fsanitize_EQ),
- Diags, LangOpts.Sanitize);
- } else {
- // Other LangOpts are only initialized when the input is not AST or LLVM IR.
- // FIXME: Should we really be calling this for an Language::Asm input?
- ParseLangArgs(LangOpts, Args, DashX, T, Res.getPreprocessorOpts().Includes,
- Diags);
- if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
- LangOpts.ObjCExceptions = 1;
- if (T.isOSDarwin() && DashX.isPreprocessed()) {
- // Supress the darwin-specific 'stdlibcxx-not-found' diagnostic for
- // preprocessed input as we don't expect it to be used with -std=libc++
- // anyway.
- Res.getDiagnosticOpts().Warnings.push_back("no-stdlibcxx-not-found");
- }
- }
+
+ ParseLangArgs(LangOpts, Args, DashX, T, Res.getPreprocessorOpts().Includes,
+ Diags);
+ if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
+ LangOpts.ObjCExceptions = 1;
if (LangOpts.CUDA) {
// During CUDA device-side compilation, the aux triple is the
@@ -2932,8 +4402,8 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
if (LangOpts.OpenMPIsDevice)
Res.getTargetOpts().HostTriple = Res.getFrontendOpts().AuxTriple;
- Success &= ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, T,
- Res.getFrontendOpts().OutputFile, LangOpts);
+ ParseCodeGenArgs(Res.getCodeGenOpts(), Args, DashX, Diags, T,
+ Res.getFrontendOpts().OutputFile, LangOpts);
// FIXME: Override value name discarding when asan or msan is used because the
// backend passes depend on the name of the alloca in order to print out
@@ -2945,13 +4415,17 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
!LangOpts.Sanitize.has(SanitizerKind::KernelMemory);
ParsePreprocessorArgs(Res.getPreprocessorOpts(), Args, Diags,
- Res.getFrontendOpts().ProgramAction);
- ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), Args,
+ Res.getFrontendOpts().ProgramAction,
+ Res.getFrontendOpts());
+ ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), Args, Diags,
Res.getFrontendOpts().ProgramAction);
- // Turn on -Wspir-compat for SPIR target.
- if (T.isSPIR())
- Res.getDiagnosticOpts().Warnings.push_back("spir-compat");
+ ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), Args, Diags,
+ Res.getFrontendOpts().ProgramAction,
+ Res.getPreprocessorOutputOpts().ShowLineMarkers);
+ if (!Res.getDependencyOutputOpts().OutputFile.empty() &&
+ Res.getDependencyOutputOpts().Targets.empty())
+ Diags.Report(diag::err_fe_dependency_file_requires_MT);
// If sanitizer is enabled, disable OPT_ffine_grained_bitfield_accesses.
if (Res.getCodeGenOpts().FineGrainedBitfieldAccesses &&
@@ -2966,7 +4440,23 @@ bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
FixupInvocation(Res, Diags, Args, DashX);
- return Success;
+ return Diags.getNumErrors() == NumErrorsBefore;
+}
+
+bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Invocation,
+ ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags,
+ const char *Argv0) {
+ CompilerInvocation DummyInvocation;
+
+ return RoundTrip(
+ [](CompilerInvocation &Invocation, ArrayRef<const char *> CommandLineArgs,
+ DiagnosticsEngine &Diags, const char *Argv0) {
+ return CreateFromArgsImpl(Invocation, CommandLineArgs, Diags, Argv0);
+ },
+ [](CompilerInvocation &Invocation, SmallVectorImpl<const char *> &Args,
+ StringAllocator SA) { Invocation.generateCC1CommandLine(Args, SA); },
+ Invocation, DummyInvocation, CommandLineArgs, Diags, Argv0);
}
std::string CompilerInvocation::getModuleHash() const {
@@ -3079,51 +4569,28 @@ std::string CompilerInvocation::getModuleHash() const {
if (!SanHash.empty())
code = hash_combine(code, SanHash.Mask);
- return llvm::APInt(64, code).toString(36, /*Signed=*/false);
+ return toString(llvm::APInt(64, code), 36, /*Signed=*/false);
}
void CompilerInvocation::generateCC1CommandLine(
SmallVectorImpl<const char *> &Args, StringAllocator SA) const {
- // Capture the extracted value as a lambda argument to avoid potential issues
- // with lifetime extension of the reference.
-#define GENERATE_OPTION_WITH_MARSHALLING( \
- ARGS, STRING_ALLOCATOR, KIND, FLAGS, SPELLING, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, EXTRACTOR, \
- TABLE_INDEX) \
- if ((FLAGS)&options::CC1Option) { \
- [&](const auto &Extracted) { \
- if (ALWAYS_EMIT || \
- (Extracted != \
- static_cast<decltype(KEYPATH)>((IMPLIED_CHECK) ? (IMPLIED_VALUE) \
- : (DEFAULT_VALUE)))) \
- DENORMALIZER(ARGS, SPELLING, STRING_ALLOCATOR, Option::KIND##Class, \
- TABLE_INDEX, Extracted); \
- }(EXTRACTOR(KEYPATH)); \
- }
-
-#define OPTION_WITH_MARSHALLING( \
- PREFIX_TYPE, NAME, ID, KIND, GROUP, ALIAS, ALIASARGS, FLAGS, PARAM, \
- HELPTEXT, METAVAR, VALUES, SPELLING, SHOULD_PARSE, ALWAYS_EMIT, KEYPATH, \
- DEFAULT_VALUE, IMPLIED_CHECK, IMPLIED_VALUE, NORMALIZER, DENORMALIZER, \
- MERGER, EXTRACTOR, TABLE_INDEX) \
- GENERATE_OPTION_WITH_MARSHALLING(Args, SA, KIND, FLAGS, SPELLING, \
- ALWAYS_EMIT, this->KEYPATH, DEFAULT_VALUE, \
- IMPLIED_CHECK, IMPLIED_VALUE, DENORMALIZER, \
- EXTRACTOR, TABLE_INDEX)
-
-#define DIAG_OPTION_WITH_MARSHALLING OPTION_WITH_MARSHALLING
-#define LANG_OPTION_WITH_MARSHALLING OPTION_WITH_MARSHALLING
-#define CODEGEN_OPTION_WITH_MARSHALLING OPTION_WITH_MARSHALLING
-
-#include "clang/Driver/Options.inc"
-
-#undef CODEGEN_OPTION_WITH_MARSHALLING
-#undef LANG_OPTION_WITH_MARSHALLING
-#undef DIAG_OPTION_WITH_MARSHALLING
-#undef OPTION_WITH_MARSHALLING
-#undef GENERATE_OPTION_WITH_MARSHALLING
-
- GenerateLangArgs(*LangOpts, Args, SA);
+ llvm::Triple T(TargetOpts->Triple);
+
+ GenerateFileSystemArgs(FileSystemOpts, Args, SA);
+ GenerateMigratorArgs(MigratorOpts, Args, SA);
+ GenerateAnalyzerArgs(*AnalyzerOpts, Args, SA);
+ GenerateDiagnosticArgs(*DiagnosticOpts, Args, SA, false);
+ GenerateFrontendArgs(FrontendOpts, Args, SA, LangOpts->IsHeaderFile);
+ GenerateTargetArgs(*TargetOpts, Args, SA);
+ GenerateHeaderSearchArgs(*HeaderSearchOpts, Args, SA);
+ GenerateLangArgs(*LangOpts, Args, SA, T, FrontendOpts.DashX);
+ GenerateCodeGenArgs(CodeGenOpts, Args, SA, T, FrontendOpts.OutputFile,
+ &*LangOpts);
+ GeneratePreprocessorArgs(*PreprocessorOpts, Args, SA, *LangOpts, FrontendOpts,
+ CodeGenOpts);
+ GeneratePreprocessorOutputArgs(PreprocessorOutputOpts, Args, SA,
+ FrontendOpts.ProgramAction);
+ GenerateDependencyOutputArgs(DependencyOutputOpts, Args, SA);
}
IntrusiveRefCntPtr<llvm::vfs::FileSystem>
diff --git a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index ff0aa6faf33f..2e23ebfdf160 100644
--- a/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -10,15 +10,17 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/Utils.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
-#include "clang/Driver/Action.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Tool.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/Utils.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Host.h"
using namespace clang;
@@ -37,7 +39,10 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
SmallVector<const char *, 16> Args(ArgList.begin(), ArgList.end());
// FIXME: Find a cleaner way to force the driver into restricted modes.
- Args.push_back("-fsyntax-only");
+ Args.insert(
+ llvm::find_if(
+ Args, [](const char *Elem) { return llvm::StringRef(Elem) == "--"; }),
+ "-fsyntax-only");
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(), *Diags,
diff --git a/clang/lib/Frontend/DependencyFile.cpp b/clang/lib/Frontend/DependencyFile.cpp
index fe8ab7197400..288827374106 100644
--- a/clang/lib/Frontend/DependencyFile.cpp
+++ b/clang/lib/Frontend/DependencyFile.cpp
@@ -141,7 +141,18 @@ void DependencyCollector::maybeAddDependency(StringRef Filename,
}
bool DependencyCollector::addDependency(StringRef Filename) {
- if (Seen.insert(Filename).second) {
+ StringRef SearchPath;
+#ifdef _WIN32
+ // Make the search insensitive to case and separators.
+ llvm::SmallString<256> TmpPath = Filename;
+ llvm::sys::path::native(TmpPath);
+ std::transform(TmpPath.begin(), TmpPath.end(), TmpPath.begin(), ::tolower);
+ SearchPath = TmpPath.str();
+#else
+ SearchPath = Filename;
+#endif
+
+ if (Seen.insert(SearchPath).second) {
Dependencies.push_back(std::string(Filename));
return true;
}
@@ -182,7 +193,7 @@ DependencyFileGenerator::DependencyFileGenerator(
IncludeModuleFiles(Opts.IncludeModuleFiles),
OutputFormat(Opts.OutputFormat), InputFileIndex(0) {
for (const auto &ExtraDep : Opts.ExtraDeps) {
- if (addDependency(ExtraDep))
+ if (addDependency(ExtraDep.first))
++InputFileIndex;
}
}
@@ -307,7 +318,7 @@ void DependencyFileGenerator::outputDependencyFile(DiagnosticsEngine &Diags) {
}
std::error_code EC;
- llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
Diags.Report(diag::err_fe_error_opening) << OutputFile << EC.message();
return;
diff --git a/clang/lib/Frontend/DependencyGraph.cpp b/clang/lib/Frontend/DependencyGraph.cpp
index 8a6e491def45..4bed4e2d4403 100644
--- a/clang/lib/Frontend/DependencyGraph.cpp
+++ b/clang/lib/Frontend/DependencyGraph.cpp
@@ -100,7 +100,7 @@ DependencyGraphCallback::writeNodeReference(raw_ostream &OS,
void DependencyGraphCallback::OutputGraphFile() {
std::error_code EC;
- llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
PP->getDiagnostics().Report(diag::err_fe_error_opening) << OutputFile
<< EC.message();
diff --git a/clang/lib/Frontend/DiagnosticRenderer.cpp b/clang/lib/Frontend/DiagnosticRenderer.cpp
index 22b957988f46..0afc8f3b1dab 100644
--- a/clang/lib/Frontend/DiagnosticRenderer.cpp
+++ b/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -394,6 +394,13 @@ mapDiagnosticRanges(FullSourceLoc CaretLoc, ArrayRef<CharSourceRange> Ranges,
}
}
+ // There is a chance that begin or end is invalid here, for example if
+ // specific compile error is reported.
+ // It is possible that the FileID's do not match, if one comes from an
+ // included file. In this case we can not produce a meaningful source range.
+ if (Begin.isInvalid() || End.isInvalid() || BeginFileID != EndFileID)
+ continue;
+
// Do the backtracking.
SmallVector<FileID, 4> CommonArgExpansions;
computeCommonMacroArgExpansionFileIDs(Begin, End, SM, CommonArgExpansions);
diff --git a/clang/lib/Frontend/FrontendAction.cpp b/clang/lib/Frontend/FrontendAction.cpp
index 11b25b106627..c996c9c486bc 100644
--- a/clang/lib/Frontend/FrontendAction.cpp
+++ b/clang/lib/Frontend/FrontendAction.cpp
@@ -342,7 +342,8 @@ static std::error_code collectModuleHeaderIncludes(
// file relative to the module build directory (the directory containing
// the module map file) so this will find the same file that we found
// while parsing the module map.
- addHeaderInclude(H.NameAsWritten, Includes, LangOpts, Module->IsExternC);
+ addHeaderInclude(H.PathRelativeToRootModuleDirectory, Includes, LangOpts,
+ Module->IsExternC);
}
}
// Note that Module->PrivateHeaders will not be a TopHeader.
@@ -351,8 +352,8 @@ static std::error_code collectModuleHeaderIncludes(
Module->addTopHeader(UmbrellaHeader.Entry);
if (Module->Parent)
// Include the umbrella header for submodules.
- addHeaderInclude(UmbrellaHeader.NameAsWritten, Includes, LangOpts,
- Module->IsExternC);
+ addHeaderInclude(UmbrellaHeader.PathRelativeToRootModuleDirectory,
+ Includes, LangOpts, Module->IsExternC);
} else if (Module::DirectoryName UmbrellaDir = Module->getUmbrellaDir()) {
// Add all of the headers we find in this subdirectory.
std::error_code EC;
@@ -386,7 +387,8 @@ static std::error_code collectModuleHeaderIncludes(
auto PathIt = llvm::sys::path::rbegin(Dir->path());
for (int I = 0; I != Dir.level() + 1; ++I, ++PathIt)
Components.push_back(*PathIt);
- SmallString<128> RelativeHeader(UmbrellaDir.NameAsWritten);
+ SmallString<128> RelativeHeader(
+ UmbrellaDir.PathRelativeToRootModuleDirectory);
for (auto It = Components.rbegin(), End = Components.rend(); It != End;
++It)
llvm::sys::path::append(RelativeHeader, *It);
@@ -470,7 +472,7 @@ static Module *prepareToBuildModule(CompilerInstance &CI,
// Dig out the module definition.
HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
Module *M = HS.lookupModule(CI.getLangOpts().CurrentModule,
- /*AllowSearch=*/false);
+ /*AllowSearch=*/true);
if (!M) {
CI.getDiagnostics().Report(diag::err_missing_module)
<< CI.getLangOpts().CurrentModule << ModuleMapFilename;
@@ -528,8 +530,8 @@ getInputBufferForModule(CompilerInstance &CI, Module *M) {
SmallString<256> HeaderContents;
std::error_code Err = std::error_code();
if (Module::Header UmbrellaHeader = M->getUmbrellaHeader())
- addHeaderInclude(UmbrellaHeader.NameAsWritten, HeaderContents,
- CI.getLangOpts(), M->IsExternC);
+ addHeaderInclude(UmbrellaHeader.PathRelativeToRootModuleDirectory,
+ HeaderContents, CI.getLangOpts(), M->IsExternC);
Err = collectModuleHeaderIncludes(
CI.getLangOpts(), FileMgr, CI.getDiagnostics(),
CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(), M,
@@ -1087,6 +1089,7 @@ bool WrapperFrontendAction::BeginSourceFileAction(CompilerInstance &CI) {
void WrapperFrontendAction::ExecuteAction() {
WrappedAction->ExecuteAction();
}
+void WrapperFrontendAction::EndSourceFile() { WrappedAction->EndSourceFile(); }
void WrapperFrontendAction::EndSourceFileAction() {
WrappedAction->EndSourceFileAction();
}
diff --git a/clang/lib/Frontend/FrontendActions.cpp b/clang/lib/Frontend/FrontendActions.cpp
index 060cec23acc4..c6ebbdc8c04e 100644
--- a/clang/lib/Frontend/FrontendActions.cpp
+++ b/clang/lib/Frontend/FrontendActions.cpp
@@ -62,6 +62,27 @@ InitOnlyAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
void InitOnlyAction::ExecuteAction() {
}
+// Basically PreprocessOnlyAction::ExecuteAction.
+void ReadPCHAndPreprocessAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+
+ // Ignore unknown pragmas.
+ PP.IgnorePragmas();
+
+ Token Tok;
+ // Start parsing the specified input file.
+ PP.EnterMainSourceFile();
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+}
+
+std::unique_ptr<ASTConsumer>
+ReadPCHAndPreprocessAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return std::make_unique<ASTConsumer>();
+}
+
//===----------------------------------------------------------------------===//
// AST Consumer Actions
//===----------------------------------------------------------------------===//
@@ -218,7 +239,8 @@ GenerateModuleFromModuleMapAction::CreateOutputFile(CompilerInstance &CI,
// Because this is exposed via libclang we must disable RemoveFileOnSignal.
return CI.createDefaultOutputFile(/*Binary=*/true, InFile, /*Extension=*/"",
/*RemoveFileOnSignal=*/false,
- /*CreateMissingDirectories=*/true);
+ /*CreateMissingDirectories=*/true,
+ /*ForceUseTemporary=*/true);
}
bool GenerateModuleInterfaceAction::BeginSourceFileAction(
@@ -297,7 +319,8 @@ bool GenerateHeaderModuleAction::BeginSourceFileAction(
<< Name;
continue;
}
- Headers.push_back({std::string(Name), *FE});
+ Headers.push_back(
+ {std::string(Name), std::string(Name), &FE->getFileEntry()});
}
HS.getModuleMap().createHeaderModule(CI.getLangOpts().CurrentModule, Headers);
@@ -722,7 +745,7 @@ void DumpModuleInfoAction::ExecuteAction() {
if (!OutputFileName.empty() && OutputFileName != "-") {
std::error_code EC;
OutFile.reset(new llvm::raw_fd_ostream(OutputFileName.str(), EC,
- llvm::sys::fs::OF_Text));
+ llvm::sys::fs::OF_TextWithCRLF));
}
llvm::raw_ostream &Out = OutFile.get()? *OutFile.get() : llvm::outs();
@@ -795,7 +818,7 @@ void PreprocessOnlyAction::ExecuteAction() {
void PrintPreprocessedAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
// Output file may need to be set to 'Binary', to avoid converting Unix style
- // line feeds (<LF>) to Microsoft style line feeds (<CR><LF>).
+ // line feeds (<LF>) to Microsoft style line feeds (<CR><LF>) on Windows.
//
// Look to see what type of line endings the file uses. If there's a
// CRLF, then we won't open the file up in binary mode. If there is
@@ -807,30 +830,35 @@ void PrintPreprocessedAction::ExecuteAction() {
// all of their source code on a single line. However, that is still a
// concern, so if we scan for too long, we'll just assume the file should
// be opened in binary mode.
- bool BinaryMode = true;
- const SourceManager& SM = CI.getSourceManager();
- if (llvm::Optional<llvm::MemoryBufferRef> Buffer =
- SM.getBufferOrNone(SM.getMainFileID())) {
- const char *cur = Buffer->getBufferStart();
- const char *end = Buffer->getBufferEnd();
- const char *next = (cur != end) ? cur + 1 : end;
-
- // Limit ourselves to only scanning 256 characters into the source
- // file. This is mostly a sanity check in case the file has no
- // newlines whatsoever.
- if (end - cur > 256) end = cur + 256;
-
- while (next < end) {
- if (*cur == 0x0D) { // CR
- if (*next == 0x0A) // CRLF
- BinaryMode = false;
-
- break;
- } else if (*cur == 0x0A) // LF
- break;
-
- ++cur;
- ++next;
+
+ bool BinaryMode = false;
+ if (llvm::Triple(LLVM_HOST_TRIPLE).isOSWindows()) {
+ BinaryMode = true;
+ const SourceManager &SM = CI.getSourceManager();
+ if (llvm::Optional<llvm::MemoryBufferRef> Buffer =
+ SM.getBufferOrNone(SM.getMainFileID())) {
+ const char *cur = Buffer->getBufferStart();
+ const char *end = Buffer->getBufferEnd();
+ const char *next = (cur != end) ? cur + 1 : end;
+
+ // Limit ourselves to only scanning 256 characters into the source
+ // file. This is mostly a sanity check in case the file has no
+ // newlines whatsoever.
+ if (end - cur > 256)
+ end = cur + 256;
+
+ while (next < end) {
+ if (*cur == 0x0D) { // CR
+ if (*next == 0x0A) // CRLF
+ BinaryMode = false;
+
+ break;
+ } else if (*cur == 0x0A) // LF
+ break;
+
+ ++cur;
+ ++next;
+ }
}
}
@@ -862,6 +890,7 @@ void PrintPreambleAction::ExecuteAction() {
case Language::ObjC:
case Language::ObjCXX:
case Language::OpenCL:
+ case Language::OpenCLCXX:
case Language::CUDA:
case Language::HIP:
break;
diff --git a/clang/lib/Frontend/FrontendOptions.cpp b/clang/lib/Frontend/FrontendOptions.cpp
index 4ea13cf0784f..37ac428a8003 100644
--- a/clang/lib/Frontend/FrontendOptions.cpp
+++ b/clang/lib/Frontend/FrontendOptions.cpp
@@ -29,6 +29,7 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Case("cppm", Language::CXX)
.Case("iim", InputKind(Language::CXX).getPreprocessed())
.Case("cl", Language::OpenCL)
+ .Case("clcpp", Language::OpenCLCXX)
.Cases("cu", "cuh", Language::CUDA)
.Case("hip", Language::HIP)
.Cases("ll", "bc", Language::LLVM_IR)
diff --git a/clang/lib/Frontend/HeaderIncludeGen.cpp b/clang/lib/Frontend/HeaderIncludeGen.cpp
index 97fac8a26fae..1ee47d8d2480 100644
--- a/clang/lib/Frontend/HeaderIncludeGen.cpp
+++ b/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -45,6 +45,9 @@ public:
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
FileID PrevFID) override;
+
+ void FileSkipped(const FileEntryRef &SkippedFile, const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) override;
};
}
@@ -101,7 +104,7 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP,
std::error_code EC;
llvm::raw_fd_ostream *OS = new llvm::raw_fd_ostream(
OutputPath.str(), EC,
- llvm::sys::fs::OF_Append | llvm::sys::fs::OF_Text);
+ llvm::sys::fs::OF_Append | llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
PP.getDiagnostics().Report(clang::diag::warn_fe_cc_print_header_failure)
<< EC.message();
@@ -119,7 +122,7 @@ void clang::AttachHeaderIncludeGen(Preprocessor &PP,
// as sanitizer blacklists. It's only important for cl.exe compatibility,
// the GNU way to generate rules is -M / -MM / -MD / -MMD.
for (const auto &Header : DepOpts.ExtraDeps)
- PrintHeaderInfo(OutputFile, Header, ShowDepth, 2, MSStyle);
+ PrintHeaderInfo(OutputFile, Header.first, ShowDepth, 2, MSStyle);
PP.addPPCallbacks(std::make_unique<HeaderIncludesCallback>(
&PP, ShowAllHeaders, OutputFile, DepOpts, OwnsOutputFile, ShowDepth,
MSStyle));
@@ -181,3 +184,16 @@ void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
MSStyle);
}
}
+
+void HeaderIncludesCallback::FileSkipped(const FileEntryRef &SkippedFile, const
+ Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) {
+ if (!DepOpts.ShowSkippedHeaderIncludes)
+ return;
+
+ if (!DepOpts.IncludeSystemHeaders && isSystem(FileType))
+ return;
+
+ PrintHeaderInfo(OutputFile, SkippedFile.getName(), ShowDepth,
+ CurrentIncludeDepth + 1, MSStyle);
+}
diff --git a/clang/lib/Frontend/InitHeaderSearch.cpp b/clang/lib/Frontend/InitHeaderSearch.cpp
index bc31445d6d08..ba9f96384f81 100644
--- a/clang/lib/Frontend/InitHeaderSearch.cpp
+++ b/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -32,14 +32,20 @@ using namespace clang;
using namespace clang::frontend;
namespace {
+/// Holds information about a single DirectoryLookup object.
+struct DirectoryLookupInfo {
+ IncludeDirGroup Group;
+ DirectoryLookup Lookup;
+
+ DirectoryLookupInfo(IncludeDirGroup Group, DirectoryLookup Lookup)
+ : Group(Group), Lookup(Lookup) {}
+};
/// InitHeaderSearch - This class makes it easier to set the search paths of
/// a HeaderSearch object. InitHeaderSearch stores several search path lists
/// internally, which can be sent to a HeaderSearch object in one swoop.
class InitHeaderSearch {
- std::vector<std::pair<IncludeDirGroup, DirectoryLookup> > IncludePath;
- typedef std::vector<std::pair<IncludeDirGroup,
- DirectoryLookup> >::const_iterator path_iterator;
+ std::vector<DirectoryLookupInfo> IncludePath;
std::vector<std::pair<std::string, bool> > SystemHeaderPrefixes;
HeaderSearch &Headers;
bool Verbose;
@@ -154,8 +160,7 @@ bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
// If the directory exists, add it.
if (auto DE = FM.getOptionalDirectoryRef(MappedPathStr)) {
- IncludePath.push_back(
- std::make_pair(Group, DirectoryLookup(*DE, Type, isFramework)));
+ IncludePath.emplace_back(Group, DirectoryLookup(*DE, Type, isFramework));
return true;
}
@@ -165,9 +170,8 @@ bool InitHeaderSearch::AddUnmappedPath(const Twine &Path, IncludeDirGroup Group,
if (auto FE = FM.getFile(MappedPathStr)) {
if (const HeaderMap *HM = Headers.CreateHeaderMap(*FE)) {
// It is a headermap, add it to the search path.
- IncludePath.push_back(
- std::make_pair(Group,
- DirectoryLookup(HM, Type, Group == IndexHeaderMap)));
+ IncludePath.emplace_back(
+ Group, DirectoryLookup(HM, Type, Group == IndexHeaderMap));
return true;
}
}
@@ -558,32 +562,32 @@ void InitHeaderSearch::Realize(const LangOptions &Lang) {
// Quoted arguments go first.
for (auto &Include : IncludePath)
- if (Include.first == Quoted)
- SearchList.push_back(Include.second);
+ if (Include.Group == Quoted)
+ SearchList.push_back(Include.Lookup);
// Deduplicate and remember index.
RemoveDuplicates(SearchList, 0, Verbose);
unsigned NumQuoted = SearchList.size();
for (auto &Include : IncludePath)
- if (Include.first == Angled || Include.first == IndexHeaderMap)
- SearchList.push_back(Include.second);
+ if (Include.Group == Angled || Include.Group == IndexHeaderMap)
+ SearchList.push_back(Include.Lookup);
RemoveDuplicates(SearchList, NumQuoted, Verbose);
unsigned NumAngled = SearchList.size();
for (auto &Include : IncludePath)
- if (Include.first == System || Include.first == ExternCSystem ||
- (!Lang.ObjC && !Lang.CPlusPlus && Include.first == CSystem) ||
+ if (Include.Group == System || Include.Group == ExternCSystem ||
+ (!Lang.ObjC && !Lang.CPlusPlus && Include.Group == CSystem) ||
(/*FIXME !Lang.ObjC && */ Lang.CPlusPlus &&
- Include.first == CXXSystem) ||
- (Lang.ObjC && !Lang.CPlusPlus && Include.first == ObjCSystem) ||
- (Lang.ObjC && Lang.CPlusPlus && Include.first == ObjCXXSystem))
- SearchList.push_back(Include.second);
+ Include.Group == CXXSystem) ||
+ (Lang.ObjC && !Lang.CPlusPlus && Include.Group == ObjCSystem) ||
+ (Lang.ObjC && Lang.CPlusPlus && Include.Group == ObjCXXSystem))
+ SearchList.push_back(Include.Lookup);
for (auto &Include : IncludePath)
- if (Include.first == After)
- SearchList.push_back(Include.second);
+ if (Include.Group == After)
+ SearchList.push_back(Include.Lookup);
// Remove duplicates across both the Angled and System directories. GCC does
// this and failing to remove duplicates across these two groups breaks
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index d47ad1b74649..bca0bb4ada67 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -168,7 +168,7 @@ static void DefineTypeSize(const Twine &MacroName, unsigned TypeWidth,
MacroBuilder &Builder) {
llvm::APInt MaxVal = isSigned ? llvm::APInt::getSignedMaxValue(TypeWidth)
: llvm::APInt::getMaxValue(TypeWidth);
- Builder.defineMacro(MacroName, MaxVal.toString(10, isSigned) + ValSuffix);
+ Builder.defineMacro(MacroName, toString(MaxVal, 10, isSigned) + ValSuffix);
}
/// DefineTypeSize - An overloaded helper that uses TargetInfo to determine
@@ -216,6 +216,11 @@ static void DefineExactWidthIntType(TargetInfo::IntType Ty,
if (TypeWidth == 64)
Ty = IsSigned ? TI.getInt64Type() : TI.getUInt64Type();
+ // Use the target specified int16 type when appropriate. Some MCU targets
+ // (such as AVR) have definition of [u]int16_t to [un]signed int.
+ if (TypeWidth == 16)
+ Ty = IsSigned ? TI.getInt16Type() : TI.getUInt16Type();
+
const char *Prefix = IsSigned ? "__INT" : "__UINT";
DefineType(Prefix + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
@@ -474,10 +479,12 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__FAST_RELAXED_MATH__");
}
- if (LangOpts.SYCL) {
+ if (LangOpts.SYCLIsDevice || LangOpts.SYCLIsHost) {
// SYCL Version is set to a value when building SYCL applications
if (LangOpts.getSYCLVersion() == LangOptions::SYCL_2017)
Builder.defineMacro("CL_SYCL_LANGUAGE_VERSION", "121");
+ else if (LangOpts.getSYCLVersion() == LangOptions::SYCL_2020)
+ Builder.defineMacro("SYCL_LANGUAGE_VERSION", "202001");
}
// Not "standard" per se, but available even with the -undef flag.
@@ -565,7 +572,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_aggregate_bases", "201603L");
Builder.defineMacro("__cpp_structured_bindings", "201606L");
Builder.defineMacro("__cpp_nontype_template_args",
- LangOpts.CPlusPlus20 ? "201911L" : "201411L");
+ "201411L"); // (not latest)
Builder.defineMacro("__cpp_fold_expressions", "201603L");
Builder.defineMacro("__cpp_guaranteed_copy_elision", "201606L");
Builder.defineMacro("__cpp_nontype_template_parameter_auto", "201606L");
@@ -587,7 +594,12 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_designated_initializers", "201707L");
Builder.defineMacro("__cpp_impl_three_way_comparison", "201907L");
//Builder.defineMacro("__cpp_modules", "201907L");
- //Builder.defineMacro("__cpp_using_enum", "201907L");
+ Builder.defineMacro("__cpp_using_enum", "201907L");
+ }
+ // C++2b features.
+ if (LangOpts.CPlusPlus2b) {
+ Builder.defineMacro("__cpp_implicit_move", "202011L");
+ Builder.defineMacro("__cpp_size_t_suffix", "202011L");
}
if (LangOpts.Char8)
Builder.defineMacro("__cpp_char8_t", "201811L");
@@ -598,6 +610,29 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_coroutines", "201703L");
}
+/// InitializeOpenCLFeatureTestMacros - Define OpenCL macros based on target
+/// settings and language version
+void InitializeOpenCLFeatureTestMacros(const TargetInfo &TI,
+ const LangOptions &Opts,
+ MacroBuilder &Builder) {
+ const llvm::StringMap<bool> &OpenCLFeaturesMap = TI.getSupportedOpenCLOpts();
+ // FIXME: OpenCL options which affect language semantics/syntax
+ // should be moved into LangOptions.
+ auto defineOpenCLExtMacro = [&](llvm::StringRef Name, auto... OptArgs) {
+ // Check if extension is supported by target and is available in this
+ // OpenCL version
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, Name) &&
+ OpenCLOptions::isOpenCLOptionAvailableIn(Opts, OptArgs...))
+ Builder.defineMacro(Name);
+ };
+#define OPENCL_GENERIC_EXTENSION(Ext, ...) \
+ defineOpenCLExtMacro(#Ext, __VA_ARGS__);
+#include "clang/Basic/OpenCLExtensions.def"
+
+ // Assume compiling for FULL profile
+ Builder.defineMacro("__opencl_c_int64");
+}
+
static void InitializePredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
@@ -775,6 +810,21 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
}
}
+ // Macros to help identify the narrow and wide character sets
+ // FIXME: clang currently ignores -fexec-charset=. If this changes,
+ // then this may need to be updated.
+ Builder.defineMacro("__clang_literal_encoding__", "\"UTF-8\"");
+ if (TI.getTypeWidth(TI.getWCharType()) >= 32) {
+ // FIXME: 32-bit wchar_t signals UTF-32. This may change
+ // if -fwide-exec-charset= is ever supported.
+ Builder.defineMacro("__clang_wide_literal_encoding__", "\"UTF-32\"");
+ } else {
+ // FIXME: Less-than 32-bit wchar_t generally means UTF-16
+ // (e.g., Windows, 32-bit IBM). This may need to be
+ // updated if -fwide-exec-charset= is ever supported.
+ Builder.defineMacro("__clang_wide_literal_encoding__", "\"UTF-16\"");
+ }
+
if (LangOpts.Optimize)
Builder.defineMacro("__OPTIMIZE__");
if (LangOpts.OptimizeSize)
@@ -967,8 +1017,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DefineFastIntType(64, true, TI, Builder);
DefineFastIntType(64, false, TI, Builder);
- char UserLabelPrefix[2] = {TI.getDataLayout().getGlobalPrefix(), 0};
- Builder.defineMacro("__USER_LABEL_PREFIX__", UserLabelPrefix);
+ Builder.defineMacro("__USER_LABEL_PREFIX__", TI.getUserLabelPrefix());
if (LangOpts.FastMath || LangOpts.FiniteMathOnly)
Builder.defineMacro("__FINITE_MATH_ONLY__", "1");
@@ -1120,7 +1169,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// OpenCL definitions.
if (LangOpts.OpenCL) {
- TI.getOpenCLFeatureDefines(LangOpts, Builder);
+ InitializeOpenCLFeatureTestMacros(TI, LangOpts, Builder);
if (TI.getTriple().isSPIR())
Builder.defineMacro("__IMAGE_SUPPORT__");
diff --git a/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp b/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
index b7c1e693413b..d58f5bb09199 100644
--- a/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
+++ b/clang/lib/Frontend/InterfaceStubFunctionsConsumer.cpp
@@ -290,11 +290,8 @@ public:
const ASTContext &context, StringRef Format,
raw_ostream &OS) -> void {
OS << "--- !" << Format << "\n";
- OS << "IfsVersion: 2.0\n";
- OS << "Triple: " << T.str() << "\n";
- OS << "ObjectFileFormat: "
- << "ELF"
- << "\n"; // TODO: For now, just ELF.
+ OS << "IfsVersion: 3.0\n";
+ OS << "Target: " << T.str() << "\n";
OS << "Symbols:\n";
for (const auto &E : Symbols) {
const MangledSymbol &Symbol = E.second;
@@ -330,7 +327,7 @@ public:
OS.flush();
};
- assert(Format == "experimental-ifs-v2" && "Unexpected IFS Format.");
+ assert(Format == "ifs-v1" && "Unexpected IFS Format.");
writeIfsV1(Instance.getTarget().getTriple(), Symbols, context, Format, *OS);
}
};
@@ -339,6 +336,5 @@ public:
std::unique_ptr<ASTConsumer>
GenerateInterfaceStubsAction::CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) {
- return std::make_unique<InterfaceStubFunctionsConsumer>(
- CI, InFile, "experimental-ifs-v2");
+ return std::make_unique<InterfaceStubFunctionsConsumer>(CI, InFile, "ifs-v1");
}
diff --git a/clang/lib/Frontend/ModuleDependencyCollector.cpp b/clang/lib/Frontend/ModuleDependencyCollector.cpp
index 2e4e64f827b0..4301e49f1d80 100644
--- a/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -148,7 +148,7 @@ void ModuleDependencyCollector::writeFileMap() {
std::error_code EC;
SmallString<256> YAMLPath = VFSDir;
llvm::sys::path::append(YAMLPath, "vfs.yaml");
- llvm::raw_fd_ostream OS(YAMLPath, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream OS(YAMLPath, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
HasErrors = true;
return;
diff --git a/clang/lib/Frontend/PrecompiledPreamble.cpp b/clang/lib/Frontend/PrecompiledPreamble.cpp
index 77b93713ce68..af82ab3f5558 100644
--- a/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -365,17 +365,9 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
Clang->setDiagnostics(&Diagnostics);
// Create the target instance.
- Clang->setTarget(TargetInfo::CreateTargetInfo(
- Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
- if (!Clang->hasTarget())
+ if (!Clang->createTarget())
return BuildPreambleError::CouldntCreateTargetInfo;
- // Inform the target of the language options.
- //
- // FIXME: We shouldn't need to do this, the target should be immutable once
- // created. This complexity should be lifted elsewhere.
- Clang->getTarget().adjust(Clang->getLangOpts());
-
if (Clang->getFrontendOpts().Inputs.size() != 1 ||
Clang->getFrontendOpts().Inputs[0].getKind().getFormat() !=
InputKind::Source ||
diff --git a/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 24ea1ccba207..b7259569595d 100644
--- a/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -95,14 +95,20 @@ private:
bool DumpIncludeDirectives;
bool UseLineDirectives;
bool IsFirstFileEntered;
+ bool MinimizeWhitespace;
+
+ Token PrevTok;
+ Token PrevPrevTok;
+
public:
PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, bool lineMarkers,
bool defines, bool DumpIncludeDirectives,
- bool UseLineDirectives)
+ bool UseLineDirectives, bool MinimizeWhitespace)
: PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os),
DisableLineMarkers(lineMarkers), DumpDefines(defines),
DumpIncludeDirectives(DumpIncludeDirectives),
- UseLineDirectives(UseLineDirectives) {
+ UseLineDirectives(UseLineDirectives),
+ MinimizeWhitespace(MinimizeWhitespace) {
CurLine = 0;
CurFilename += "<uninit>";
EmittedTokensOnThisLine = false;
@@ -110,8 +116,13 @@ public:
FileType = SrcMgr::C_User;
Initialized = false;
IsFirstFileEntered = false;
+
+ PrevTok.startToken();
+ PrevPrevTok.startToken();
}
+ bool isMinimizeWhitespace() const { return MinimizeWhitespace; }
+
void setEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
@@ -120,7 +131,12 @@ public:
return EmittedDirectiveOnThisLine;
}
- bool startNewLineIfNeeded(bool ShouldUpdateCurrentLine = true);
+ /// Ensure that the output stream position is at the beginning of a new line
+ /// and inserts one if it does not. It is intended to ensure that directives
+ /// inserted by the directives not from the input source (such as #line) are
+ /// in the first column. To insert newlines that represent the input, use
+ /// MoveToLine(/*...*/, /*RequireStartOfLine=*/true).
+ void startNewLineIfNeeded();
void FileChanged(SourceLocation Loc, FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -148,18 +164,45 @@ public:
void PragmaAssumeNonNullBegin(SourceLocation Loc) override;
void PragmaAssumeNonNullEnd(SourceLocation Loc) override;
- bool HandleFirstTokOnLine(Token &Tok);
+ /// Insert whitespace before emitting the next token.
+ ///
+ /// @param Tok Next token to be emitted.
+ /// @param RequireSpace Ensure at least one whitespace is emitted. Useful
+ /// if non-tokens have been emitted to the stream.
+ /// @param RequireSameLine Never emit newlines. Useful when semantics depend
+ /// on being on the same line, such as directives.
+ void HandleWhitespaceBeforeTok(const Token &Tok, bool RequireSpace,
+ bool RequireSameLine);
/// Move to the line of the provided source location. This will
- /// return true if the output stream required adjustment or if
- /// the requested location is on the first line.
- bool MoveToLine(SourceLocation Loc) {
+ /// return true if a newline was inserted or if
+ /// the requested location is the first token on the first line.
+ /// In these cases the next output will be the first column on the line and
+ /// make it possible to insert indention. The newline was inserted
+ /// implicitly when at the beginning of the file.
+ ///
+ /// @param Tok Token where to move to.
+ /// @param RequiresStartOfLine Whether the next line depends on being in the
+ /// first column, such as a directive.
+ ///
+ /// @return Whether column adjustments are necessary.
+ bool MoveToLine(const Token &Tok, bool RequireStartOfLine) {
+ PresumedLoc PLoc = SM.getPresumedLoc(Tok.getLocation());
+ if (PLoc.isInvalid())
+ return false;
+ bool IsFirstInFile = Tok.isAtStartOfLine() && PLoc.getLine() == 1;
+ return MoveToLine(PLoc.getLine(), RequireStartOfLine) || IsFirstInFile;
+ }
+
+ /// Move to the line of the provided source location. Returns true if a new
+ /// line was inserted.
+ bool MoveToLine(SourceLocation Loc, bool RequireStartOfLine) {
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
if (PLoc.isInvalid())
return false;
- return MoveToLine(PLoc.getLine()) || (PLoc.getLine() == 1);
+ return MoveToLine(PLoc.getLine(), RequireStartOfLine);
}
- bool MoveToLine(unsigned LineNo);
+ bool MoveToLine(unsigned LineNo, bool RequireStartOfLine);
bool AvoidConcat(const Token &PrevPrevTok, const Token &PrevTok,
const Token &Tok) {
@@ -187,7 +230,7 @@ public:
void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
const char *Extra,
unsigned ExtraLen) {
- startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ startNewLineIfNeeded();
// Emit #line directives or GNU line markers depending on what mode we're in.
if (UseLineDirectives) {
@@ -214,43 +257,57 @@ void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
/// object. We can do this by emitting some number of \n's, or be emitting a
/// #line directive. This returns false if already at the specified line, true
/// if some newlines were emitted.
-bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
+bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo,
+ bool RequireStartOfLine) {
+ // If it is required to start a new line or finish the current, insert
+ // vertical whitespace now and take it into account when moving to the
+ // expected line.
+ bool StartedNewLine = false;
+ if ((RequireStartOfLine && EmittedTokensOnThisLine) ||
+ EmittedDirectiveOnThisLine) {
+ OS << '\n';
+ StartedNewLine = true;
+ CurLine += 1;
+ EmittedTokensOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
+ }
+
// If this line is "close enough" to the original line, just print newlines,
// otherwise print a #line directive.
- if (LineNo-CurLine <= 8) {
- if (LineNo-CurLine == 1)
- OS << '\n';
- else if (LineNo == CurLine)
- return false; // Spelling line moved, but expansion line didn't.
- else {
- const char *NewLines = "\n\n\n\n\n\n\n\n";
- OS.write(NewLines, LineNo-CurLine);
- }
+ if (CurLine == LineNo) {
+ // Nothing to do if we are already on the correct line.
+ } else if (!StartedNewLine && (!MinimizeWhitespace || !DisableLineMarkers) &&
+ LineNo - CurLine == 1) {
+ // Printing a single line has priority over printing a #line directive, even
+ // when minimizing whitespace which otherwise would print #line directives
+ // for every single line.
+ OS << '\n';
+ StartedNewLine = true;
+ } else if (!MinimizeWhitespace && LineNo - CurLine <= 8) {
+ const char *NewLines = "\n\n\n\n\n\n\n\n";
+ OS.write(NewLines, LineNo - CurLine);
+ StartedNewLine = true;
} else if (!DisableLineMarkers) {
// Emit a #line or line marker.
WriteLineInfo(LineNo, nullptr, 0);
- } else {
- // Okay, we're in -P mode, which turns off line markers. However, we still
- // need to emit a newline between tokens on different lines.
- startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ StartedNewLine = true;
+ }
+
+ if (StartedNewLine) {
+ EmittedTokensOnThisLine = false;
+ EmittedDirectiveOnThisLine = false;
}
CurLine = LineNo;
- return true;
+ return StartedNewLine;
}
-bool
-PrintPPOutputPPCallbacks::startNewLineIfNeeded(bool ShouldUpdateCurrentLine) {
+void PrintPPOutputPPCallbacks::startNewLineIfNeeded() {
if (EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) {
OS << '\n';
EmittedTokensOnThisLine = false;
EmittedDirectiveOnThisLine = false;
- if (ShouldUpdateCurrentLine)
- ++CurLine;
- return true;
}
-
- return false;
}
/// FileChanged - Whenever the preprocessor enters or exits a #include file
@@ -273,7 +330,7 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
if (Reason == PPCallbacks::EnterFile) {
SourceLocation IncludeLoc = UserLoc.getIncludeLoc();
if (IncludeLoc.isValid())
- MoveToLine(IncludeLoc);
+ MoveToLine(IncludeLoc, /*RequireStartOfLine=*/false);
} else if (Reason == PPCallbacks::SystemHeaderPragma) {
// GCC emits the # directive for this directive on the line AFTER the
// directive and emits a bunch of spaces that aren't needed. This is because
@@ -290,7 +347,8 @@ void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
FileType = NewFileType;
if (DisableLineMarkers) {
- startNewLineIfNeeded(/*ShouldUpdateCurrentLine=*/false);
+ if (!MinimizeWhitespace)
+ startNewLineIfNeeded();
return;
}
@@ -336,15 +394,13 @@ void PrintPPOutputPPCallbacks::InclusionDirective(
// In -dI mode, dump #include directives prior to dumping their content or
// interpretation.
if (DumpIncludeDirectives) {
- startNewLineIfNeeded();
- MoveToLine(HashLoc);
+ MoveToLine(HashLoc, /*RequireStartOfLine=*/true);
const std::string TokenText = PP.getSpelling(IncludeTok);
assert(!TokenText.empty());
OS << "#" << TokenText << " "
<< (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
<< " /* clang -E -dI */";
setEmittedDirectiveOnThisLine();
- startNewLineIfNeeded();
}
// When preprocessing, turn implicit imports into module import pragmas.
@@ -353,17 +409,13 @@ void PrintPPOutputPPCallbacks::InclusionDirective(
case tok::pp_include:
case tok::pp_import:
case tok::pp_include_next:
- startNewLineIfNeeded();
- MoveToLine(HashLoc);
+ MoveToLine(HashLoc, /*RequireStartOfLine=*/true);
OS << "#pragma clang module import " << Imported->getFullModuleName(true)
<< " /* clang -E: implicit import for "
<< "#" << PP.getSpelling(IncludeTok) << " "
<< (IsAngled ? '<' : '"') << FileName << (IsAngled ? '>' : '"')
<< " */";
- // Since we want a newline after the pragma, but not a #<line>, start a
- // new line immediately.
- EmittedTokensOnThisLine = true;
- startNewLineIfNeeded();
+ setEmittedDirectiveOnThisLine();
break;
case tok::pp___include_macros:
@@ -398,11 +450,11 @@ void PrintPPOutputPPCallbacks::EndModule(const Module *M) {
/// Ident - Handle #ident directives when read by the preprocessor.
///
void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, StringRef S) {
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS.write("#ident ", strlen("#ident "));
OS.write(S.begin(), S.size());
- EmittedTokensOnThisLine = true;
+ setEmittedTokensOnThisLine();
}
/// MacroDefined - This hook is called whenever a macro definition is seen.
@@ -414,7 +466,7 @@ void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
// Ignore __FILE__ etc.
MI->isBuiltinMacro()) return;
- MoveToLine(MI->getDefinitionLoc());
+ MoveToLine(MI->getDefinitionLoc(), /*RequireStartOfLine=*/true);
PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
setEmittedDirectiveOnThisLine();
}
@@ -425,7 +477,7 @@ void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
// Only print out macro definitions in -dD mode.
if (!DumpDefines) return;
- MoveToLine(MacroNameTok.getLocation());
+ MoveToLine(MacroNameTok.getLocation(), /*RequireStartOfLine=*/true);
OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
setEmittedDirectiveOnThisLine();
}
@@ -446,8 +498,7 @@ void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
StringRef Namespace,
PragmaMessageKind Kind,
StringRef Str) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma ";
if (!Namespace.empty())
OS << Namespace << ' ';
@@ -472,8 +523,7 @@ void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
void PrintPPOutputPPCallbacks::PragmaDebug(SourceLocation Loc,
StringRef DebugType) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma clang __debug ";
OS << DebugType;
@@ -483,16 +533,14 @@ void PrintPPOutputPPCallbacks::PragmaDebug(SourceLocation Loc,
void PrintPPOutputPPCallbacks::
PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma " << Namespace << " diagnostic push";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma " << Namespace << " diagnostic pop";
setEmittedDirectiveOnThisLine();
}
@@ -501,8 +549,7 @@ void PrintPPOutputPPCallbacks::PragmaDiagnostic(SourceLocation Loc,
StringRef Namespace,
diag::Severity Map,
StringRef Str) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma " << Namespace << " diagnostic ";
switch (Map) {
case diag::Severity::Remark:
@@ -528,8 +575,7 @@ void PrintPPOutputPPCallbacks::PragmaDiagnostic(SourceLocation Loc,
void PrintPPOutputPPCallbacks::PragmaWarning(SourceLocation Loc,
StringRef WarningSpec,
ArrayRef<int> Ids) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma warning(" << WarningSpec << ':';
for (ArrayRef<int>::iterator I = Ids.begin(), E = Ids.end(); I != E; ++I)
OS << ' ' << *I;
@@ -539,8 +585,7 @@ void PrintPPOutputPPCallbacks::PragmaWarning(SourceLocation Loc,
void PrintPPOutputPPCallbacks::PragmaWarningPush(SourceLocation Loc,
int Level) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma warning(push";
if (Level >= 0)
OS << ", " << Level;
@@ -549,16 +594,14 @@ void PrintPPOutputPPCallbacks::PragmaWarningPush(SourceLocation Loc,
}
void PrintPPOutputPPCallbacks::PragmaWarningPop(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma warning(pop)";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::PragmaExecCharsetPush(SourceLocation Loc,
StringRef Str) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma character_execution_set(push";
if (!Str.empty())
OS << ", " << Str;
@@ -567,64 +610,80 @@ void PrintPPOutputPPCallbacks::PragmaExecCharsetPush(SourceLocation Loc,
}
void PrintPPOutputPPCallbacks::PragmaExecCharsetPop(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma character_execution_set(pop)";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaAssumeNonNullBegin(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma clang assume_nonnull begin";
setEmittedDirectiveOnThisLine();
}
void PrintPPOutputPPCallbacks::
PragmaAssumeNonNullEnd(SourceLocation Loc) {
- startNewLineIfNeeded();
- MoveToLine(Loc);
+ MoveToLine(Loc, /*RequireStartOfLine=*/true);
OS << "#pragma clang assume_nonnull end";
setEmittedDirectiveOnThisLine();
}
-/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
-/// is called for the first token on each new line. If this really is the start
-/// of a new logical line, handle it and return true, otherwise return false.
-/// This may not be the start of a logical line because the "start of line"
-/// marker is set for spelling lines, not expansion ones.
-bool PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
- // Figure out what line we went to and insert the appropriate number of
- // newline characters.
- if (!MoveToLine(Tok.getLocation()))
- return false;
-
- // Print out space characters so that the first token on a line is
- // indented for easy reading.
- unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
-
- // The first token on a line can have a column number of 1, yet still expect
- // leading white space, if a macro expansion in column 1 starts with an empty
- // macro argument, or an empty nested macro expansion. In this case, move the
- // token to column 2.
- if (ColNo == 1 && Tok.hasLeadingSpace())
- ColNo = 2;
-
- // This hack prevents stuff like:
- // #define HASH #
- // HASH define foo bar
- // From having the # character end up at column 1, which makes it so it
- // is not handled as a #define next time through the preprocessor if in
- // -fpreprocessed mode.
- if (ColNo <= 1 && Tok.is(tok::hash))
- OS << ' ';
+void PrintPPOutputPPCallbacks::HandleWhitespaceBeforeTok(const Token &Tok,
+ bool RequireSpace,
+ bool RequireSameLine) {
+ // These tokens are not expanded to anything and don't need whitespace before
+ // them.
+ if (Tok.is(tok::eof) ||
+ (Tok.isAnnotation() && !Tok.is(tok::annot_header_unit) &&
+ !Tok.is(tok::annot_module_begin) && !Tok.is(tok::annot_module_end)))
+ return;
- // Otherwise, indent the appropriate number of spaces.
- for (; ColNo > 1; --ColNo)
- OS << ' ';
+ if (!RequireSameLine && MoveToLine(Tok, /*RequireStartOfLine=*/false)) {
+ if (MinimizeWhitespace) {
+ // Avoid interpreting hash as a directive under -fpreprocessed.
+ if (Tok.is(tok::hash))
+ OS << ' ';
+ } else {
+ // Print out space characters so that the first token on a line is
+ // indented for easy reading.
+ unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
+
+ // The first token on a line can have a column number of 1, yet still
+ // expect leading white space, if a macro expansion in column 1 starts
+ // with an empty macro argument, or an empty nested macro expansion. In
+ // this case, move the token to column 2.
+ if (ColNo == 1 && Tok.hasLeadingSpace())
+ ColNo = 2;
+
+ // This hack prevents stuff like:
+ // #define HASH #
+ // HASH define foo bar
+ // From having the # character end up at column 1, which makes it so it
+ // is not handled as a #define next time through the preprocessor if in
+ // -fpreprocessed mode.
+ if (ColNo <= 1 && Tok.is(tok::hash))
+ OS << ' ';
+
+ // Otherwise, indent the appropriate number of spaces.
+ for (; ColNo > 1; --ColNo)
+ OS << ' ';
+ }
+ } else {
+ // Insert whitespace between the previous and next token if either
+ // - The caller requires it
+ // - The input had whitespace between them and we are not in
+ // whitespace-minimization mode
+ // - The whitespace is necessary to keep the tokens apart and there is not
+ // already a newline between them
+ if (RequireSpace || (!MinimizeWhitespace && Tok.hasLeadingSpace()) ||
+ ((EmittedTokensOnThisLine || EmittedDirectiveOnThisLine) &&
+ AvoidConcat(PrevPrevTok, PrevTok, Tok)))
+ OS << ' ';
+ }
- return true;
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
}
void PrintPPOutputPPCallbacks::HandleNewlinesInToken(const char *TokStr,
@@ -668,9 +727,9 @@ struct UnknownPragmaHandler : public PragmaHandler {
Token &PragmaTok) override {
// Figure out what line we went to and insert the appropriate number of
// newline characters.
- Callbacks->startNewLineIfNeeded();
- Callbacks->MoveToLine(PragmaTok.getLocation());
+ Callbacks->MoveToLine(PragmaTok.getLocation(), /*RequireStartOfLine=*/true);
Callbacks->OS.write(Prefix, strlen(Prefix));
+ Callbacks->setEmittedTokensOnThisLine();
if (ShouldExpandTokens) {
// The first token does not have expanded macros. Expand them, if
@@ -682,21 +741,16 @@ struct UnknownPragmaHandler : public PragmaHandler {
/*IsReinject=*/false);
PP.Lex(PragmaTok);
}
- Token PrevToken;
- Token PrevPrevToken;
- PrevToken.startToken();
- PrevPrevToken.startToken();
// Read and print all of the pragma tokens.
+ bool IsFirst = true;
while (PragmaTok.isNot(tok::eod)) {
- if (PragmaTok.hasLeadingSpace() ||
- Callbacks->AvoidConcat(PrevPrevToken, PrevToken, PragmaTok))
- Callbacks->OS << ' ';
+ Callbacks->HandleWhitespaceBeforeTok(PragmaTok, /*RequireSpace=*/IsFirst,
+ /*RequireSameLine=*/true);
+ IsFirst = false;
std::string TokSpell = PP.getSpelling(PragmaTok);
Callbacks->OS.write(&TokSpell[0], TokSpell.size());
-
- PrevPrevToken = PrevToken;
- PrevToken = PragmaTok;
+ Callbacks->setEmittedTokensOnThisLine();
if (ShouldExpandTokens)
PP.Lex(PragmaTok);
@@ -715,44 +769,41 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
bool DropComments = PP.getLangOpts().TraditionalCPP &&
!PP.getCommentRetentionState();
+ bool IsStartOfLine = false;
char Buffer[256];
- Token PrevPrevTok, PrevTok;
- PrevPrevTok.startToken();
- PrevTok.startToken();
while (1) {
- if (Callbacks->hasEmittedDirectiveOnThisLine()) {
- Callbacks->startNewLineIfNeeded();
- Callbacks->MoveToLine(Tok.getLocation());
- }
-
- // If this token is at the start of a line, emit newlines if needed.
- if (Tok.isAtStartOfLine() && Callbacks->HandleFirstTokOnLine(Tok)) {
- // done.
- } else if (Tok.hasLeadingSpace() ||
- // If we haven't emitted a token on this line yet, PrevTok isn't
- // useful to look at and no concatenation could happen anyway.
- (Callbacks->hasEmittedTokensOnThisLine() &&
- // Don't print "-" next to "-", it would form "--".
- Callbacks->AvoidConcat(PrevPrevTok, PrevTok, Tok))) {
- OS << ' ';
- }
+ // Two lines joined with line continuation ('\' as last character on the
+ // line) must be emitted as one line even though Tok.getLine() returns two
+ // different values. In this situation Tok.isAtStartOfLine() is false even
+ // though it may be the first token on the lexical line. When
+ // dropping/skipping a token that is at the start of a line, propagate the
+ // start-of-line-ness to the next token to not append it to the previous
+ // line.
+ IsStartOfLine = IsStartOfLine || Tok.isAtStartOfLine();
+
+ Callbacks->HandleWhitespaceBeforeTok(Tok, /*RequireSpace=*/false,
+ /*RequireSameLine=*/!IsStartOfLine);
if (DropComments && Tok.is(tok::comment)) {
// Skip comments. Normally the preprocessor does not generate
// tok::comment nodes at all when not keeping comments, but under
// -traditional-cpp the lexer keeps /all/ whitespace, including comments.
- SourceLocation StartLoc = Tok.getLocation();
- Callbacks->MoveToLine(StartLoc.getLocWithOffset(Tok.getLength()));
+ PP.Lex(Tok);
+ continue;
} else if (Tok.is(tok::eod)) {
// Don't print end of directive tokens, since they are typically newlines
// that mess up our line tracking. These come from unknown pre-processor
// directives or hash-prefixed comments in standalone assembly files.
PP.Lex(Tok);
+ // FIXME: The token on the next line after #include should have
+ // Tok.isAtStartOfLine() set.
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_module_include)) {
// PrintPPOutputPPCallbacks::InclusionDirective handles producing
// appropriate output here. Ignore this token entirely.
PP.Lex(Tok);
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_module_begin)) {
// FIXME: We retrieve this token after the FileChanged callback, and
@@ -764,11 +815,13 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
Callbacks->BeginModule(
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_module_end)) {
Callbacks->EndModule(
reinterpret_cast<Module *>(Tok.getAnnotationValue()));
PP.Lex(Tok);
+ IsStartOfLine = true;
continue;
} else if (Tok.is(tok::annot_header_unit)) {
// This is a header-name that has been (effectively) converted into a
@@ -796,8 +849,17 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
// Tokens that can contain embedded newlines need to adjust our current
// line number.
+ // FIXME: The token may end with a newline in which case
+ // setEmittedDirectiveOnThisLine/setEmittedTokensOnThisLine afterwards is
+ // wrong.
if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
Callbacks->HandleNewlinesInToken(TokPtr, Len);
+ if (Tok.is(tok::comment) && Len >= 2 && TokPtr[0] == '/' &&
+ TokPtr[1] == '/') {
+ // It's a line comment;
+ // Ensure that we don't concatenate anything behind it.
+ Callbacks->setEmittedDirectiveOnThisLine();
+ }
} else {
std::string S = PP.getSpelling(Tok);
OS.write(S.data(), S.size());
@@ -806,13 +868,17 @@ static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
// line number.
if (Tok.getKind() == tok::comment || Tok.getKind() == tok::unknown)
Callbacks->HandleNewlinesInToken(S.data(), S.size());
+ if (Tok.is(tok::comment) && S.size() >= 2 && S[0] == '/' && S[1] == '/') {
+ // It's a line comment;
+ // Ensure that we don't concatenate anything behind it.
+ Callbacks->setEmittedDirectiveOnThisLine();
+ }
}
Callbacks->setEmittedTokensOnThisLine();
+ IsStartOfLine = false;
if (Tok.is(tok::eof)) break;
- PrevPrevTok = PrevTok;
- PrevTok = Tok;
PP.Lex(Tok);
}
}
@@ -870,7 +936,8 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(
PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros,
- Opts.ShowIncludeDirectives, Opts.UseLineDirectives);
+ Opts.ShowIncludeDirectives, Opts.UseLineDirectives,
+ Opts.MinimizeWhitespace);
// Expand macros in pragmas with -fms-extensions. The assumption is that
// the majority of pragmas in such a file will be Microsoft pragmas.
diff --git a/clang/lib/Frontend/Rewrite/FrontendActions.cpp b/clang/lib/Frontend/Rewrite/FrontendActions.cpp
index 5351ff0593ed..09ed07be923e 100644
--- a/clang/lib/Frontend/Rewrite/FrontendActions.cpp
+++ b/clang/lib/Frontend/Rewrite/FrontendActions.cpp
@@ -185,7 +185,7 @@ RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
void RewriteMacrosAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
std::unique_ptr<raw_ostream> OS =
- CI.createDefaultOutputFile(true, getCurrentFileOrBufferName());
+ CI.createDefaultOutputFile(/*Binary=*/true, getCurrentFileOrBufferName());
if (!OS) return;
RewriteMacrosInInput(CI.getPreprocessor(), OS.get());
@@ -194,7 +194,7 @@ void RewriteMacrosAction::ExecuteAction() {
void RewriteTestAction::ExecuteAction() {
CompilerInstance &CI = getCompilerInstance();
std::unique_ptr<raw_ostream> OS =
- CI.createDefaultOutputFile(false, getCurrentFileOrBufferName());
+ CI.createDefaultOutputFile(/*Binary=*/false, getCurrentFileOrBufferName());
if (!OS) return;
DoRewriteTest(CI.getPreprocessor(), OS.get());
@@ -270,7 +270,7 @@ public:
bool RewriteIncludesAction::BeginSourceFileAction(CompilerInstance &CI) {
if (!OutputStream) {
OutputStream =
- CI.createDefaultOutputFile(true, getCurrentFileOrBufferName());
+ CI.createDefaultOutputFile(/*Binary=*/true, getCurrentFileOrBufferName());
if (!OutputStream)
return false;
}
diff --git a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index 9d5366bb161e..fd54bcbf7c35 100644
--- a/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -585,7 +585,7 @@ namespace {
CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
- return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_PRValue, Kind, E, nullptr,
FPOptionsOverride(), TInfo,
SourceLocation(), SourceLocation());
}
@@ -2107,12 +2107,12 @@ RewriteModernObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
- DRE, nullptr, VK_RValue, FPOptionsOverride());
+ DRE, nullptr, VK_PRValue, FPOptionsOverride());
const auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *Exp =
CallExpr::Create(*Context, ICE, Args, FT->getCallResultType(*Context),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
return Exp;
}
@@ -2591,7 +2591,7 @@ Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
Expr *Unop = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(DRE->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
@@ -2694,7 +2694,7 @@ Stmt *RewriteModernObjC::RewriteObjCBoxedExpr(ObjCBoxedExpr *Exp) {
auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
ReplaceStmt(Exp, CE);
return CE;
}
@@ -2720,7 +2720,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
std::string NSArrayFName("__NSContainer_literal");
FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName);
DeclRefExpr *NSArrayDRE = new (Context) DeclRefExpr(
- *Context, NSArrayFD, false, NSArrayFType, VK_RValue, SourceLocation());
+ *Context, NSArrayFD, false, NSArrayFType, VK_PRValue, SourceLocation());
SmallVector<Expr*, 16> InitExprs;
unsigned NumElements = Exp->getNumElements();
@@ -2815,7 +2815,7 @@ Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
const FunctionType *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
ReplaceStmt(Exp, CE);
return CE;
}
@@ -2841,7 +2841,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
std::string NSDictFName("__NSContainer_literal");
FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName);
DeclRefExpr *NSDictDRE = new (Context) DeclRefExpr(
- *Context, NSDictFD, false, NSDictFType, VK_RValue, SourceLocation());
+ *Context, NSDictFD, false, NSDictFType, VK_PRValue, SourceLocation());
SmallVector<Expr*, 16> KeyExprs;
SmallVector<Expr*, 16> ValueExprs;
@@ -2967,7 +2967,7 @@ Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral
const FunctionType *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
ReplaceStmt(Exp, CE);
return CE;
}
@@ -3177,7 +3177,7 @@ Expr *RewriteModernObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFla
FunctionDecl::Create(*Context, TUDecl, SourceLocation(), SourceLocation(),
ID, FuncType, nullptr, SC_Extern, false, false);
DeclRefExpr *DRE = new (Context)
- DeclRefExpr(*Context, FD, false, castType, VK_RValue, SourceLocation());
+ DeclRefExpr(*Context, FD, false, castType, VK_PRValue, SourceLocation());
CallExpr *STCE =
CallExpr::Create(*Context, DRE, MsgExprs, castType, VK_LValue,
SourceLocation(), FPOptionsOverride());
@@ -3242,16 +3242,11 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SmallVector<Expr*, 4> InitExprs;
// set the receiver to self, the first argument to all methods.
- InitExprs.push_back(
- NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_BitCast,
- new (Context) DeclRefExpr(*Context,
- CurMethodDef->getSelfDecl(),
- false,
- Context->getObjCIdType(),
- VK_RValue,
- SourceLocation()))
- ); // set the 'receiver'.
+ InitExprs.push_back(NoTypeInfoCStyleCastExpr(
+ Context, Context->getObjCIdType(), CK_BitCast,
+ new (Context) DeclRefExpr(*Context, CurMethodDef->getSelfDecl(), false,
+ Context->getObjCIdType(), VK_PRValue,
+ SourceLocation()))); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
@@ -3291,7 +3286,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
//
SuperRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(SuperRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
@@ -3309,7 +3304,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// struct __rw_objc_super *
SuperRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(SuperRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
}
MsgExprs.push_back(SuperRep);
@@ -3339,15 +3334,11 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
SmallVector<Expr*, 4> InitExprs;
- InitExprs.push_back(
- NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_BitCast,
- new (Context) DeclRefExpr(*Context,
- CurMethodDef->getSelfDecl(),
- false,
- Context->getObjCIdType(),
- VK_RValue, SourceLocation()))
- ); // set the 'receiver'.
+ InitExprs.push_back(NoTypeInfoCStyleCastExpr(
+ Context, Context->getObjCIdType(), CK_BitCast,
+ new (Context) DeclRefExpr(*Context, CurMethodDef->getSelfDecl(), false,
+ Context->getObjCIdType(), VK_PRValue,
+ SourceLocation()))); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
@@ -3387,7 +3378,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
//
SuperRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(SuperRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
@@ -3399,9 +3390,8 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
- SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
- superType, VK_RValue, ILE,
- false);
+ SuperRep = new (Context) CompoundLiteralExpr(
+ SourceLocation(), superTInfo, superType, VK_PRValue, ILE, false);
}
MsgExprs.push_back(SuperRep);
break;
@@ -3543,7 +3533,7 @@ Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
const FunctionType *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -3684,8 +3674,7 @@ bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
Result += " {\n";
for (const auto *EC : ED->enumerators()) {
Result += "\t"; Result += EC->getName(); Result += " = ";
- llvm::APSInt Val = EC->getInitVal();
- Result += Val.toString(10);
+ Result += toString(EC->getInitVal(), 10);
Result += ",\n";
}
Result += "\t} ";
@@ -4580,11 +4569,9 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
Expr *RHSExp = CEXPR->getRHS();
Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
Expr *CONDExp = CEXPR->getCond();
- ConditionalOperator *CondExpr =
- new (Context) ConditionalOperator(CONDExp,
- SourceLocation(), cast<Expr>(LHSStmt),
- SourceLocation(), cast<Expr>(RHSStmt),
- Exp->getType(), VK_RValue, OK_Ordinary);
+ ConditionalOperator *CondExpr = new (Context) ConditionalOperator(
+ CONDExp, SourceLocation(), cast<Expr>(LHSStmt), SourceLocation(),
+ cast<Expr>(RHSStmt), Exp->getType(), VK_PRValue, OK_Ordinary);
return CondExpr;
} else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
CPT = IRE->getType()->getAs<BlockPointerType>();
@@ -4654,7 +4641,7 @@ Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp
BlkExprs.push_back(*I);
}
CallExpr *CE =
- CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(), VK_RValue,
+ CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(), VK_PRValue,
SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -5283,7 +5270,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
FD = SynthBlockInitFunctionDecl(Tag);
DeclRefExpr *DRE = new (Context)
- DeclRefExpr(*Context, FD, false, FType, VK_RValue, SourceLocation());
+ DeclRefExpr(*Context, FD, false, FType, VK_PRValue, SourceLocation());
SmallVector<Expr*, 4> InitExprs;
@@ -5305,7 +5292,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
const_cast<ASTContext &>(*Context),
new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
VK_LValue, SourceLocation()),
- UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
+ UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_PRValue,
OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
InitExprs.push_back(DescRefExpr);
@@ -5323,9 +5310,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = UnaryOperator::Create(
- const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
+ Exp = UnaryOperator::Create(const_cast<ASTContext &>(*Context), Exp,
+ UO_AddrOf, QT, VK_PRValue, OK_Ordinary,
+ SourceLocation(), false,
+ FPOptionsOverride());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -5340,9 +5328,10 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = UnaryOperator::Create(
- const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
+ Exp = UnaryOperator::Create(const_cast<ASTContext &>(*Context), Exp,
+ UO_AddrOf, QT, VK_PRValue, OK_Ordinary,
+ SourceLocation(), false,
+ FPOptionsOverride());
}
}
@@ -5382,7 +5371,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (!isNestedCapturedVar)
Exp = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), Exp, UO_AddrOf,
- Context->getPointerType(Exp->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(Exp->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
@@ -5409,7 +5398,7 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
NewRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
- Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(NewRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
@@ -7497,7 +7486,7 @@ Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
VK_LValue, SourceLocation());
BinaryOperator *addExpr = BinaryOperator::Create(
*Context, castExpr, DRE, BO_Add,
- Context->getPointerType(Context->CharTy), VK_RValue, OK_Ordinary,
+ Context->getPointerType(Context->CharTy), VK_PRValue, OK_Ordinary,
SourceLocation(), FPOptionsOverride());
// Don't forget the parens to enforce the proper binding.
ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
diff --git a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index 543b3b09a9cc..0750d36b02ac 100644
--- a/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -491,7 +491,7 @@ namespace {
CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
CastKind Kind, Expr *E) {
TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
- return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, nullptr,
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_PRValue, Kind, E, nullptr,
FPOptionsOverride(), TInfo,
SourceLocation(), SourceLocation());
}
@@ -2024,13 +2024,13 @@ RewriteObjC::SynthesizeCallToFunctionDecl(FunctionDecl *FD,
QualType pToFunc = Context->getPointerType(msgSendType);
ImplicitCastExpr *ICE =
ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
- DRE, nullptr, VK_RValue, FPOptionsOverride());
+ DRE, nullptr, VK_PRValue, FPOptionsOverride());
const auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *Exp =
CallExpr::Create(*Context, ICE, Args, FT->getCallResultType(*Context),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
return Exp;
}
@@ -2518,7 +2518,7 @@ Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
DeclRefExpr(*Context, NewVD, false, strType, VK_LValue, SourceLocation());
Expr *Unop = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(DRE->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
// cast to NSConstantString *
CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
@@ -2617,7 +2617,7 @@ CallExpr *RewriteObjC::SynthMsgSendStretCallExpr(FunctionDecl *MsgSendStretFlavo
const auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *STCE =
- CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(), VK_RValue,
+ CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(), VK_PRValue,
SourceLocation(), FPOptionsOverride());
return STCE;
}
@@ -2670,16 +2670,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SmallVector<Expr*, 4> InitExprs;
// set the receiver to self, the first argument to all methods.
- InitExprs.push_back(
- NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_BitCast,
- new (Context) DeclRefExpr(*Context,
- CurMethodDef->getSelfDecl(),
- false,
- Context->getObjCIdType(),
- VK_RValue,
- SourceLocation()))
- ); // set the 'receiver'.
+ InitExprs.push_back(NoTypeInfoCStyleCastExpr(
+ Context, Context->getObjCIdType(), CK_BitCast,
+ new (Context) DeclRefExpr(*Context, CurMethodDef->getSelfDecl(), false,
+ Context->getObjCIdType(), VK_PRValue,
+ SourceLocation()))); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
@@ -2721,7 +2716,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
//
SuperRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(SuperRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
@@ -2739,7 +2734,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
// struct objc_super *
SuperRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(SuperRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
}
MsgExprs.push_back(SuperRep);
@@ -2766,15 +2761,11 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
SmallVector<Expr*, 4> InitExprs;
- InitExprs.push_back(
- NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
- CK_BitCast,
- new (Context) DeclRefExpr(*Context,
- CurMethodDef->getSelfDecl(),
- false,
- Context->getObjCIdType(),
- VK_RValue, SourceLocation()))
- ); // set the 'receiver'.
+ InitExprs.push_back(NoTypeInfoCStyleCastExpr(
+ Context, Context->getObjCIdType(), CK_BitCast,
+ new (Context) DeclRefExpr(*Context, CurMethodDef->getSelfDecl(), false,
+ Context->getObjCIdType(), VK_PRValue,
+ SourceLocation()))); // set the 'receiver'.
// (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
SmallVector<Expr*, 8> ClsExprs;
@@ -2817,7 +2808,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
//
SuperRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), SuperRep, UO_AddrOf,
- Context->getPointerType(SuperRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(SuperRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
SuperRep = NoTypeInfoCStyleCastExpr(Context,
Context->getPointerType(superType),
@@ -2829,9 +2820,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
SourceLocation());
TypeSourceInfo *superTInfo
= Context->getTrivialTypeSourceInfo(superType);
- SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
- superType, VK_RValue, ILE,
- false);
+ SuperRep = new (Context) CompoundLiteralExpr(
+ SourceLocation(), superTInfo, superType, VK_PRValue, ILE, false);
}
MsgExprs.push_back(SuperRep);
break;
@@ -2973,7 +2963,7 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
const auto *FT = msgSendType->castAs<FunctionType>();
CallExpr *CE = CallExpr::Create(*Context, PE, MsgExprs, FT->getReturnType(),
- VK_RValue, EndLoc, FPOptionsOverride());
+ VK_PRValue, EndLoc, FPOptionsOverride());
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -3003,14 +2993,12 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
Context->IntTy,
SourceLocation());
BinaryOperator *lessThanExpr = BinaryOperator::Create(
- *Context, sizeofExpr, limit, BO_LE, Context->IntTy, VK_RValue,
+ *Context, sizeofExpr, limit, BO_LE, Context->IntTy, VK_PRValue,
OK_Ordinary, SourceLocation(), FPOptionsOverride());
// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
- ConditionalOperator *CondExpr =
- new (Context) ConditionalOperator(lessThanExpr,
- SourceLocation(), CE,
- SourceLocation(), STCE,
- returnType, VK_RValue, OK_Ordinary);
+ ConditionalOperator *CondExpr = new (Context) ConditionalOperator(
+ lessThanExpr, SourceLocation(), CE, SourceLocation(), STCE, returnType,
+ VK_PRValue, OK_Ordinary);
ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
CondExpr);
}
@@ -3056,7 +3044,7 @@ Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
*Context, VD, false, getProtocolType(), VK_LValue, SourceLocation());
Expr *DerefExpr = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), DRE, UO_AddrOf,
- Context->getPointerType(DRE->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(DRE->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
CK_BitCast,
@@ -3749,11 +3737,9 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
Expr *RHSExp = CEXPR->getRHS();
Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
Expr *CONDExp = CEXPR->getCond();
- ConditionalOperator *CondExpr =
- new (Context) ConditionalOperator(CONDExp,
- SourceLocation(), cast<Expr>(LHSStmt),
- SourceLocation(), cast<Expr>(RHSStmt),
- Exp->getType(), VK_RValue, OK_Ordinary);
+ ConditionalOperator *CondExpr = new (Context) ConditionalOperator(
+ CONDExp, SourceLocation(), cast<Expr>(LHSStmt), SourceLocation(),
+ cast<Expr>(RHSStmt), Exp->getType(), VK_PRValue, OK_Ordinary);
return CondExpr;
} else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
CPT = IRE->getType()->getAs<BlockPointerType>();
@@ -3823,7 +3809,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
BlkExprs.push_back(*I);
}
CallExpr *CE =
- CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(), VK_RValue,
+ CallExpr::Create(*Context, PE, BlkExprs, Exp->getType(), VK_PRValue,
SourceLocation(), FPOptionsOverride());
return CE;
}
@@ -4422,7 +4408,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
// Simulate a constructor call...
FD = SynthBlockInitFunctionDecl(Tag);
DeclRefExpr *DRE = new (Context)
- DeclRefExpr(*Context, FD, false, FType, VK_RValue, SourceLocation());
+ DeclRefExpr(*Context, FD, false, FType, VK_PRValue, SourceLocation());
SmallVector<Expr*, 4> InitExprs;
@@ -4444,7 +4430,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
const_cast<ASTContext &>(*Context),
new (Context) DeclRefExpr(*Context, NewVD, false, Context->VoidPtrTy,
VK_LValue, SourceLocation()),
- UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_RValue,
+ UO_AddrOf, Context->getPointerType(Context->VoidPtrTy), VK_PRValue,
OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
InitExprs.push_back(DescRefExpr);
@@ -4462,9 +4448,10 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = UnaryOperator::Create(
- const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
+ Exp = UnaryOperator::Create(const_cast<ASTContext &>(*Context), Exp,
+ UO_AddrOf, QT, VK_PRValue, OK_Ordinary,
+ SourceLocation(), false,
+ FPOptionsOverride());
}
} else if (isTopLevelBlockPointerType((*I)->getType())) {
FD = SynthBlockInitFunctionDecl((*I)->getName());
@@ -4479,9 +4466,10 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (HasLocalVariableExternalStorage(*I)) {
QualType QT = (*I)->getType();
QT = Context->getPointerType(QT);
- Exp = UnaryOperator::Create(
- const_cast<ASTContext &>(*Context), Exp, UO_AddrOf, QT, VK_RValue,
- OK_Ordinary, SourceLocation(), false, FPOptionsOverride());
+ Exp = UnaryOperator::Create(const_cast<ASTContext &>(*Context), Exp,
+ UO_AddrOf, QT, VK_PRValue, OK_Ordinary,
+ SourceLocation(), false,
+ FPOptionsOverride());
}
}
InitExprs.push_back(Exp);
@@ -4520,7 +4508,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
if (!isNestedCapturedVar)
Exp = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), Exp, UO_AddrOf,
- Context->getPointerType(Exp->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(Exp->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
InitExprs.push_back(Exp);
@@ -4539,7 +4527,7 @@ Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
SourceLocation(), FPOptionsOverride());
NewRep = UnaryOperator::Create(
const_cast<ASTContext &>(*Context), NewRep, UO_AddrOf,
- Context->getPointerType(NewRep->getType()), VK_RValue, OK_Ordinary,
+ Context->getPointerType(NewRep->getType()), VK_PRValue, OK_Ordinary,
SourceLocation(), false, FPOptionsOverride());
NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
NewRep);
diff --git a/clang/lib/Frontend/TestModuleFileExtension.cpp b/clang/lib/Frontend/TestModuleFileExtension.cpp
index 354aa7f5cd3f..7d4026a7efc6 100644
--- a/clang/lib/Frontend/TestModuleFileExtension.cpp
+++ b/clang/lib/Frontend/TestModuleFileExtension.cpp
@@ -15,6 +15,8 @@
using namespace clang;
using namespace clang::serialization;
+char TestModuleFileExtension::ID = 0;
+
TestModuleFileExtension::Writer::~Writer() { }
void TestModuleFileExtension::Writer::writeExtensionContents(
@@ -127,3 +129,11 @@ TestModuleFileExtension::createExtensionReader(
return std::unique_ptr<ModuleFileExtensionReader>(
new TestModuleFileExtension::Reader(this, Stream));
}
+
+std::string TestModuleFileExtension::str() const {
+ std::string Buffer;
+ llvm::raw_string_ostream OS(Buffer);
+ OS << BlockName << ":" << MajorVersion << ":" << MinorVersion << ":" << Hashed
+ << ":" << UserInfo;
+ return OS.str();
+}
diff --git a/clang/lib/Frontend/TestModuleFileExtension.h b/clang/lib/Frontend/TestModuleFileExtension.h
index 13e090783b11..c8ca4cd4f210 100644
--- a/clang/lib/Frontend/TestModuleFileExtension.h
+++ b/clang/lib/Frontend/TestModuleFileExtension.h
@@ -17,7 +17,8 @@
namespace clang {
/// A module file extension used for testing purposes.
-class TestModuleFileExtension : public ModuleFileExtension {
+class TestModuleFileExtension
+ : public llvm::RTTIExtends<TestModuleFileExtension, ModuleFileExtension> {
std::string BlockName;
unsigned MajorVersion;
unsigned MinorVersion;
@@ -43,14 +44,13 @@ class TestModuleFileExtension : public ModuleFileExtension {
};
public:
- TestModuleFileExtension(StringRef BlockName,
- unsigned MajorVersion,
- unsigned MinorVersion,
- bool Hashed,
+ static char ID;
+
+ TestModuleFileExtension(StringRef BlockName, unsigned MajorVersion,
+ unsigned MinorVersion, bool Hashed,
StringRef UserInfo)
- : BlockName(BlockName),
- MajorVersion(MajorVersion), MinorVersion(MinorVersion),
- Hashed(Hashed), UserInfo(UserInfo) { }
+ : BlockName(BlockName), MajorVersion(MajorVersion),
+ MinorVersion(MinorVersion), Hashed(Hashed), UserInfo(UserInfo) {}
~TestModuleFileExtension() override;
ModuleFileExtensionMetadata getExtensionMetadata() const override;
@@ -64,6 +64,8 @@ public:
createExtensionReader(const ModuleFileExtensionMetadata &Metadata,
ASTReader &Reader, serialization::ModuleFile &Mod,
const llvm::BitstreamCursor &Stream) override;
+
+ std::string str() const;
};
} // end namespace clang
diff --git a/clang/lib/Frontend/TextDiagnostic.cpp b/clang/lib/Frontend/TextDiagnostic.cpp
index e781fd2c0229..8df7496c6ddd 100644
--- a/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/clang/lib/Frontend/TextDiagnostic.cpp
@@ -684,8 +684,7 @@ void TextDiagnostic::emitDiagnosticMessage(
OS.resetColor();
if (DiagOpts->ShowLevel)
- printDiagnosticLevel(OS, Level, DiagOpts->ShowColors,
- DiagOpts->CLFallbackMode);
+ printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
printDiagnosticMessage(OS,
/*IsSupplemental*/ Level == DiagnosticsEngine::Note,
Message, OS.tell() - StartOfLocationInfo,
@@ -695,8 +694,7 @@ void TextDiagnostic::emitDiagnosticMessage(
/*static*/ void
TextDiagnostic::printDiagnosticLevel(raw_ostream &OS,
DiagnosticsEngine::Level Level,
- bool ShowColors,
- bool CLFallbackMode) {
+ bool ShowColors) {
if (ShowColors) {
// Print diagnostic category in bold and color
switch (Level) {
@@ -713,22 +711,13 @@ TextDiagnostic::printDiagnosticLevel(raw_ostream &OS,
switch (Level) {
case DiagnosticsEngine::Ignored:
llvm_unreachable("Invalid diagnostic type");
- case DiagnosticsEngine::Note: OS << "note"; break;
- case DiagnosticsEngine::Remark: OS << "remark"; break;
- case DiagnosticsEngine::Warning: OS << "warning"; break;
- case DiagnosticsEngine::Error: OS << "error"; break;
- case DiagnosticsEngine::Fatal: OS << "fatal error"; break;
+ case DiagnosticsEngine::Note: OS << "note: "; break;
+ case DiagnosticsEngine::Remark: OS << "remark: "; break;
+ case DiagnosticsEngine::Warning: OS << "warning: "; break;
+ case DiagnosticsEngine::Error: OS << "error: "; break;
+ case DiagnosticsEngine::Fatal: OS << "fatal error: "; break;
}
- // In clang-cl /fallback mode, print diagnostics as "error(clang):". This
- // makes it more clear whether a message is coming from clang or cl.exe,
- // and it prevents MSBuild from concluding that the build failed just because
- // there is an "error:" in the output.
- if (CLFallbackMode)
- OS << "(clang)";
-
- OS << ": ";
-
if (ShowColors)
OS.resetColor();
}
diff --git a/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/clang/lib/Frontend/TextDiagnosticPrinter.cpp
index 9feb3c64039f..0ff5376098ff 100644
--- a/clang/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -133,8 +133,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
// diagnostics in a context that lacks language options, a source manager, or
// other infrastructure necessary when emitting more rich diagnostics.
if (!Info.getLocation().isValid()) {
- TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors,
- DiagOpts->CLFallbackMode);
+ TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
TextDiagnostic::printDiagnosticMessage(
OS, /*IsSupplemental=*/Level == DiagnosticsEngine::Note,
DiagMessageStream.str(), OS.tell() - StartOfLocationInfo,
diff --git a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index ac64e1708da6..b95851e380d2 100644
--- a/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -187,7 +187,7 @@ CreateFrontendAction(CompilerInstance &CI) {
bool ExecuteCompilerInvocation(CompilerInstance *Clang) {
// Honor -help.
if (Clang->getFrontendOpts().ShowHelp) {
- driver::getDriverOptTable().PrintHelp(
+ driver::getDriverOptTable().printHelp(
llvm::outs(), "clang -cc1 [options] file...",
"LLVM 'Clang' Compiler: http://clang.llvm.org",
/*Include=*/driver::options::CC1Option,
diff --git a/clang/lib/Headers/__clang_cuda_math.h b/clang/lib/Headers/__clang_cuda_math.h
index acb26ad345d5..538556f394da 100644
--- a/clang/lib/Headers/__clang_cuda_math.h
+++ b/clang/lib/Headers/__clang_cuda_math.h
@@ -166,6 +166,8 @@ __DEVICE__ long long llrint(double __a) { return __nv_llrint(__a); }
__DEVICE__ long long llrintf(float __a) { return __nv_llrintf(__a); }
__DEVICE__ long long llround(double __a) { return __nv_llround(__a); }
__DEVICE__ long long llroundf(float __a) { return __nv_llroundf(__a); }
+__DEVICE__ double round(double __a) { return __nv_round(__a); }
+__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
__DEVICE__ double log(double __a) { return __nv_log(__a); }
__DEVICE__ double log10(double __a) { return __nv_log10(__a); }
__DEVICE__ float log10f(float __a) { return __nv_log10f(__a); }
@@ -270,8 +272,6 @@ __DEVICE__ float rnorm4df(float __a, float __b, float __c, float __d) {
__DEVICE__ float rnormf(int __dim, const float *__t) {
return __nv_rnormf(__dim, __t);
}
-__DEVICE__ double round(double __a) { return __nv_round(__a); }
-__DEVICE__ float roundf(float __a) { return __nv_roundf(__a); }
__DEVICE__ double rsqrt(double __a) { return __nv_rsqrt(__a); }
__DEVICE__ float rsqrtf(float __a) { return __nv_rsqrtf(__a); }
__DEVICE__ double scalbn(double __a, int __b) { return __nv_scalbn(__a, __b); }
diff --git a/clang/lib/Headers/__clang_cuda_runtime_wrapper.h b/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
index f88c39a9b6e5..f401964bd529 100644
--- a/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_cuda_runtime_wrapper.h
@@ -349,9 +349,14 @@ extern "C" {
__device__ int vprintf(const char *, const char *);
__device__ void free(void *) __attribute((nothrow));
__device__ void *malloc(size_t) __attribute((nothrow)) __attribute__((malloc));
+
+// __assertfail() used to have a `noreturn` attribute. Unfortunately that
+// contributed to triggering the longstanding bug in ptxas when assert was used
+// in sufficiently convoluted code. See
+// https://bugs.llvm.org/show_bug.cgi?id=27738 for the details.
__device__ void __assertfail(const char *__message, const char *__file,
unsigned __line, const char *__function,
- size_t __charSize) __attribute__((noreturn));
+ size_t __charSize);
// In order for standard assert() macro on linux to work we need to
// provide device-side __assert_fail()
diff --git a/clang/lib/Headers/__clang_hip_cmath.h b/clang/lib/Headers/__clang_hip_cmath.h
index cd22a2df954b..7342705434e6 100644
--- a/clang/lib/Headers/__clang_hip_cmath.h
+++ b/clang/lib/Headers/__clang_hip_cmath.h
@@ -14,6 +14,7 @@
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif
+#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <limits>
#include <type_traits>
@@ -21,6 +22,7 @@
#endif
#include <limits.h>
#include <stdint.h>
+#endif // !defined(__HIPCC_RTC__)
#pragma push_macro("__DEVICE__")
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
@@ -34,6 +36,9 @@ __DEVICE__ long abs(long __n) { return ::labs(__n); }
__DEVICE__ float fma(float __x, float __y, float __z) {
return ::fmaf(__x, __y, __z);
}
+#if !defined(__HIPCC_RTC__)
+// The value returned by fpclassify is platform dependent, therefore it is not
+// supported by hipRTC.
__DEVICE__ int fpclassify(float __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
@@ -42,11 +47,51 @@ __DEVICE__ int fpclassify(double __x) {
return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
FP_ZERO, __x);
}
+#endif // !defined(__HIPCC_RTC__)
+
__DEVICE__ float frexp(float __arg, int *__exp) {
return ::frexpf(__arg, __exp);
}
+
+#if defined(__OPENMP_AMDGCN__)
+// For OpenMP we work around some old system headers that have non-conforming
+// `isinf(float)` and `isnan(float)` implementations that return an `int`. We do
+// this by providing two versions of these functions, differing only in the
+// return type. To avoid conflicting definitions we disable implicit base
+// function generation. That means we will end up with two specializations, one
+// per type, but only one has a base function defined by the system header.
+#pragma omp begin declare variant match( \
+ implementation = {extension(disable_implicit_base)})
+
+// FIXME: We lack an extension to customize the mangling of the variants, e.g.,
+// add a suffix. This means we would clash with the names of the variants
+// (note that we do not create implicit base functions here). To avoid
+// this clash we add a new trait to some of them that is always true
+// (this is LLVM after all ;)). It will only influence the mangled name
+// of the variants inside the inner region and avoid the clash.
+#pragma omp begin declare variant match(implementation = {vendor(llvm)})
+
+__DEVICE__ int isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ int isinf(double __x) { return ::__isinf(__x); }
+__DEVICE__ int isfinite(float __x) { return ::__finitef(__x); }
+__DEVICE__ int isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ int isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ int isnan(double __x) { return ::__isnan(__x); }
+
+#pragma omp end declare variant
+#endif // defined(__OPENMP_AMDGCN__)
+
+__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
+__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ bool isfinite(float __x) { return ::__finitef(__x); }
__DEVICE__ bool isfinite(double __x) { return ::__finite(__x); }
+__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
+__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
+
+#if defined(__OPENMP_AMDGCN__)
+#pragma omp end declare variant
+#endif // defined(__OPENMP_AMDGCN__)
+
__DEVICE__ bool isgreater(float __x, float __y) {
return __builtin_isgreater(__x, __y);
}
@@ -59,8 +104,6 @@ __DEVICE__ bool isgreaterequal(float __x, float __y) {
__DEVICE__ bool isgreaterequal(double __x, double __y) {
return __builtin_isgreaterequal(__x, __y);
}
-__DEVICE__ bool isinf(float __x) { return ::__isinff(__x); }
-__DEVICE__ bool isinf(double __x) { return ::__isinf(__x); }
__DEVICE__ bool isless(float __x, float __y) {
return __builtin_isless(__x, __y);
}
@@ -79,8 +122,6 @@ __DEVICE__ bool islessgreater(float __x, float __y) {
__DEVICE__ bool islessgreater(double __x, double __y) {
return __builtin_islessgreater(__x, __y);
}
-__DEVICE__ bool isnan(float __x) { return ::__isnanf(__x); }
-__DEVICE__ bool isnan(double __x) { return ::__isnan(__x); }
__DEVICE__ bool isnormal(float __x) { return __builtin_isnormal(__x); }
__DEVICE__ bool isnormal(double __x) { return __builtin_isnormal(__x); }
__DEVICE__ bool isunordered(float __x, float __y) {
@@ -207,11 +248,117 @@ template <bool __B, class __T = void> struct __hip_enable_if {};
template <class __T> struct __hip_enable_if<true, __T> { typedef __T type; };
+namespace __hip {
+template <class _Tp> struct is_integral {
+ enum { value = 0 };
+};
+template <> struct is_integral<bool> {
+ enum { value = 1 };
+};
+template <> struct is_integral<char> {
+ enum { value = 1 };
+};
+template <> struct is_integral<signed char> {
+ enum { value = 1 };
+};
+template <> struct is_integral<unsigned char> {
+ enum { value = 1 };
+};
+template <> struct is_integral<wchar_t> {
+ enum { value = 1 };
+};
+template <> struct is_integral<short> {
+ enum { value = 1 };
+};
+template <> struct is_integral<unsigned short> {
+ enum { value = 1 };
+};
+template <> struct is_integral<int> {
+ enum { value = 1 };
+};
+template <> struct is_integral<unsigned int> {
+ enum { value = 1 };
+};
+template <> struct is_integral<long> {
+ enum { value = 1 };
+};
+template <> struct is_integral<unsigned long> {
+ enum { value = 1 };
+};
+template <> struct is_integral<long long> {
+ enum { value = 1 };
+};
+template <> struct is_integral<unsigned long long> {
+ enum { value = 1 };
+};
+
+// ToDo: specializes is_arithmetic<_Float16>
+template <class _Tp> struct is_arithmetic {
+ enum { value = 0 };
+};
+template <> struct is_arithmetic<bool> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<char> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<signed char> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned char> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<wchar_t> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<short> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned short> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<int> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned int> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<long> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned long> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<long long> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<unsigned long long> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<float> {
+ enum { value = 1 };
+};
+template <> struct is_arithmetic<double> {
+ enum { value = 1 };
+};
+
+struct true_type {
+ static const __constant__ bool value = true;
+};
+struct false_type {
+ static const __constant__ bool value = false;
+};
+
+template <typename __T, typename __U> struct is_same : public false_type {};
+template <typename __T> struct is_same<__T, __T> : public true_type {};
+
+template <typename __T> struct add_rvalue_reference { typedef __T &&type; };
+
+template <typename __T> typename add_rvalue_reference<__T>::type declval();
+
// decltype is only available in C++11 and above.
#if __cplusplus >= 201103L
// __hip_promote
-namespace __hip {
-
template <class _Tp> struct __numeric_type {
static void __test(...);
static _Float16 __test(_Float16);
@@ -227,8 +374,8 @@ template <class _Tp> struct __numeric_type {
// No support for long double, use double instead.
static double __test(long double);
- typedef decltype(__test(std::declval<_Tp>())) type;
- static const bool value = !std::is_same<type, void>::value;
+ typedef decltype(__test(declval<_Tp>())) type;
+ static const bool value = !is_same<type, void>::value;
};
template <> struct __numeric_type<void> { static const bool value = true; };
@@ -271,18 +418,17 @@ public:
template <class _A1, class _A2 = void, class _A3 = void>
class __promote : public __promote_imp<_A1, _A2, _A3> {};
-
-} // namespace __hip
#endif //__cplusplus >= 201103L
+} // namespace __hip
// __HIP_OVERLOAD1 is used to resolve function calls with integer argument to
// avoid compilation error due to ambibuity. e.g. floor(5) is resolved with
// floor(double).
#define __HIP_OVERLOAD1(__retty, __fn) \
template <typename __T> \
- __DEVICE__ typename __hip_enable_if<std::numeric_limits<__T>::is_integer, \
- __retty>::type \
- __fn(__T __x) { \
+ __DEVICE__ \
+ typename __hip_enable_if<__hip::is_integral<__T>::value, __retty>::type \
+ __fn(__T __x) { \
return ::__fn((double)__x); \
}
@@ -293,8 +439,7 @@ class __promote : public __promote_imp<_A1, _A2, _A3> {};
#define __HIP_OVERLOAD2(__retty, __fn) \
template <typename __T1, typename __T2> \
__DEVICE__ typename __hip_enable_if< \
- std::numeric_limits<__T1>::is_specialized && \
- std::numeric_limits<__T2>::is_specialized, \
+ __hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value, \
typename __hip::__promote<__T1, __T2>::type>::type \
__fn(__T1 __x, __T2 __y) { \
typedef typename __hip::__promote<__T1, __T2>::type __result_type; \
@@ -303,16 +448,14 @@ class __promote : public __promote_imp<_A1, _A2, _A3> {};
#else
#define __HIP_OVERLOAD2(__retty, __fn) \
template <typename __T1, typename __T2> \
- __DEVICE__ \
- typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized && \
- std::numeric_limits<__T2>::is_specialized, \
- __retty>::type \
- __fn(__T1 __x, __T2 __y) { \
+ __DEVICE__ typename __hip_enable_if<__hip::is_arithmetic<__T1>::value && \
+ __hip::is_arithmetic<__T2>::value, \
+ __retty>::type \
+ __fn(__T1 __x, __T2 __y) { \
return __fn((double)__x, (double)__y); \
}
#endif
-__HIP_OVERLOAD1(double, abs)
__HIP_OVERLOAD1(double, acos)
__HIP_OVERLOAD1(double, acosh)
__HIP_OVERLOAD1(double, asin)
@@ -336,7 +479,9 @@ __HIP_OVERLOAD1(double, floor)
__HIP_OVERLOAD2(double, fmax)
__HIP_OVERLOAD2(double, fmin)
__HIP_OVERLOAD2(double, fmod)
+#if !defined(__HIPCC_RTC__)
__HIP_OVERLOAD1(int, fpclassify)
+#endif // !defined(__HIPCC_RTC__)
__HIP_OVERLOAD2(double, hypot)
__HIP_OVERLOAD1(int, ilogb)
__HIP_OVERLOAD1(bool, isfinite)
@@ -382,9 +527,8 @@ __HIP_OVERLOAD2(double, min)
#if __cplusplus >= 201103L
template <typename __T1, typename __T2, typename __T3>
__DEVICE__ typename __hip_enable_if<
- std::numeric_limits<__T1>::is_specialized &&
- std::numeric_limits<__T2>::is_specialized &&
- std::numeric_limits<__T3>::is_specialized,
+ __hip::is_arithmetic<__T1>::value && __hip::is_arithmetic<__T2>::value &&
+ __hip::is_arithmetic<__T3>::value,
typename __hip::__promote<__T1, __T2, __T3>::type>::type
fma(__T1 __x, __T2 __y, __T3 __z) {
typedef typename __hip::__promote<__T1, __T2, __T3>::type __result_type;
@@ -392,33 +536,32 @@ fma(__T1 __x, __T2 __y, __T3 __z) {
}
#else
template <typename __T1, typename __T2, typename __T3>
-__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
- std::numeric_limits<__T2>::is_specialized &&
- std::numeric_limits<__T3>::is_specialized,
- double>::type
- fma(__T1 __x, __T2 __y, __T3 __z) {
+__DEVICE__ typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
+ __hip::is_arithmetic<__T2>::value &&
+ __hip::is_arithmetic<__T3>::value,
+ double>::type
+fma(__T1 __x, __T2 __y, __T3 __z) {
return ::fma((double)__x, (double)__y, (double)__z);
}
#endif
template <typename __T>
__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
frexp(__T __x, int *__exp) {
return ::frexp((double)__x, __exp);
}
template <typename __T>
__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
ldexp(__T __x, int __exp) {
return ::ldexp((double)__x, __exp);
}
template <typename __T>
__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
modf(__T __x, double *__exp) {
return ::modf((double)__x, __exp);
}
@@ -426,8 +569,8 @@ __DEVICE__
#if __cplusplus >= 201103L
template <typename __T1, typename __T2>
__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
- std::numeric_limits<__T2>::is_specialized,
+ typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
+ __hip::is_arithmetic<__T2>::value,
typename __hip::__promote<__T1, __T2>::type>::type
remquo(__T1 __x, __T2 __y, int *__quo) {
typedef typename __hip::__promote<__T1, __T2>::type __result_type;
@@ -435,25 +578,24 @@ __DEVICE__
}
#else
template <typename __T1, typename __T2>
-__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T1>::is_specialized &&
- std::numeric_limits<__T2>::is_specialized,
- double>::type
- remquo(__T1 __x, __T2 __y, int *__quo) {
+__DEVICE__ typename __hip_enable_if<__hip::is_arithmetic<__T1>::value &&
+ __hip::is_arithmetic<__T2>::value,
+ double>::type
+remquo(__T1 __x, __T2 __y, int *__quo) {
return ::remquo((double)__x, (double)__y, __quo);
}
#endif
template <typename __T>
__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
scalbln(__T __x, long int __exp) {
return ::scalbln((double)__x, __exp);
}
template <typename __T>
__DEVICE__
- typename __hip_enable_if<std::numeric_limits<__T>::is_integer, double>::type
+ typename __hip_enable_if<__hip::is_integral<__T>::value, double>::type
scalbn(__T __x, int __exp) {
return ::scalbn((double)__x, __exp);
}
@@ -468,14 +610,15 @@ __DEVICE__
#endif // defined(__cplusplus)
// Define these overloads inside the namespace our standard library uses.
+#if !defined(__HIPCC_RTC__)
#ifdef _LIBCPP_BEGIN_NAMESPACE_STD
_LIBCPP_BEGIN_NAMESPACE_STD
#else
namespace std {
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_BEGIN_NAMESPACE_VERSION
-#endif
-#endif
+#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
+#endif // _LIBCPP_BEGIN_NAMESPACE_STD
// Pull the new overloads we defined above into namespace std.
// using ::abs; - This may be considered for C++.
@@ -620,11 +763,13 @@ _LIBCPP_END_NAMESPACE_STD
#else
#ifdef _GLIBCXX_BEGIN_NAMESPACE_VERSION
_GLIBCXX_END_NAMESPACE_VERSION
-#endif
+#endif // _GLIBCXX_BEGIN_NAMESPACE_VERSION
} // namespace std
-#endif
+#endif // _LIBCPP_END_NAMESPACE_STD
+#endif // !defined(__HIPCC_RTC__)
// Define device-side math functions from <ymath.h> on MSVC.
+#if !defined(__HIPCC_RTC__)
#if defined(_MSC_VER)
// Before VS2019, `<ymath.h>` is also included in `<limits>` and other headers.
@@ -658,6 +803,7 @@ __DEVICE__ __attribute__((overloadable)) float _FSinh(float x, float y) {
}
#endif // defined(__cplusplus)
#endif // defined(_MSC_VER)
+#endif // !defined(__HIPCC_RTC__)
#pragma pop_macro("__DEVICE__")
diff --git a/clang/lib/Headers/__clang_hip_libdevice_declares.h b/clang/lib/Headers/__clang_hip_libdevice_declares.h
index ac98907ad5de..8be848ba2aa3 100644
--- a/clang/lib/Headers/__clang_hip_libdevice_declares.h
+++ b/clang/lib/Headers/__clang_hip_libdevice_declares.h
@@ -138,14 +138,22 @@ __device__ __attribute__((const)) float __ocml_fma_rtn_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtp_f32(float, float, float);
__device__ __attribute__((const)) float __ocml_fma_rtz_f32(float, float, float);
-__device__ __attribute__((const)) float
-__llvm_amdgcn_cos_f32(float) __asm("llvm.amdgcn.cos.f32");
-__device__ __attribute__((const)) float
-__llvm_amdgcn_rcp_f32(float) __asm("llvm.amdgcn.rcp.f32");
-__device__ __attribute__((const)) float
-__llvm_amdgcn_rsq_f32(float) __asm("llvm.amdgcn.rsq.f32");
-__device__ __attribute__((const)) float
-__llvm_amdgcn_sin_f32(float) __asm("llvm.amdgcn.sin.f32");
+__device__ inline __attribute__((const)) float
+__llvm_amdgcn_cos_f32(float __x) {
+ return __builtin_amdgcn_cosf(__x);
+}
+__device__ inline __attribute__((const)) float
+__llvm_amdgcn_rcp_f32(float __x) {
+ return __builtin_amdgcn_rcpf(__x);
+}
+__device__ inline __attribute__((const)) float
+__llvm_amdgcn_rsq_f32(float __x) {
+ return __builtin_amdgcn_rsqf(__x);
+}
+__device__ inline __attribute__((const)) float
+__llvm_amdgcn_sin_f32(float __x) {
+ return __builtin_amdgcn_sinf(__x);
+}
// END INTRINSICS
// END FLOAT
@@ -269,10 +277,14 @@ __device__ __attribute__((const)) double __ocml_fma_rtp_f64(double, double,
__device__ __attribute__((const)) double __ocml_fma_rtz_f64(double, double,
double);
-__device__ __attribute__((const)) double
-__llvm_amdgcn_rcp_f64(double) __asm("llvm.amdgcn.rcp.f64");
-__device__ __attribute__((const)) double
-__llvm_amdgcn_rsq_f64(double) __asm("llvm.amdgcn.rsq.f64");
+__device__ inline __attribute__((const)) double
+__llvm_amdgcn_rcp_f64(double __x) {
+ return __builtin_amdgcn_rcp(__x);
+}
+__device__ inline __attribute__((const)) double
+__llvm_amdgcn_rsq_f64(double __x) {
+ return __builtin_amdgcn_rsq(__x);
+}
__device__ __attribute__((const)) _Float16 __ocml_ceil_f16(_Float16);
__device__ _Float16 __ocml_cos_f16(_Float16);
diff --git a/clang/lib/Headers/__clang_hip_math.h b/clang/lib/Headers/__clang_hip_math.h
index 14d91c66b352..1f0982d92eff 100644
--- a/clang/lib/Headers/__clang_hip_math.h
+++ b/clang/lib/Headers/__clang_hip_math.h
@@ -13,11 +13,13 @@
#error "This file is for HIP and OpenMP AMDGCN device compilation only."
#endif
+#if !defined(__HIPCC_RTC__)
#if defined(__cplusplus)
#include <algorithm>
#endif
#include <limits.h>
#include <stdint.h>
+#endif // __HIPCC_RTC__
#pragma push_macro("__DEVICE__")
#define __DEVICE__ static __device__ inline __attribute__((always_inline))
@@ -36,7 +38,7 @@ template<bool>
struct __compare_result{};
template<>
struct __compare_result<true> {
- static const bool valid;
+ static const __device__ bool valid;
};
__DEVICE__
@@ -1260,6 +1262,7 @@ float min(float __x, float __y) { return fminf(__x, __y); }
__DEVICE__
double min(double __x, double __y) { return fmin(__x, __y); }
+#if !defined(__HIPCC_RTC__)
__host__ inline static int min(int __arg1, int __arg2) {
return std::min(__arg1, __arg2);
}
@@ -1267,6 +1270,7 @@ __host__ inline static int min(int __arg1, int __arg2) {
__host__ inline static int max(int __arg1, int __arg2) {
return std::max(__arg1, __arg2);
}
+#endif // __HIPCC_RTC__
#endif
#pragma pop_macro("__DEVICE__")
diff --git a/clang/lib/Headers/__clang_hip_runtime_wrapper.h b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
index 81a16a265ae8..73021d256cba 100644
--- a/clang/lib/Headers/__clang_hip_runtime_wrapper.h
+++ b/clang/lib/Headers/__clang_hip_runtime_wrapper.h
@@ -18,52 +18,107 @@
#if __HIP__
-#include <cmath>
-#include <cstdlib>
-#include <stdlib.h>
-
#define __host__ __attribute__((host))
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
+#define __managed__ __attribute__((managed))
#if !defined(__cplusplus) || __cplusplus < 201103L
#define nullptr NULL;
#endif
+#ifdef __cplusplus
+extern "C" {
+ __attribute__((__visibility__("default")))
+ __attribute__((weak))
+ __attribute__((noreturn))
+ __device__ void __cxa_pure_virtual(void) {
+ __builtin_trap();
+ }
+ __attribute__((__visibility__("default")))
+ __attribute__((weak))
+ __attribute__((noreturn))
+ __device__ void __cxa_deleted_virtual(void) {
+ __builtin_trap();
+ }
+}
+#endif //__cplusplus
+
+#if !defined(__HIPCC_RTC__)
+#include <cmath>
+#include <cstdlib>
+#include <stdlib.h>
+#else
+typedef __SIZE_TYPE__ size_t;
+// Define macros which are needed to declare HIP device API's without standard
+// C/C++ headers. This is for readability so that these API's can be written
+// the same way as non-hipRTC use case. These macros need to be popped so that
+// they do not pollute users' name space.
+#pragma push_macro("NULL")
+#pragma push_macro("uint32_t")
+#pragma push_macro("uint64_t")
+#pragma push_macro("CHAR_BIT")
+#pragma push_macro("INT_MAX")
+#define NULL (void *)0
+#define uint32_t __UINT32_TYPE__
+#define uint64_t __UINT64_TYPE__
+#define CHAR_BIT __CHAR_BIT__
+#define INT_MAX __INTMAX_MAX__
+#endif // __HIPCC_RTC__
+
+typedef __SIZE_TYPE__ __hip_size_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif //__cplusplus
+
#if __HIP_ENABLE_DEVICE_MALLOC__
-extern "C" __device__ void *__hip_malloc(size_t __size);
-extern "C" __device__ void *__hip_free(void *__ptr);
-static inline __device__ void *malloc(size_t __size) {
+__device__ void *__hip_malloc(__hip_size_t __size);
+__device__ void *__hip_free(void *__ptr);
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
return __hip_malloc(__size);
}
-static inline __device__ void *free(void *__ptr) { return __hip_free(__ptr); }
+__attribute__((weak)) inline __device__ void *free(void *__ptr) {
+ return __hip_free(__ptr);
+}
#else
-static inline __device__ void *malloc(size_t __size) {
+__attribute__((weak)) inline __device__ void *malloc(__hip_size_t __size) {
__builtin_trap();
- return nullptr;
+ return (void *)0;
}
-static inline __device__ void *free(void *__ptr) {
+__attribute__((weak)) inline __device__ void *free(void *__ptr) {
__builtin_trap();
- return nullptr;
+ return (void *)0;
}
#endif
+#ifdef __cplusplus
+} // extern "C"
+#endif //__cplusplus
+
#include <__clang_hip_libdevice_declares.h>
#include <__clang_hip_math.h>
-#if !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+#if defined(__HIPCC_RTC__)
+#include <__clang_hip_cmath.h>
+#else
#include <__clang_cuda_math_forward_declares.h>
#include <__clang_hip_cmath.h>
#include <__clang_cuda_complex_builtins.h>
-
#include <algorithm>
#include <complex>
#include <new>
-#endif // !_OPENMP || __HIP_ENABLE_CUDA_WRAPPER_FOR_OPENMP__
+#endif // __HIPCC_RTC__
#define __CLANG_HIP_RUNTIME_WRAPPER_INCLUDED__ 1
-
+#if defined(__HIPCC_RTC__)
+#pragma pop_macro("NULL")
+#pragma pop_macro("uint32_t")
+#pragma pop_macro("uint64_t")
+#pragma pop_macro("CHAR_BIT")
+#pragma pop_macro("INT_MAX")
+#endif // __HIPCC_RTC__
#endif // __HIP__
#endif // __CLANG_HIP_RUNTIME_WRAPPER_H__
diff --git a/clang/lib/Headers/altivec.h b/clang/lib/Headers/altivec.h
index 4d50d47d51b5..0dd8c859366b 100644
--- a/clang/lib/Headers/altivec.h
+++ b/clang/lib/Headers/altivec.h
@@ -41,9 +41,7 @@
#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
-#ifdef __POWER9_VECTOR__
#include <stddef.h>
-#endif
static __inline__ vector signed char __ATTRS_o_ai vec_perm(
vector signed char __a, vector signed char __b, vector unsigned char __c);
@@ -126,7 +124,7 @@ vec_abs(vector signed int __a) {
return __builtin_altivec_vmaxsw(__a, -__a);
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
static __inline__ vector signed long long __ATTRS_o_ai
vec_abs(vector signed long long __a) {
return __builtin_altivec_vmaxsd(__a, -__a);
@@ -284,7 +282,7 @@ vec_add(vector unsigned int __a, vector bool int __b) {
return __a + (vector unsigned int)__b;
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
static __inline__ vector signed long long __ATTRS_o_ai
vec_add(vector signed long long __a, vector signed long long __b) {
return __a + __b;
@@ -295,6 +293,7 @@ vec_add(vector unsigned long long __a, vector unsigned long long __b) {
return __a + __b;
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_add(vector signed __int128 __a, vector signed __int128 __b) {
return __a + __b;
@@ -304,7 +303,37 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __a + __b;
}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vadduqm(__a, __b);
+}
+#elif defined(__VSX__)
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_add(vector signed long long __a, vector signed long long __b) {
+#ifdef __LITTLE_ENDIAN__
+ // Little endian systems on CPU's prior to Power8 don't really exist
+ // so scalarizing is fine.
+ return __a + __b;
+#else
+ vector unsigned int __res =
+ (vector unsigned int)__a + (vector unsigned int)__b;
+ vector unsigned int __carry = __builtin_altivec_vaddcuw(
+ (vector unsigned int)__a, (vector unsigned int)__b);
+ __carry = __builtin_shufflevector((vector unsigned char)__carry,
+ (vector unsigned char)__carry, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
+ return (vector signed long long)(__res + __carry);
+#endif
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_add(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector unsigned long long)vec_add((vector signed long long)__a,
+ (vector signed long long)__b);
+}
+#endif // __POWER8_VECTOR__
static __inline__ vector float __ATTRS_o_ai vec_add(vector float __a,
vector float __b) {
@@ -320,7 +349,8 @@ static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
/* vec_adde */
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_adde(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
@@ -334,6 +364,13 @@ vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
}
#endif
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c);
+}
+#endif
+
static __inline__ vector signed int __ATTRS_o_ai
vec_adde(vector signed int __a, vector signed int __b,
vector signed int __c) {
@@ -352,7 +389,8 @@ vec_adde(vector unsigned int __a, vector unsigned int __b,
/* vec_addec */
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_addec(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
@@ -364,7 +402,15 @@ vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
return __builtin_altivec_vaddecuq(__a, __b, __c);
}
+#endif
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
+}
+
+#ifdef __powerpc64__
static __inline__ vector signed int __ATTRS_o_ai
vec_addec(vector signed int __a, vector signed int __b,
vector signed int __c) {
@@ -407,8 +453,8 @@ vec_addec(vector unsigned int __a, vector unsigned int __b,
vector unsigned int ret = { __result[0], __result[1], __result[2], __result[3] };
return ret;
}
-
-#endif
+#endif // __powerpc64__
+#endif // __POWER8_VECTOR__
/* vec_vaddubm */
@@ -534,7 +580,8 @@ vec_addc(vector unsigned int __a, vector unsigned int __b) {
return __builtin_altivec_vaddcuw(__a, __b);
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_addc(vector signed __int128 __a, vector signed __int128 __b) {
return (vector signed __int128)__builtin_altivec_vaddcuq(
@@ -545,6 +592,12 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __builtin_altivec_vaddcuq(__a, __b);
}
+#endif
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b);
+}
#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
/* vec_vaddcuw */
@@ -748,7 +801,8 @@ vec_vadduws(vector unsigned int __a, vector bool int __b) {
return __builtin_altivec_vadduws(__a, (vector unsigned int)__b);
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
/* vec_vadduqm */
static __inline__ vector signed __int128 __ATTRS_o_ai
@@ -1598,6 +1652,17 @@ static __inline__ vector double __ATTRS_o_ai vec_ceil(vector double __a) {
}
#endif
+/* vec_roundp */
+static __inline__ vector float __ATTRS_o_ai vec_roundp(vector float __a) {
+ return vec_ceil(__a);
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_roundp(vector double __a) {
+ return vec_ceil(__a);
+}
+#endif
+
/* vec_vrfip */
static __inline__ vector float __attribute__((__always_inline__))
@@ -1690,7 +1755,31 @@ vec_cmpeq(vector bool long long __a, vector bool long long __b) {
return (vector bool long long)__builtin_altivec_vcmpequd(
(vector long long)__a, (vector long long)__b);
}
+#elif defined(__VSX__)
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector signed long long __a, vector signed long long __b) {
+ vector bool int __wordcmp =
+ vec_cmpeq((vector signed int)__a, (vector signed int)__b);
+#ifdef __LITTLE_ENDIAN__
+ __wordcmp &= __builtin_shufflevector(__wordcmp, __wordcmp, 3, 0, 1, 2);
+ return (vector bool long long)__builtin_shufflevector(__wordcmp, __wordcmp, 1,
+ 1, 3, 3);
+#else
+ __wordcmp &= __builtin_shufflevector(__wordcmp, __wordcmp, 1, 2, 3, 0);
+ return (vector bool long long)__builtin_shufflevector(__wordcmp, __wordcmp, 0,
+ 0, 2, 2);
+#endif
+}
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector unsigned long long __a, vector unsigned long long __b) {
+ return vec_cmpeq((vector signed long long)__a, (vector signed long long)__b);
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpeq(vector bool long long __a, vector bool long long __b) {
+ return vec_cmpeq((vector signed long long)__a, (vector signed long long)__b);
+}
#endif
static __inline__ vector bool int __ATTRS_o_ai vec_cmpeq(vector float __a,
@@ -1709,7 +1798,7 @@ vec_cmpeq(vector double __a, vector double __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
return (vector bool __int128)__builtin_altivec_vcmpequq(
@@ -1786,7 +1875,7 @@ vec_cmpne(vector float __a, vector float __b) {
(vector int)__b);
}
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
@@ -1884,6 +1973,7 @@ vec_parity_lsbb(vector signed int __a) {
return __builtin_altivec_vprtybw(__a);
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_parity_lsbb(vector unsigned __int128 __a) {
return __builtin_altivec_vprtybq(__a);
@@ -1893,6 +1983,7 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_parity_lsbb(vector signed __int128 __a) {
return __builtin_altivec_vprtybq(__a);
}
+#endif
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_parity_lsbb(vector unsigned long long __a) {
@@ -1976,6 +2067,24 @@ vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
return (vector bool long long)
~(__builtin_altivec_vcmpequd((vector long long)__a, (vector long long)__b));
}
+#elif defined(__VSX__)
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector bool long long __a, vector bool long long __b) {
+ return (vector bool long long)~(
+ vec_cmpeq((vector signed long long)__a, (vector signed long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector signed long long __a, vector signed long long __b) {
+ return (vector bool long long)~(
+ vec_cmpeq((vector signed long long)__a, (vector signed long long)__b));
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpne(vector unsigned long long __a, vector unsigned long long __b) {
+ return (vector bool long long)~(
+ vec_cmpeq((vector signed long long)__a, (vector signed long long)__b));
+}
#endif
#ifdef __VSX__
@@ -2028,6 +2137,46 @@ static __inline__ vector bool long long __ATTRS_o_ai
vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
return (vector bool long long)__builtin_altivec_vcmpgtud(__a, __b);
}
+#elif defined(__VSX__)
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector signed long long __a, vector signed long long __b) {
+ vector signed int __sgtw = (vector signed int)vec_cmpgt(
+ (vector signed int)__a, (vector signed int)__b);
+ vector unsigned int __ugtw = (vector unsigned int)vec_cmpgt(
+ (vector unsigned int)__a, (vector unsigned int)__b);
+ vector unsigned int __eqw = (vector unsigned int)vec_cmpeq(
+ (vector signed int)__a, (vector signed int)__b);
+#ifdef __LITTLE_ENDIAN__
+ __ugtw = __builtin_shufflevector(__ugtw, __ugtw, 3, 0, 1, 2) & __eqw;
+ __sgtw |= (vector signed int)__ugtw;
+ return (vector bool long long)__builtin_shufflevector(__sgtw, __sgtw, 1, 1, 3,
+ 3);
+#else
+ __ugtw = __builtin_shufflevector(__ugtw, __ugtw, 1, 2, 3, 0) & __eqw;
+ __sgtw |= (vector signed int)__ugtw;
+ return (vector bool long long)__builtin_shufflevector(__sgtw, __sgtw, 0, 0, 2,
+ 2);
+#endif
+}
+
+static __inline__ vector bool long long __ATTRS_o_ai
+vec_cmpgt(vector unsigned long long __a, vector unsigned long long __b) {
+ vector unsigned int __ugtw = (vector unsigned int)vec_cmpgt(
+ (vector unsigned int)__a, (vector unsigned int)__b);
+ vector unsigned int __eqw = (vector unsigned int)vec_cmpeq(
+ (vector signed int)__a, (vector signed int)__b);
+#ifdef __LITTLE_ENDIAN__
+ __eqw = __builtin_shufflevector(__ugtw, __ugtw, 3, 0, 1, 2) & __eqw;
+ __ugtw |= __eqw;
+ return (vector bool long long)__builtin_shufflevector(__ugtw, __ugtw, 1, 1, 3,
+ 3);
+#else
+ __eqw = __builtin_shufflevector(__ugtw, __ugtw, 1, 2, 3, 0) & __eqw;
+ __ugtw |= __eqw;
+ return (vector bool long long)__builtin_shufflevector(__ugtw, __ugtw, 0, 0, 2,
+ 2);
+#endif
+}
#endif
static __inline__ vector bool int __ATTRS_o_ai vec_cmpgt(vector float __a,
@@ -2046,7 +2195,7 @@ vec_cmpgt(vector double __a, vector double __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
return (vector bool __int128)__builtin_altivec_vcmpgtsq(
@@ -2106,9 +2255,7 @@ static __inline__ vector bool long long __ATTRS_o_ai
vec_cmpge(vector double __a, vector double __b) {
return (vector bool long long)__builtin_vsx_xvcmpgedp(__a, __b);
}
-#endif
-#ifdef __POWER8_VECTOR__
static __inline__ vector bool long long __ATTRS_o_ai
vec_cmpge(vector signed long long __a, vector signed long long __b) {
return ~(vec_cmpgt(__b, __a));
@@ -2120,7 +2267,7 @@ vec_cmpge(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpge(vector signed __int128 __a, vector signed __int128 __b) {
return ~(vec_cmpgt(__b, __a));
@@ -2230,9 +2377,7 @@ static __inline__ vector bool long long __ATTRS_o_ai
vec_cmple(vector double __a, vector double __b) {
return vec_cmpge(__b, __a);
}
-#endif
-#ifdef __POWER8_VECTOR__
static __inline__ vector bool long long __ATTRS_o_ai
vec_cmple(vector signed long long __a, vector signed long long __b) {
return vec_cmpge(__b, __a);
@@ -2244,7 +2389,7 @@ vec_cmple(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmple(vector signed __int128 __a, vector signed __int128 __b) {
return vec_cmpge(__b, __a);
@@ -2300,7 +2445,7 @@ vec_cmplt(vector double __a, vector double __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmplt(vector signed __int128 __a, vector signed __int128 __b) {
return vec_cmpgt(__b, __a);
@@ -2312,7 +2457,7 @@ vec_cmplt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
}
#endif
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ vector bool long long __ATTRS_o_ai
vec_cmplt(vector signed long long __a, vector signed long long __b) {
return vec_cmpgt(__b, __a);
@@ -2322,7 +2467,9 @@ static __inline__ vector bool long long __ATTRS_o_ai
vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
return vec_cmpgt(__b, __a);
}
+#endif
+#ifdef __POWER8_VECTOR__
/* vec_popcnt */
static __inline__ vector signed char __ATTRS_o_ai
@@ -2358,6 +2505,7 @@ vec_popcnt(vector unsigned long long __a) {
return __builtin_altivec_vpopcntd(__a);
}
+#define vec_vclz vec_cntlz
/* vec_cntlz */
static __inline__ vector signed char __ATTRS_o_ai
@@ -2870,6 +3018,7 @@ static __inline__ vector float __ATTRS_o_ai vec_xl_len(const float *__a, size_t
return (vector float)__builtin_vsx_lxvl(__a, (__b << 56));
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_len(const signed __int128 *__a, size_t __b) {
return (vector signed __int128)__builtin_vsx_lxvl(__a, (__b << 56));
@@ -2879,6 +3028,7 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_xl_len(const unsigned __int128 *__a, size_t __b) {
return (vector unsigned __int128)__builtin_vsx_lxvl(__a, (__b << 56));
}
+#endif
static __inline__ vector signed long long __ATTRS_o_ai
vec_xl_len(const signed long long *__a, size_t __b) {
@@ -2946,6 +3096,7 @@ static __inline__ void __ATTRS_o_ai vec_xst_len(vector float __a, float *__b,
return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
}
+#ifdef __SIZEOF_INT128__
static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed __int128 __a,
signed __int128 *__b,
size_t __c) {
@@ -2957,6 +3108,7 @@ static __inline__ void __ATTRS_o_ai vec_xst_len(vector unsigned __int128 __a,
size_t __c) {
return __builtin_vsx_stxvl((vector int)__a, __b, (__c << 56));
}
+#endif
static __inline__ void __ATTRS_o_ai vec_xst_len(vector signed long long __a,
signed long long *__b,
@@ -2991,6 +3143,15 @@ static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
#endif
#endif
+#if defined(__POWER9_VECTOR__) && defined(__powerpc64__)
+#define __vec_ldrmb(PTR, CNT) vec_xl_len_r((const unsigned char *)(PTR), (CNT))
+#define __vec_strmb(PTR, CNT, VAL) \
+ vec_xst_len_r((VAL), (unsigned char *)(PTR), (CNT))
+#else
+#define __vec_ldrmb __builtin_vsx_ldrmb
+#define __vec_strmb __builtin_vsx_strmb
+#endif
+
/* vec_cpsgn */
#ifdef __VSX__
@@ -3008,6 +3169,23 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
/* vec_ctf */
#ifdef __VSX__
+// There are some functions that have different signatures with the XL compiler
+// from those in Clang/GCC and documented in the PVIPR. This macro ensures that
+// the XL-compatible signatures are used for those functions.
+#ifdef __XL_COMPAT_ALTIVEC__
+#define vec_ctf(__a, __b) \
+ _Generic((__a), vector int \
+ : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
+ vector unsigned int \
+ : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+ (__b)), \
+ vector unsigned long long \
+ : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
+ vector signed long long \
+ : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+#else // __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
_Generic((__a), vector int \
: (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
@@ -3024,6 +3202,7 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
vector double) * \
(vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
<< 52)))
+#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_ctf(__a, __b) \
_Generic((__a), vector int \
@@ -3033,10 +3212,32 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
(__b)))
#endif
+/* vec_ctd */
+#ifdef __VSX__
+#define vec_ctd(__a, __b) \
+ _Generic((__a), vector signed int \
+ : (vec_doublee((vector signed int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
+ << 52)), \
+ vector unsigned int \
+ : (vec_doublee((vector unsigned int)(__a)) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
+ << 52)), \
+ vector unsigned long long \
+ : (__builtin_convertvector((vector unsigned long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
+ << 52)), \
+ vector signed long long \
+ : (__builtin_convertvector((vector signed long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
+ << 52)))
+#endif // __VSX__
+
/* vec_vcfsx */
#define vec_vcfux __builtin_altivec_vcfux
-
/* vec_vcfux */
#define vec_vcfsx(__a, __b) __builtin_altivec_vcfsx((vector int)(__a), (__b))
@@ -3044,6 +3245,19 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
/* vec_cts */
#ifdef __VSX__
+#ifdef __XL_COMPAT_ALTIVEC__
+#define vec_cts(__a, __b) \
+ _Generic((__a), vector float \
+ : __builtin_altivec_vctsxs((vector float)(__a), (__b)), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
+ << 52); \
+ __builtin_vsx_xvcvdpsxws(__ret); \
+ }))
+#else // __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
_Generic((__a), vector float \
: __builtin_altivec_vctsxs((vector float)(__a), (__b)), \
@@ -3055,6 +3269,7 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
<< 52); \
__builtin_convertvector(__ret, vector signed long long); \
}))
+#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_cts __builtin_altivec_vctsxs
#endif
@@ -3066,6 +3281,19 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
/* vec_ctu */
#ifdef __VSX__
+#ifdef __XL_COMPAT_ALTIVEC__
+#define vec_ctu(__a, __b) \
+ _Generic((__a), vector float \
+ : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ << 52); \
+ __builtin_vsx_xvcvdpuxws(__ret); \
+ }))
+#else // __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
_Generic((__a), vector float \
: __builtin_altivec_vctuxs((vector float)(__a), (__b)), \
@@ -3077,10 +3305,95 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
<< 52); \
__builtin_convertvector(__ret, vector unsigned long long); \
}))
+#endif // __XL_COMPAT_ALTIVEC__
#else
#define vec_ctu __builtin_altivec_vctuxs
#endif
+#ifdef __LITTLE_ENDIAN__
+/* vec_ctsl */
+
+#ifdef __VSX__
+#define vec_ctsl(__a, __b) \
+ _Generic((__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ __builtin_vsx_xvcvspsxds( \
+ __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ << 52); \
+ __builtin_convertvector(__ret, vector signed long long); \
+ }))
+
+/* vec_ctul */
+
+#define vec_ctul(__a, __b) \
+ _Generic((__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ __builtin_vsx_xvcvspuxds( \
+ __builtin_vsx_xxsldwi(__ret, __ret, 1)); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ << 52); \
+ __builtin_convertvector(__ret, vector unsigned long long); \
+ }))
+#endif
+#else // __LITTLE_ENDIAN__
+/* vec_ctsl */
+
+#ifdef __VSX__
+#define vec_ctsl(__a, __b) \
+ _Generic((__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ __builtin_vsx_xvcvspsxds(__ret); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ << 52); \
+ __builtin_convertvector(__ret, vector signed long long); \
+ }))
+
+/* vec_ctul */
+
+#define vec_ctul(__a, __b) \
+ _Generic((__a), vector float \
+ : __extension__({ \
+ vector float __ret = \
+ (vector float)(__a) * \
+ (vector float)(vector unsigned)((0x7f + (__b)) << 23); \
+ __builtin_vsx_xvcvspuxds(__ret); \
+ }), \
+ vector double \
+ : __extension__({ \
+ vector double __ret = \
+ (vector double)(__a) * \
+ (vector double)(vector unsigned long long)((0x3ffULL + __b) \
+ << 52); \
+ __builtin_convertvector(__ret, vector unsigned long long); \
+ }))
+#endif
+#endif // __LITTLE_ENDIAN__
+
/* vec_vctuxs */
#define vec_vctuxs __builtin_altivec_vctuxs
@@ -3114,7 +3427,7 @@ vec_signextll(vector signed int __a) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_signextq(vector signed long long __a) {
return __builtin_altivec_vextsd2q(__a);
@@ -3399,6 +3712,15 @@ vec_doubleo(vector float __a) {
return __builtin_vsx_xvcvspdp(vec_sld(__a, __a, 4));
#endif
}
+
+/* vec_cvf */
+static __inline__ vector double __ATTRS_o_ai vec_cvf(vector float __a) {
+ return vec_doublee(__a);
+}
+
+static __inline__ vector float __ATTRS_o_ai vec_cvf(vector double __a) {
+ return vec_floate(__a);
+}
#endif
/* vec_div */
@@ -3481,6 +3803,7 @@ vec_dive(vector unsigned long long __a, vector unsigned long long __b) {
return __builtin_altivec_vdiveud(__a, __b);
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_dive(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __builtin_altivec_vdiveuq(__a, __b);
@@ -3491,8 +3814,9 @@ vec_dive(vector signed __int128 __a, vector signed __int128 __b) {
return __builtin_altivec_vdivesq(__a, __b);
}
#endif
+#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_div(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __a / __b;
@@ -3661,6 +3985,17 @@ static __inline__ vector double __ATTRS_o_ai vec_floor(vector double __a) {
}
#endif
+/* vec_roundm */
+static __inline__ vector float __ATTRS_o_ai vec_roundm(vector float __a) {
+ return vec_floor(__a);
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_roundm(vector double __a) {
+ return vec_floor(__a);
+}
+#endif
+
/* vec_vrfim */
static __inline__ vector float __attribute__((__always_inline__))
@@ -3671,251 +4006,251 @@ vec_vrfim(vector float __a) {
/* vec_ld */
static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const vector signed char *__b) {
+vec_ld(long __a, const vector signed char *__b) {
return (vector signed char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector signed char __ATTRS_o_ai
-vec_ld(int __a, const signed char *__b) {
+vec_ld(long __a, const signed char *__b) {
return (vector signed char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned char *__b) {
+vec_ld(long __a, const vector unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ld(int __a, const unsigned char *__b) {
+vec_ld(long __a, const unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector bool char __ATTRS_o_ai
-vec_ld(int __a, const vector bool char *__b) {
+vec_ld(long __a, const vector bool char *__b) {
return (vector bool char)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a,
+static __inline__ vector short __ATTRS_o_ai vec_ld(long __a,
const vector short *__b) {
return (vector short)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_ld(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_ld(long __a, const short *__b) {
return (vector short)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned short *__b) {
+vec_ld(long __a, const vector unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ld(int __a, const unsigned short *__b) {
+vec_ld(long __a, const unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector bool short __ATTRS_o_ai
-vec_ld(int __a, const vector bool short *__b) {
+vec_ld(long __a, const vector bool short *__b) {
return (vector bool short)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector pixel __ATTRS_o_ai vec_ld(int __a,
+static __inline__ vector pixel __ATTRS_o_ai vec_ld(long __a,
const vector pixel *__b) {
return (vector pixel)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a,
+static __inline__ vector int __ATTRS_o_ai vec_ld(long __a,
const vector int *__b) {
return (vector int)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_ld(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_ld(long __a, const int *__b) {
return (vector int)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const vector unsigned int *__b) {
+vec_ld(long __a, const vector unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ld(int __a, const unsigned int *__b) {
+vec_ld(long __a, const unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector bool int __ATTRS_o_ai
-vec_ld(int __a, const vector bool int *__b) {
+vec_ld(long __a, const vector bool int *__b) {
return (vector bool int)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a,
+static __inline__ vector float __ATTRS_o_ai vec_ld(long __a,
const vector float *__b) {
return (vector float)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_ld(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_ld(long __a, const float *__b) {
return (vector float)__builtin_altivec_lvx(__a, __b);
}
/* vec_lvx */
static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const vector signed char *__b) {
+vec_lvx(long __a, const vector signed char *__b) {
return (vector signed char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector signed char __ATTRS_o_ai
-vec_lvx(int __a, const signed char *__b) {
+vec_lvx(long __a, const signed char *__b) {
return (vector signed char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned char *__b) {
+vec_lvx(long __a, const vector unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvx(int __a, const unsigned char *__b) {
+vec_lvx(long __a, const unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector bool char __ATTRS_o_ai
-vec_lvx(int __a, const vector bool char *__b) {
+vec_lvx(long __a, const vector bool char *__b) {
return (vector bool char)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a,
+static __inline__ vector short __ATTRS_o_ai vec_lvx(long __a,
const vector short *__b) {
return (vector short)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_lvx(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lvx(long __a, const short *__b) {
return (vector short)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned short *__b) {
+vec_lvx(long __a, const vector unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvx(int __a, const unsigned short *__b) {
+vec_lvx(long __a, const unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector bool short __ATTRS_o_ai
-vec_lvx(int __a, const vector bool short *__b) {
+vec_lvx(long __a, const vector bool short *__b) {
return (vector bool short)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector pixel __ATTRS_o_ai vec_lvx(int __a,
+static __inline__ vector pixel __ATTRS_o_ai vec_lvx(long __a,
const vector pixel *__b) {
return (vector pixel)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a,
+static __inline__ vector int __ATTRS_o_ai vec_lvx(long __a,
const vector int *__b) {
return (vector int)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_lvx(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvx(long __a, const int *__b) {
return (vector int)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const vector unsigned int *__b) {
+vec_lvx(long __a, const vector unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvx(int __a, const unsigned int *__b) {
+vec_lvx(long __a, const unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvx(__a, __b);
}
static __inline__ vector bool int __ATTRS_o_ai
-vec_lvx(int __a, const vector bool int *__b) {
+vec_lvx(long __a, const vector bool int *__b) {
return (vector bool int)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a,
+static __inline__ vector float __ATTRS_o_ai vec_lvx(long __a,
const vector float *__b) {
return (vector float)__builtin_altivec_lvx(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_lvx(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lvx(long __a, const float *__b) {
return (vector float)__builtin_altivec_lvx(__a, __b);
}
/* vec_lde */
static __inline__ vector signed char __ATTRS_o_ai
-vec_lde(int __a, const signed char *__b) {
+vec_lde(long __a, const signed char *__b) {
return (vector signed char)__builtin_altivec_lvebx(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lde(int __a, const unsigned char *__b) {
+vec_lde(long __a, const unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_lde(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_lde(long __a, const short *__b) {
return (vector short)__builtin_altivec_lvehx(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lde(int __a, const unsigned short *__b) {
+vec_lde(long __a, const unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_lde(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lde(long __a, const int *__b) {
return (vector int)__builtin_altivec_lvewx(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lde(int __a, const unsigned int *__b) {
+vec_lde(long __a, const unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_lde(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_lde(long __a, const float *__b) {
return (vector float)__builtin_altivec_lvewx(__a, __b);
}
/* vec_lvebx */
static __inline__ vector signed char __ATTRS_o_ai
-vec_lvebx(int __a, const signed char *__b) {
+vec_lvebx(long __a, const signed char *__b) {
return (vector signed char)__builtin_altivec_lvebx(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvebx(int __a, const unsigned char *__b) {
+vec_lvebx(long __a, const unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvebx(__a, __b);
}
/* vec_lvehx */
-static __inline__ vector short __ATTRS_o_ai vec_lvehx(int __a,
+static __inline__ vector short __ATTRS_o_ai vec_lvehx(long __a,
const short *__b) {
return (vector short)__builtin_altivec_lvehx(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvehx(int __a, const unsigned short *__b) {
+vec_lvehx(long __a, const unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvehx(__a, __b);
}
/* vec_lvewx */
-static __inline__ vector int __ATTRS_o_ai vec_lvewx(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvewx(long __a, const int *__b) {
return (vector int)__builtin_altivec_lvewx(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvewx(int __a, const unsigned int *__b) {
+vec_lvewx(long __a, const unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvewx(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
+static __inline__ vector float __ATTRS_o_ai vec_lvewx(long __a,
const float *__b) {
return (vector float)__builtin_altivec_lvewx(__a, __b);
}
@@ -3923,179 +4258,179 @@ static __inline__ vector float __ATTRS_o_ai vec_lvewx(int __a,
/* vec_ldl */
static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const vector signed char *__b) {
+vec_ldl(long __a, const vector signed char *__b) {
return (vector signed char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector signed char __ATTRS_o_ai
-vec_ldl(int __a, const signed char *__b) {
+vec_ldl(long __a, const signed char *__b) {
return (vector signed char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned char *__b) {
+vec_ldl(long __a, const vector unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_ldl(int __a, const unsigned char *__b) {
+vec_ldl(long __a, const unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector bool char __ATTRS_o_ai
-vec_ldl(int __a, const vector bool char *__b) {
+vec_ldl(long __a, const vector bool char *__b) {
return (vector bool char)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a,
+static __inline__ vector short __ATTRS_o_ai vec_ldl(long __a,
const vector short *__b) {
return (vector short)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_ldl(int __a, const short *__b) {
+static __inline__ vector short __ATTRS_o_ai vec_ldl(long __a, const short *__b) {
return (vector short)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned short *__b) {
+vec_ldl(long __a, const vector unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_ldl(int __a, const unsigned short *__b) {
+vec_ldl(long __a, const unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector bool short __ATTRS_o_ai
-vec_ldl(int __a, const vector bool short *__b) {
+vec_ldl(long __a, const vector bool short *__b) {
return (vector bool short)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector pixel __ATTRS_o_ai vec_ldl(int __a,
+static __inline__ vector pixel __ATTRS_o_ai vec_ldl(long __a,
const vector pixel *__b) {
return (vector pixel short)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a,
+static __inline__ vector int __ATTRS_o_ai vec_ldl(long __a,
const vector int *__b) {
return (vector int)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_ldl(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_ldl(long __a, const int *__b) {
return (vector int)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const vector unsigned int *__b) {
+vec_ldl(long __a, const vector unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_ldl(int __a, const unsigned int *__b) {
+vec_ldl(long __a, const unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector bool int __ATTRS_o_ai
-vec_ldl(int __a, const vector bool int *__b) {
+vec_ldl(long __a, const vector bool int *__b) {
return (vector bool int)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a,
+static __inline__ vector float __ATTRS_o_ai vec_ldl(long __a,
const vector float *__b) {
return (vector float)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_ldl(int __a, const float *__b) {
+static __inline__ vector float __ATTRS_o_ai vec_ldl(long __a, const float *__b) {
return (vector float)__builtin_altivec_lvxl(__a, __b);
}
/* vec_lvxl */
static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const vector signed char *__b) {
+vec_lvxl(long __a, const vector signed char *__b) {
return (vector signed char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector signed char __ATTRS_o_ai
-vec_lvxl(int __a, const signed char *__b) {
+vec_lvxl(long __a, const signed char *__b) {
return (vector signed char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned char *__b) {
+vec_lvxl(long __a, const vector unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned char *__b) {
+vec_lvxl(long __a, const unsigned char *__b) {
return (vector unsigned char)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector bool char __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool char *__b) {
+vec_lvxl(long __a, const vector bool char *__b) {
return (vector bool char)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(long __a,
const vector short *__b) {
return (vector short)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector short __ATTRS_o_ai vec_lvxl(int __a,
+static __inline__ vector short __ATTRS_o_ai vec_lvxl(long __a,
const short *__b) {
return (vector short)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned short *__b) {
+vec_lvxl(long __a, const vector unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned short *__b) {
+vec_lvxl(long __a, const unsigned short *__b) {
return (vector unsigned short)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector bool short __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool short *__b) {
+vec_lvxl(long __a, const vector bool short *__b) {
return (vector bool short)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(int __a,
+static __inline__ vector pixel __ATTRS_o_ai vec_lvxl(long __a,
const vector pixel *__b) {
return (vector pixel)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a,
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(long __a,
const vector int *__b) {
return (vector int)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector int __ATTRS_o_ai vec_lvxl(int __a, const int *__b) {
+static __inline__ vector int __ATTRS_o_ai vec_lvxl(long __a, const int *__b) {
return (vector int)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const vector unsigned int *__b) {
+vec_lvxl(long __a, const vector unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
-vec_lvxl(int __a, const unsigned int *__b) {
+vec_lvxl(long __a, const unsigned int *__b) {
return (vector unsigned int)__builtin_altivec_lvxl(__a, __b);
}
static __inline__ vector bool int __ATTRS_o_ai
-vec_lvxl(int __a, const vector bool int *__b) {
+vec_lvxl(long __a, const vector bool int *__b) {
return (vector bool int)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
+static __inline__ vector float __ATTRS_o_ai vec_lvxl(long __a,
const vector float *__b) {
return (vector float)__builtin_altivec_lvxl(__a, __b);
}
-static __inline__ vector float __ATTRS_o_ai vec_lvxl(int __a,
+static __inline__ vector float __ATTRS_o_ai vec_lvxl(long __a,
const float *__b) {
return (vector float)__builtin_altivec_lvxl(__a, __b);
}
@@ -5695,7 +6030,7 @@ vec_msum(vector unsigned short __a, vector unsigned short __b,
/* vec_msumc */
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_msumc(vector unsigned long long __a, vector unsigned long long __b,
vector unsigned __int128 __c) {
@@ -5929,7 +6264,7 @@ vec_mule(vector unsigned int __a, vector unsigned int __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_mule(vector signed long long __a, vector signed long long __b) {
#ifdef __LITTLE_ENDIAN__
@@ -6075,7 +6410,7 @@ vec_mulo(vector unsigned int __a, vector unsigned int __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_mulo(vector signed long long __a, vector signed long long __b) {
#ifdef __LITTLE_ENDIAN__
@@ -7927,7 +8262,7 @@ vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
return (__b << __a)|(__b >> ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __a));
@@ -7954,7 +8289,7 @@ vec_rlmi(vector unsigned long long __a, vector unsigned long long __b,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_rlmi(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
@@ -7985,7 +8320,7 @@ vec_rlnm(vector unsigned long long __a, vector unsigned long long __b,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_rlnm(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
@@ -8060,11 +8395,7 @@ vec_vrlw(vector unsigned int __a, vector unsigned int __b) {
/* vec_round */
static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
-#ifdef __VSX__
- return __builtin_vsx_xvrspi(__a);
-#else
return __builtin_altivec_vrfin(__a);
-#endif
}
#ifdef __VSX__
@@ -8082,6 +8413,16 @@ static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a) {
return __builtin_vsx_xvrdpic(__a);
}
+/* vec_roundc */
+
+static __inline__ vector float __ATTRS_o_ai vec_roundc(vector float __a) {
+ return __builtin_vsx_xvrspic(__a);
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_roundc(vector double __a) {
+ return __builtin_vsx_xvrdpic(__a);
+}
+
/* vec_nearbyint */
static __inline__ vector float __ATTRS_o_ai vec_nearbyint(vector float __a) {
@@ -8128,6 +8469,16 @@ static __inline__ vector double __ATTRS_o_ai vec_rsqrte(vector double __a) {
}
#endif
+static vector float __ATTRS_o_ai vec_rsqrt(vector float __a) {
+ return __builtin_ppc_rsqrtf(__a);
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_rsqrt(vector double __a) {
+ return __builtin_ppc_rsqrtd(__a);
+}
+#endif
+
/* vec_vrsqrtefp */
static __inline__ __vector float __attribute__((__always_inline__))
@@ -8488,6 +8839,52 @@ static __inline__ vector long long __ATTRS_o_ai
vec_sl(vector long long __a, vector unsigned long long __b) {
return (vector long long)vec_sl((vector unsigned long long)__a, __b);
}
+#else
+static __inline__ vector unsigned char __ATTRS_o_ai
+vec_vspltb(vector unsigned char __a, unsigned char __b);
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sl(vector unsigned long long __a, vector unsigned long long __b) {
+ __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
+
+ // Big endian element one (the right doubleword) can be left shifted as-is.
+ // The other element needs to be swapped into the right doubleword and
+ // shifted. Then the right doublewords of the two result vectors are merged.
+ vector signed long long __rightelt =
+ (vector signed long long)__builtin_altivec_vslo((vector signed int)__a,
+ (vector signed int)__b);
+#ifdef __LITTLE_ENDIAN__
+ __rightelt = (vector signed long long)__builtin_altivec_vsl(
+ (vector signed int)__rightelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__b, 0));
+#else
+ __rightelt = (vector signed long long)__builtin_altivec_vsl(
+ (vector signed int)__rightelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__b, 15));
+#endif
+ __a = __builtin_shufflevector(__a, __a, 1, 0);
+ __b = __builtin_shufflevector(__b, __b, 1, 0);
+ vector signed long long __leftelt =
+ (vector signed long long)__builtin_altivec_vslo((vector signed int)__a,
+ (vector signed int)__b);
+#ifdef __LITTLE_ENDIAN__
+ __leftelt = (vector signed long long)__builtin_altivec_vsl(
+ (vector signed int)__leftelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__b, 0));
+ return (vector unsigned long long)__builtin_shufflevector(__rightelt,
+ __leftelt, 0, 2);
+#else
+ __leftelt = (vector signed long long)__builtin_altivec_vsl(
+ (vector signed int)__leftelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__b, 15));
+ return (vector unsigned long long)__builtin_shufflevector(__leftelt,
+ __rightelt, 1, 3);
+#endif
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_sl(vector long long __a, vector unsigned long long __b) {
+ return (vector long long)vec_sl((vector unsigned long long)__a, __b);
+}
#endif
/* vec_vslb */
@@ -8850,6 +9247,11 @@ static __inline__ vector unsigned int __ATTRS_o_ai vec_sldw(
return vec_sld(__a, __b, ((__c << 2) & 0x0F));
}
+static __inline__ vector float __ATTRS_o_ai vec_sldw(
+ vector float __a, vector float __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
+
#ifdef __VSX__
static __inline__ vector signed long long __ATTRS_o_ai
vec_sldw(vector signed long long __a, vector signed long long __b,
@@ -8862,6 +9264,11 @@ vec_sldw(vector unsigned long long __a, vector unsigned long long __b,
unsigned const int __c) {
return vec_sld(__a, __b, ((__c << 2) & 0x0F));
}
+
+static __inline__ vector double __ATTRS_o_ai vec_sldw(
+ vector double __a, vector double __b, unsigned const int __c) {
+ return vec_sld(__a, __b, ((__c << 2) & 0x0F));
+}
#endif
#ifdef __POWER9_VECTOR__
@@ -9943,6 +10350,50 @@ static __inline__ vector long long __ATTRS_o_ai
vec_sr(vector long long __a, vector unsigned long long __b) {
return (vector long long)vec_sr((vector unsigned long long)__a, __b);
}
+#else
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sr(vector unsigned long long __a, vector unsigned long long __b) {
+ __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
+
+ // Big endian element zero (the left doubleword) can be right shifted as-is.
+ // However the shift amount must be in the right doubleword.
+ // The other element needs to be swapped into the left doubleword and
+ // shifted. Then the left doublewords of the two result vectors are merged.
+ vector unsigned long long __swapshift =
+ __builtin_shufflevector(__b, __b, 1, 0);
+ vector unsigned long long __leftelt =
+ (vector unsigned long long)__builtin_altivec_vsro(
+ (vector signed int)__a, (vector signed int)__swapshift);
+#ifdef __LITTLE_ENDIAN__
+ __leftelt = (vector unsigned long long)__builtin_altivec_vsr(
+ (vector signed int)__leftelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__swapshift, 0));
+#else
+ __leftelt = (vector unsigned long long)__builtin_altivec_vsr(
+ (vector signed int)__leftelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__swapshift, 15));
+#endif
+ __a = __builtin_shufflevector(__a, __a, 1, 0);
+ vector unsigned long long __rightelt =
+ (vector unsigned long long)__builtin_altivec_vsro((vector signed int)__a,
+ (vector signed int)__b);
+#ifdef __LITTLE_ENDIAN__
+ __rightelt = (vector unsigned long long)__builtin_altivec_vsr(
+ (vector signed int)__rightelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__b, 0));
+ return __builtin_shufflevector(__rightelt, __leftelt, 1, 3);
+#else
+ __rightelt = (vector unsigned long long)__builtin_altivec_vsr(
+ (vector signed int)__rightelt,
+ (vector signed int)vec_vspltb((vector unsigned char)__b, 15));
+ return __builtin_shufflevector(__leftelt, __rightelt, 0, 2);
+#endif
+}
+
+static __inline__ vector long long __ATTRS_o_ai
+vec_sr(vector long long __a, vector unsigned long long __b) {
+ return (vector long long)vec_sr((vector unsigned long long)__a, __b);
+}
#endif
/* vec_vsrb */
@@ -10029,6 +10480,18 @@ static __inline__ vector unsigned long long __ATTRS_o_ai
vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
return (vector unsigned long long)((vector signed long long)__a >> __b);
}
+#else
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_sra(vector signed long long __a, vector unsigned long long __b) {
+ __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
+ return __a >> __b;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_sra(vector unsigned long long __a, vector unsigned long long __b) {
+ __b %= (vector unsigned long long)(sizeof(unsigned long long) * __CHAR_BIT__);
+ return (vector unsigned long long)((vector signed long long)__a >> __b);
+}
#endif
/* vec_vsrab */
@@ -10635,420 +11098,420 @@ static __inline__ vector float __ATTRS_o_ai vec_vsro(vector float __a,
/* vec_st */
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, long __b,
vector signed char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector signed char __a, long __b,
signed char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, long __b,
vector unsigned char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b,
signed char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool char __a, long __b,
vector bool char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector short __a, long __b,
vector short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector short __a, long __b,
short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, long __b,
vector unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b,
short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool short __a, long __b,
vector bool short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b,
short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector pixel __a, long __b,
vector pixel *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector int __a, long __b,
vector int *__c) {
__builtin_altivec_stvx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_st(vector int __a, long __b, int *__c) {
__builtin_altivec_stvx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, long __b,
vector unsigned int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector unsigned int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b,
int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector bool int __a, long __b,
vector bool int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector float __a, long __b,
vector float *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_st(vector float __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_st(vector float __a, long __b,
float *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
/* vec_stvx */
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, long __b,
vector signed char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector signed char __a, long __b,
signed char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, long __b,
vector unsigned char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b,
signed char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool char __a, long __b,
vector bool char *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, long __b,
vector short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector short __a, long __b,
short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, long __b,
vector unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b,
short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool short __a, long __b,
vector bool short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b,
short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector pixel __a, long __b,
vector pixel *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, long __b,
vector int *__c) {
__builtin_altivec_stvx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector int __a, long __b,
int *__c) {
__builtin_altivec_stvx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, long __b,
vector unsigned int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector unsigned int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b,
int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector bool int __a, long __b,
vector bool int *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, long __b,
vector float *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvx(vector float __a, long __b,
float *__c) {
__builtin_altivec_stvx((vector int)__a, __b, __c);
}
/* vec_ste */
-static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector signed char __a, long __b,
signed char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, long __b,
signed char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector short __a, long __b,
short *__c) {
__builtin_altivec_stvehx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, long __b,
short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, long __b,
short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector pixel __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, int __b, int *__c) {
+static __inline__ void __ATTRS_o_ai vec_ste(vector int __a, long __b, int *__c) {
__builtin_altivec_stvewx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector unsigned int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, long __b,
int *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector bool int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_ste(vector float __a, long __b,
float *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
/* vec_stvebx */
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector signed char __a, long __b,
signed char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
static __inline__ void __ATTRS_o_ai vec_stvebx(vector unsigned char __a,
- int __b, unsigned char *__c) {
+ long __b, unsigned char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, long __b,
signed char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvebx(vector bool char __a, long __b,
unsigned char *__c) {
__builtin_altivec_stvebx((vector char)__a, __b, __c);
}
/* vec_stvehx */
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector short __a, long __b,
short *__c) {
__builtin_altivec_stvehx(__a, __b, __c);
}
static __inline__ void __ATTRS_o_ai vec_stvehx(vector unsigned short __a,
- int __b, unsigned short *__c) {
+ long __b, unsigned short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, long __b,
short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector bool short __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, long __b,
short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvehx(vector pixel __a, long __b,
unsigned short *__c) {
__builtin_altivec_stvehx((vector short)__a, __b, __c);
}
/* vec_stvewx */
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector int __a, long __b,
int *__c) {
__builtin_altivec_stvewx(__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector unsigned int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, long __b,
int *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector bool int __a, long __b,
unsigned int *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
-static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, int __b,
+static __inline__ void __ATTRS_o_ai vec_stvewx(vector float __a, long __b,
float *__c) {
__builtin_altivec_stvewx((vector int)__a, __b, __c);
}
@@ -11409,7 +11872,8 @@ vec_sub(vector unsigned int __a, vector bool int __b) {
return __a - (vector unsigned int)__b;
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_sub(vector signed __int128 __a, vector signed __int128 __b) {
return __a - __b;
@@ -11419,7 +11883,8 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_sub(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __a - __b;
}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__) &&
+ // defined(__SIZEOF_INT128__)
#ifdef __VSX__
static __inline__ vector signed long long __ATTRS_o_ai
@@ -11567,7 +12032,8 @@ vec_subc(vector unsigned int __a, vector unsigned int __b) {
return __builtin_altivec_vsubcuw(__a, __b);
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
+#ifdef __SIZEOF_INT128__
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __builtin_altivec_vsubcuq(__a, __b);
@@ -11577,7 +12043,13 @@ static __inline__ vector signed __int128 __ATTRS_o_ai
vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
return __builtin_altivec_vsubcuq(__a, __b);
}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
+ return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
+}
+#endif // __POWER8_VECTOR__
/* vec_vsubcuw */
@@ -11780,9 +12252,10 @@ vec_vsubuws(vector unsigned int __a, vector bool int __b) {
return __builtin_altivec_vsubuws(__a, (vector unsigned int)__b);
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
/* vec_vsubuqm */
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubuqm(vector signed __int128 __a, vector signed __int128 __b) {
return __a - __b;
@@ -11792,10 +12265,16 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __a - __b;
}
+#endif
-/* vec_vsubeuqm */
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
+ return __builtin_altivec_vsubuqm(__a, __b);
+}
+/* vec_vsubeuqm */
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
@@ -11819,9 +12298,17 @@ vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
return __builtin_altivec_vsubeuqm(__a, __b, __c);
}
+#endif
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c);
+}
/* vec_vsubcuq */
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
return __builtin_altivec_vsubcuq(__a, __b);
@@ -11845,7 +12332,9 @@ vec_vsubecuq(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
return __builtin_altivec_vsubecuq(__a, __b, __c);
}
+#endif
+#ifdef __powerpc64__
static __inline__ vector signed int __ATTRS_o_ai
vec_subec(vector signed int __a, vector signed int __b,
vector signed int __c) {
@@ -11857,7 +12346,9 @@ vec_subec(vector unsigned int __a, vector unsigned int __b,
vector unsigned int __c) {
return vec_addec(__a, ~__b, __c);
}
+#endif
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_subec(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
@@ -11869,7 +12360,14 @@ vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
vector unsigned __int128 __c) {
return __builtin_altivec_vsubecuq(__a, __b, __c);
}
-#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#endif
+
+static __inline__ vector unsigned char __attribute__((__always_inline__))
+vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
+ vector unsigned char __c) {
+ return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
+}
+#endif // __POWER8_VECTOR__
static __inline__ vector signed int __ATTRS_o_ai
vec_sube(vector signed int __a, vector signed int __b,
@@ -12012,6 +12510,17 @@ static __inline__ vector double __ATTRS_o_ai vec_trunc(vector double __a) {
}
#endif
+/* vec_roundz */
+static __inline__ vector float __ATTRS_o_ai vec_roundz(vector float __a) {
+ return vec_trunc(__a);
+}
+
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_roundz(vector double __a) {
+ return vec_trunc(__a);
+}
+#endif
+
/* vec_vrfiz */
static __inline__ vector float __attribute__((__always_inline__))
@@ -12023,6 +12532,13 @@ vec_vrfiz(vector float __a) {
/* The vector unpack instructions all have a big-endian bias, so for
little endian we must reverse the meanings of "high" and "low." */
+#ifdef __LITTLE_ENDIAN__
+#define vec_vupkhpx(__a) __builtin_altivec_vupklpx((vector short)(__a))
+#define vec_vupklpx(__a) __builtin_altivec_vupkhpx((vector short)(__a))
+#else
+#define vec_vupkhpx(__a) __builtin_altivec_vupkhpx((vector short)(__a))
+#define vec_vupklpx(__a) __builtin_altivec_vupklpx((vector short)(__a))
+#endif
static __inline__ vector short __ATTRS_o_ai
vec_unpackh(vector signed char __a) {
@@ -12558,6 +13074,16 @@ static __inline__ void __ATTRS_o_ai vec_vsx_st(vector unsigned char __a,
#ifdef __VSX__
#define vec_xxpermdi __builtin_vsx_xxpermdi
#define vec_xxsldwi __builtin_vsx_xxsldwi
+#define vec_permi(__a, __b, __c) \
+ _Generic((__a), vector signed long long \
+ : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1), \
+ (((__c)&0x1) + 2)), \
+ vector unsigned long long \
+ : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1), \
+ (((__c)&0x1) + 2)), \
+ vector double \
+ : __builtin_shufflevector((__a), (__b), (((__c) >> 1) & 0x1), \
+ (((__c)&0x1) + 2)))
#endif
/* vec_xor */
@@ -12915,73 +13441,75 @@ vec_vxor(vector bool long long __a, vector bool long long __b) {
/* vec_extract */
static __inline__ signed char __ATTRS_o_ai vec_extract(vector signed char __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0xf];
}
static __inline__ unsigned char __ATTRS_o_ai
-vec_extract(vector unsigned char __a, int __b) {
- return __a[__b];
+vec_extract(vector unsigned char __a, unsigned int __b) {
+ return __a[__b & 0xf];
}
static __inline__ unsigned char __ATTRS_o_ai vec_extract(vector bool char __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0xf];
}
static __inline__ signed short __ATTRS_o_ai vec_extract(vector signed short __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0x7];
}
static __inline__ unsigned short __ATTRS_o_ai
-vec_extract(vector unsigned short __a, int __b) {
- return __a[__b];
+vec_extract(vector unsigned short __a, unsigned int __b) {
+ return __a[__b & 0x7];
}
static __inline__ unsigned short __ATTRS_o_ai vec_extract(vector bool short __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0x7];
}
static __inline__ signed int __ATTRS_o_ai vec_extract(vector signed int __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0x3];
}
static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector unsigned int __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0x3];
}
static __inline__ unsigned int __ATTRS_o_ai vec_extract(vector bool int __a,
- int __b) {
- return __a[__b];
+ unsigned int __b) {
+ return __a[__b & 0x3];
}
#ifdef __VSX__
static __inline__ signed long long __ATTRS_o_ai
-vec_extract(vector signed long long __a, int __b) {
- return __a[__b];
+vec_extract(vector signed long long __a, unsigned int __b) {
+ return __a[__b & 0x1];
}
static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector unsigned long long __a, int __b) {
- return __a[__b];
+vec_extract(vector unsigned long long __a, unsigned int __b) {
+ return __a[__b & 0x1];
}
static __inline__ unsigned long long __ATTRS_o_ai
-vec_extract(vector bool long long __a, int __b) {
- return __a[__b];
+vec_extract(vector bool long long __a, unsigned int __b) {
+ return __a[__b & 0x1];
}
-static __inline__ double __ATTRS_o_ai vec_extract(vector double __a, int __b) {
- return __a[__b];
+static __inline__ double __ATTRS_o_ai vec_extract(vector double __a,
+ unsigned int __b) {
+ return __a[__b & 0x1];
}
#endif
-static __inline__ float __ATTRS_o_ai vec_extract(vector float __a, int __b) {
- return __a[__b];
+static __inline__ float __ATTRS_o_ai vec_extract(vector float __a,
+ unsigned int __b) {
+ return __a[__b & 0x3];
}
#ifdef __POWER9_VECTOR__
@@ -14022,49 +14550,71 @@ static __inline__ void __ATTRS_o_ai vec_stvrxl(vector float __a, int __b,
static __inline__ vector signed char __ATTRS_o_ai vec_promote(signed char __a,
int __b) {
vector signed char __res = (vector signed char)(0);
- __res[__b] = __a;
+ __res[__b & 0x7] = __a;
return __res;
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_promote(unsigned char __a, int __b) {
vector unsigned char __res = (vector unsigned char)(0);
- __res[__b] = __a;
+ __res[__b & 0x7] = __a;
return __res;
}
static __inline__ vector short __ATTRS_o_ai vec_promote(short __a, int __b) {
vector short __res = (vector short)(0);
- __res[__b] = __a;
+ __res[__b & 0x7] = __a;
return __res;
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_promote(unsigned short __a, int __b) {
vector unsigned short __res = (vector unsigned short)(0);
- __res[__b] = __a;
+ __res[__b & 0x7] = __a;
return __res;
}
static __inline__ vector int __ATTRS_o_ai vec_promote(int __a, int __b) {
vector int __res = (vector int)(0);
- __res[__b] = __a;
+ __res[__b & 0x3] = __a;
return __res;
}
static __inline__ vector unsigned int __ATTRS_o_ai vec_promote(unsigned int __a,
int __b) {
vector unsigned int __res = (vector unsigned int)(0);
- __res[__b] = __a;
+ __res[__b & 0x3] = __a;
return __res;
}
static __inline__ vector float __ATTRS_o_ai vec_promote(float __a, int __b) {
vector float __res = (vector float)(0);
- __res[__b] = __a;
+ __res[__b & 0x3] = __a;
return __res;
}
+#ifdef __VSX__
+static __inline__ vector double __ATTRS_o_ai vec_promote(double __a, int __b) {
+ vector double __res = (vector double)(0);
+ __res[__b & 0x1] = __a;
+ return __res;
+}
+
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_promote(signed long long __a, int __b) {
+ vector signed long long __res = (vector signed long long)(0);
+ __res[__b & 0x1] = __a;
+ return __res;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_promote(unsigned long long __a, int __b) {
+ vector unsigned long long __res = (vector unsigned long long)(0);
+ __res[__b & 0x1] = __a;
+ return __res;
+}
+#endif
+
/* vec_splats */
static __inline__ vector signed char __ATTRS_o_ai vec_splats(signed char __a) {
@@ -14105,7 +14655,8 @@ vec_splats(unsigned long long __a) {
return (vector unsigned long long)(__a);
}
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_splats(signed __int128 __a) {
return (vector signed __int128)(__a);
@@ -14258,7 +14809,7 @@ static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool int __a,
(vector int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpequd_p(__CR6_LT, __a, __b);
@@ -14316,7 +14867,7 @@ static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
@@ -14352,8 +14903,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__b,
- (vector unsigned char)__a);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, __b, (vector signed char)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool char __a,
@@ -14390,8 +14940,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__b,
- (vector unsigned short)__a);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, __b, (vector signed short)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool short __a,
@@ -14427,8 +14976,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__b,
- (vector unsigned int)__a);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, __b, (vector signed int)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
@@ -14442,7 +14990,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool int __a,
(vector unsigned int)__a);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b, __a);
@@ -14466,8 +15014,8 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__b,
- (vector unsigned long long)__a);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __b,
+ (vector signed long long)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_ge(vector bool long long __a,
@@ -14499,7 +15047,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ge(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_ge(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __b, __a);
@@ -14535,8 +15083,7 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__a,
- (vector unsigned char)__b);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool char __a,
@@ -14573,8 +15120,7 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__a,
- (vector unsigned short)__b);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector signed short)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool short __a,
@@ -14610,8 +15156,7 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__a,
- (vector unsigned int)__b);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector signed int)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
@@ -14625,7 +15170,7 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool int __a,
(vector unsigned int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __a, __b);
@@ -14649,8 +15194,8 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__a,
- (vector unsigned long long)__b);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, (vector signed long long)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_all_gt(vector bool long long __a,
@@ -14682,7 +15227,7 @@ static __inline__ int __ATTRS_o_ai vec_all_gt(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_gt(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __a, __b);
@@ -14725,8 +15270,7 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)__a,
- (vector unsigned char)__b);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool char __a,
@@ -14763,8 +15307,7 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)__a,
- (vector unsigned short)__b);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector signed short)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool short __a,
@@ -14800,8 +15343,7 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)__a,
- (vector unsigned int)__b);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector signed int)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
@@ -14815,7 +15357,7 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector bool int __a,
(vector unsigned int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_le(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, __a, __b);
@@ -14840,8 +15382,8 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_EQ, (vector unsigned long long)__a,
- (vector unsigned long long)__b);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ, (vector signed long long)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_all_le(vector bool long long __a,
@@ -14873,7 +15415,7 @@ static __inline__ int __ATTRS_o_ai vec_all_le(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_le(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_EQ, __a, __b);
@@ -14909,8 +15451,7 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)__b,
- (vector unsigned char)__a);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, __b, (vector signed char)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool char __a,
@@ -14947,8 +15488,7 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)__b,
- (vector unsigned short)__a);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, __b, (vector signed short)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool short __a,
@@ -14984,8 +15524,7 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)__b,
- (vector unsigned int)__a);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, __b, (vector signed int)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
@@ -14999,7 +15538,7 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool int __a,
(vector unsigned int)__a);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b, __a);
@@ -15024,8 +15563,8 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_LT, (vector unsigned long long)__b,
- (vector unsigned long long)__a);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT, __b,
+ (vector signed long long)__a);
}
static __inline__ int __ATTRS_o_ai vec_all_lt(vector bool long long __a,
@@ -15057,7 +15596,7 @@ static __inline__ int __ATTRS_o_ai vec_all_lt(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_lt(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_LT, __b, __a);
@@ -15214,7 +15753,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool int __a,
(vector int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpequd_p(__CR6_EQ, __a, __b);
@@ -15273,7 +15812,7 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
@@ -15323,25 +15862,57 @@ static __inline__ int __ATTRS_o_ai vec_all_ngt(vector double __a,
/* vec_all_nle */
-static __inline__ int __attribute__((__always_inline__))
+static __inline__ int __ATTRS_o_ai
vec_all_nle(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_EQ, __b, __a);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_EQ, __b, __a);
+#endif
}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_nle(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_EQ, __b, __a);
+}
+#endif
+
/* vec_all_nlt */
-static __inline__ int __attribute__((__always_inline__))
+static __inline__ int __ATTRS_o_ai
vec_all_nlt(vector float __a, vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_EQ, __b, __a);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_nlt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_EQ, __b, __a);
}
+#endif
/* vec_all_numeric */
-static __inline__ int __attribute__((__always_inline__))
+static __inline__ int __ATTRS_o_ai
vec_all_numeric(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT, __a, __a);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_LT, __a, __a);
+#endif
}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_all_numeric(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT, __a, __a);
+}
+#endif
+
/* vec_any_eq */
static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed char __a,
@@ -15471,7 +16042,7 @@ static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool int __a,
(vector int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpequd_p(__CR6_EQ_REV, __a, __b);
@@ -15530,7 +16101,7 @@ static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
@@ -15568,8 +16139,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__b,
- (vector unsigned char)__a);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, __b,
+ (vector signed char)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool char __a,
@@ -15607,8 +16178,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__b,
- (vector unsigned short)__a);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, __b,
+ (vector signed short)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool short __a,
@@ -15645,8 +16216,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__b,
- (vector unsigned int)__a);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, __b,
+ (vector signed int)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
@@ -15661,7 +16232,7 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool int __a,
(vector unsigned int)__a);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b, __a);
@@ -15686,9 +16257,8 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
- (vector unsigned long long)__b,
- (vector unsigned long long)__a);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __b,
+ (vector signed long long)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_ge(vector bool long long __a,
@@ -15721,7 +16291,7 @@ static __inline__ int __ATTRS_o_ai vec_any_ge(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_ge(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __b, __a);
@@ -15759,8 +16329,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__a,
- (vector unsigned char)__b);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool char __a,
@@ -15798,8 +16368,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__a,
- (vector unsigned short)__b);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector signed short)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool short __a,
@@ -15836,8 +16406,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__a,
- (vector unsigned int)__b);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector signed int)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
@@ -15852,7 +16422,7 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool int __a,
(vector unsigned int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __a, __b);
@@ -15877,9 +16447,8 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
- (vector unsigned long long)__a,
- (vector unsigned long long)__b);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV,
+ (vector signed long long)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_any_gt(vector bool long long __a,
@@ -15912,7 +16481,7 @@ static __inline__ int __ATTRS_o_ai vec_any_gt(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_gt(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __a, __b);
@@ -15950,8 +16519,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)__a,
- (vector unsigned char)__b);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool char __a,
@@ -15989,8 +16558,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)__a,
- (vector unsigned short)__b);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector signed short)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool short __a,
@@ -16027,8 +16596,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)__a,
- (vector unsigned int)__b);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector signed int)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
@@ -16043,7 +16612,7 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector bool int __a,
(vector unsigned int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_le(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV, __a, __b);
@@ -16068,9 +16637,8 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_LT_REV,
- (vector unsigned long long)__a,
- (vector unsigned long long)__b);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_LT_REV,
+ (vector signed long long)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_any_le(vector bool long long __a,
@@ -16103,7 +16671,7 @@ static __inline__ int __ATTRS_o_ai vec_any_le(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_le(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_LT_REV, __a, __b);
@@ -16141,8 +16709,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned char __a,
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
vector signed char __b) {
- return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)__b,
- (vector unsigned char)__a);
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, __b,
+ (vector signed char)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool char __a,
@@ -16180,8 +16748,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned short __a,
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
vector short __b) {
- return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)__b,
- (vector unsigned short)__a);
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, __b,
+ (vector signed short)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool short __a,
@@ -16218,8 +16786,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned int __a,
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
vector int __b) {
- return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)__b,
- (vector unsigned int)__a);
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, __b,
+ (vector signed int)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
@@ -16234,7 +16802,7 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool int __a,
(vector unsigned int)__a);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b, __a);
@@ -16259,9 +16827,8 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned long long __a,
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
vector signed long long __b) {
- return __builtin_altivec_vcmpgtud_p(__CR6_EQ_REV,
- (vector unsigned long long)__b,
- (vector unsigned long long)__a);
+ return __builtin_altivec_vcmpgtsd_p(__CR6_EQ_REV, __b,
+ (vector signed long long)__a);
}
static __inline__ int __ATTRS_o_ai vec_any_lt(vector bool long long __a,
@@ -16294,7 +16861,7 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_lt(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpgtsq_p(__CR6_EQ_REV, __b, __a);
@@ -16308,10 +16875,18 @@ static __inline__ int __ATTRS_o_ai vec_any_lt(vector unsigned __int128 __a,
/* vec_any_nan */
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nan(vector float __a) {
+static __inline__ int __ATTRS_o_ai vec_any_nan(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_LT_REV, __a, __a);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, __a, __a);
+#endif
+}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nan(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_LT_REV, __a, __a);
}
+#endif
/* vec_any_ne */
@@ -16442,7 +17017,7 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool int __a,
(vector int)__b);
}
-#ifdef __POWER8_VECTOR__
+#ifdef __VSX__
static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed long long __a,
vector signed long long __b) {
return __builtin_altivec_vcmpequd_p(__CR6_LT_REV, __a, __b);
@@ -16501,7 +17076,7 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
}
#endif
-#ifdef __POWER10_VECTOR__
+#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
vector signed __int128 __b) {
return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
@@ -16515,39 +17090,92 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
/* vec_any_nge */
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nge(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_nge(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT_REV, __a, __b);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __a, __b);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nge(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT_REV, __a, __b);
}
+#endif
/* vec_any_ngt */
-static __inline__ int __attribute__((__always_inline__))
-vec_any_ngt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_ngt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT_REV, __a, __b);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __a, __b);
+#endif
}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_ngt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT_REV, __a, __b);
+}
+#endif
+
/* vec_any_nle */
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nle(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_nle(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgesp_p(__CR6_LT_REV, __b, __a);
+#else
return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, __b, __a);
+#endif
+}
+
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nle(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgedp_p(__CR6_LT_REV, __b, __a);
}
+#endif
/* vec_any_nlt */
-static __inline__ int __attribute__((__always_inline__))
-vec_any_nlt(vector float __a, vector float __b) {
+static __inline__ int __ATTRS_o_ai vec_any_nlt(vector float __a,
+ vector float __b) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpgtsp_p(__CR6_LT_REV, __b, __a);
+#else
return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, __b, __a);
+#endif
}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_nlt(vector double __a,
+ vector double __b) {
+ return __builtin_vsx_xvcmpgtdp_p(__CR6_LT_REV, __b, __a);
+}
+#endif
+
/* vec_any_numeric */
-static __inline__ int __attribute__((__always_inline__))
-vec_any_numeric(vector float __a) {
+static __inline__ int __ATTRS_o_ai vec_any_numeric(vector float __a) {
+#ifdef __VSX__
+ return __builtin_vsx_xvcmpeqsp_p(__CR6_EQ_REV, __a, __a);
+#else
return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, __a, __a);
+#endif
}
+#ifdef __VSX__
+static __inline__ int __ATTRS_o_ai vec_any_numeric(vector double __a) {
+ return __builtin_vsx_xvcmpeqdp_p(__CR6_EQ_REV, __a, __a);
+}
+#endif
+
/* vec_any_out */
static __inline__ int __attribute__((__always_inline__))
@@ -16696,6 +17324,16 @@ vec_vgbbd(vector unsigned char __a) {
return __builtin_altivec_vgbbd(__a);
}
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_gbb(vector signed long long __a) {
+ return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_gbb(vector unsigned long long __a) {
+ return __builtin_altivec_vgbbd((vector unsigned char)__a);
+}
+
static __inline__ vector long long __ATTRS_o_ai
vec_vbpermq(vector signed char __a, vector signed char __b) {
return __builtin_altivec_vbpermq((vector unsigned char)__a,
@@ -16707,7 +17345,7 @@ vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
return __builtin_altivec_vbpermq(__a, __b);
}
-#ifdef __powerpc64__
+#if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
static __inline__ vector unsigned long long __attribute__((__always_inline__))
vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
return __builtin_altivec_vbpermq((vector unsigned char)__a,
@@ -16882,7 +17520,8 @@ vec_revb(vector double __a) {
}
#endif /* End __VSX__ */
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_revb(vector signed __int128 __a) {
vector unsigned char __indices =
@@ -16904,6 +17543,8 @@ vec_revb(vector unsigned __int128 __a) {
/* vec_xl */
+#define vec_xld2 vec_xl
+#define vec_xlw4 vec_xl
typedef vector signed char unaligned_vec_schar __attribute__((aligned(1)));
typedef vector unsigned char unaligned_vec_uchar __attribute__((aligned(1)));
typedef vector signed short unaligned_vec_sshort __attribute__((aligned(1)));
@@ -16912,41 +17553,41 @@ typedef vector signed int unaligned_vec_sint __attribute__((aligned(1)));
typedef vector unsigned int unaligned_vec_uint __attribute__((aligned(1)));
typedef vector float unaligned_vec_float __attribute__((aligned(1)));
-static inline __ATTRS_o_ai vector signed char vec_xl(signed long long __offset,
+static inline __ATTRS_o_ai vector signed char vec_xl(ptrdiff_t __offset,
const signed char *__ptr) {
return *(unaligned_vec_schar *)(__ptr + __offset);
}
static inline __ATTRS_o_ai vector unsigned char
-vec_xl(signed long long __offset, const unsigned char *__ptr) {
+vec_xl(ptrdiff_t __offset, const unsigned char *__ptr) {
return *(unaligned_vec_uchar*)(__ptr + __offset);
}
-static inline __ATTRS_o_ai vector signed short vec_xl(signed long long __offset,
- const signed short *__ptr) {
+static inline __ATTRS_o_ai vector signed short
+vec_xl(ptrdiff_t __offset, const signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sshort *)__addr;
}
static inline __ATTRS_o_ai vector unsigned short
-vec_xl(signed long long __offset, const unsigned short *__ptr) {
+vec_xl(ptrdiff_t __offset, const unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ushort *)__addr;
}
-static inline __ATTRS_o_ai vector signed int vec_xl(signed long long __offset,
+static inline __ATTRS_o_ai vector signed int vec_xl(ptrdiff_t __offset,
const signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sint *)__addr;
}
-static inline __ATTRS_o_ai vector unsigned int vec_xl(signed long long __offset,
- const unsigned int *__ptr) {
+static inline __ATTRS_o_ai vector unsigned int
+vec_xl(ptrdiff_t __offset, const unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_uint *)__addr;
}
-static inline __ATTRS_o_ai vector float vec_xl(signed long long __offset,
+static inline __ATTRS_o_ai vector float vec_xl(ptrdiff_t __offset,
const float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_float *)__addr;
@@ -16958,36 +17599,37 @@ typedef vector unsigned long long unaligned_vec_ull __attribute__((aligned(1)));
typedef vector double unaligned_vec_double __attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed long long
-vec_xl(signed long long __offset, const signed long long *__ptr) {
+vec_xl(ptrdiff_t __offset, const signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_sll *)__addr;
}
static inline __ATTRS_o_ai vector unsigned long long
-vec_xl(signed long long __offset, const unsigned long long *__ptr) {
+vec_xl(ptrdiff_t __offset, const unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ull *)__addr;
}
-static inline __ATTRS_o_ai vector double vec_xl(signed long long __offset,
+static inline __ATTRS_o_ai vector double vec_xl(ptrdiff_t __offset,
const double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_double *)__addr;
}
#endif
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
typedef vector signed __int128 unaligned_vec_si128 __attribute__((aligned(1)));
typedef vector unsigned __int128 unaligned_vec_ui128
__attribute__((aligned(1)));
static inline __ATTRS_o_ai vector signed __int128
-vec_xl(signed long long __offset, const signed __int128 *__ptr) {
+vec_xl(ptrdiff_t __offset, const signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_si128 *)__addr;
}
static inline __ATTRS_o_ai vector unsigned __int128
-vec_xl(signed long long __offset, const unsigned __int128 *__ptr) {
+vec_xl(ptrdiff_t __offset, const unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
return *(unaligned_vec_ui128 *)__addr;
}
@@ -16997,27 +17639,27 @@ vec_xl(signed long long __offset, const unsigned __int128 *__ptr) {
#ifdef __LITTLE_ENDIAN__
static __inline__ vector signed char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, const signed char *__ptr) {
+vec_xl_be(ptrdiff_t __offset, const signed char *__ptr) {
vector signed char __vec = (vector signed char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
13, 12, 11, 10, 9, 8);
}
static __inline__ vector unsigned char __ATTRS_o_ai
-vec_xl_be(signed long long __offset, const unsigned char *__ptr) {
+vec_xl_be(ptrdiff_t __offset, const unsigned char *__ptr) {
vector unsigned char __vec = (vector unsigned char)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 7, 6, 5, 4, 3, 2, 1, 0, 15, 14,
13, 12, 11, 10, 9, 8);
}
-static __inline__ vector signed short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, const signed short *__ptr) {
+static __inline__ vector signed short __ATTRS_o_ai
+vec_xl_be(ptrdiff_t __offset, const signed short *__ptr) {
vector signed short __vec = (vector signed short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
}
static __inline__ vector unsigned short __ATTRS_o_ai
-vec_xl_be(signed long long __offset, const unsigned short *__ptr) {
+vec_xl_be(ptrdiff_t __offset, const unsigned short *__ptr) {
vector unsigned short __vec = (vector unsigned short)__builtin_vsx_lxvd2x_be(__offset, __ptr);
return __builtin_shufflevector(__vec, __vec, 3, 2, 1, 0, 7, 6, 5, 4);
}
@@ -17054,7 +17696,8 @@ vec_xl_be(signed long long __offset, const double *__ptr) {
}
#endif
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_be(signed long long __offset, const signed __int128 *__ptr) {
return vec_xl(__offset, __ptr);
@@ -17069,98 +17712,153 @@ vec_xl_be(signed long long __offset, const unsigned __int128 *__ptr) {
#define vec_xl_be vec_xl
#endif
-#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+#if defined(__POWER10_VECTOR__) && defined(__VSX__) && \
+ defined(__SIZEOF_INT128__)
/* vect_xl_sext */
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(signed long long __offset, const signed char *__pointer) {
+vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(signed long long __offset, const signed short *__pointer) {
+vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(signed long long __offset, const signed int *__pointer) {
+vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_sext(signed long long __offset, const signed long long *__pointer) {
+vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
/* vec_xl_zext */
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(signed long long __offset, const unsigned char *__pointer) {
+vec_xl_zext(ptrdiff_t __offset, const unsigned char *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(signed long long __offset, const unsigned short *__pointer) {
+vec_xl_zext(ptrdiff_t __offset, const unsigned short *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(signed long long __offset, const unsigned int *__pointer) {
+vec_xl_zext(ptrdiff_t __offset, const unsigned int *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
-vec_xl_zext(signed long long __offset, const unsigned long long *__pointer) {
+vec_xl_zext(ptrdiff_t __offset, const unsigned long long *__pointer) {
return (vector unsigned __int128)*(__pointer + __offset);
}
#endif
+/* vec_xlds */
+#ifdef __VSX__
+static __inline__ vector signed long long __ATTRS_o_ai
+vec_xlds(ptrdiff_t __offset, const signed long long *__ptr) {
+ signed long long *__addr = (signed long long*)((signed char *)__ptr + __offset);
+ return (vector signed long long) *__addr;
+}
+
+static __inline__ vector unsigned long long __ATTRS_o_ai
+vec_xlds(ptrdiff_t __offset, const unsigned long long *__ptr) {
+ unsigned long long *__addr = (unsigned long long *)((signed char *)__ptr + __offset);
+ return (unaligned_vec_ull) *__addr;
+}
+
+static __inline__ vector double __ATTRS_o_ai vec_xlds(ptrdiff_t __offset,
+ const double *__ptr) {
+ double *__addr = (double*)((signed char *)__ptr + __offset);
+ return (unaligned_vec_double) *__addr;
+}
+
+/* vec_load_splats */
+static __inline__ vector signed int __ATTRS_o_ai
+vec_load_splats(signed long long __offset, const signed int *__ptr) {
+ signed int *__addr = (signed int*)((signed char *)__ptr + __offset);
+ return (vector signed int)*__addr;
+}
+
+static __inline__ vector signed int __ATTRS_o_ai
+vec_load_splats(unsigned long long __offset, const signed int *__ptr) {
+ signed int *__addr = (signed int*)((signed char *)__ptr + __offset);
+ return (vector signed int)*__addr;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_load_splats(signed long long __offset, const unsigned int *__ptr) {
+ unsigned int *__addr = (unsigned int*)((signed char *)__ptr + __offset);
+ return (vector unsigned int)*__addr;
+}
+
+static __inline__ vector unsigned int __ATTRS_o_ai
+vec_load_splats(unsigned long long __offset, const unsigned int *__ptr) {
+ unsigned int *__addr = (unsigned int*)((signed char *)__ptr + __offset);
+ return (vector unsigned int)*__addr;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_load_splats(signed long long __offset, const float *__ptr) {
+ float *__addr = (float*)((signed char *)__ptr + __offset);
+ return (vector float)*__addr;
+}
+
+static __inline__ vector float __ATTRS_o_ai
+vec_load_splats(unsigned long long __offset, const float *__ptr) {
+ float *__addr = (float*)((signed char *)__ptr + __offset);
+ return (vector float)*__addr;
+}
+#endif
+
/* vec_xst */
-static inline __ATTRS_o_ai void vec_xst(vector signed char __vec,
- signed long long __offset,
- signed char *__ptr) {
+#define vec_xstd2 vec_xst
+#define vec_xstw4 vec_xst
+static inline __ATTRS_o_ai void
+vec_xst(vector signed char __vec, ptrdiff_t __offset, signed char *__ptr) {
*(unaligned_vec_schar *)(__ptr + __offset) = __vec;
}
-static inline __ATTRS_o_ai void vec_xst(vector unsigned char __vec,
- signed long long __offset,
- unsigned char *__ptr) {
+static inline __ATTRS_o_ai void
+vec_xst(vector unsigned char __vec, ptrdiff_t __offset, unsigned char *__ptr) {
*(unaligned_vec_uchar *)(__ptr + __offset) = __vec;
}
-static inline __ATTRS_o_ai void vec_xst(vector signed short __vec,
- signed long long __offset,
- signed short *__ptr) {
+static inline __ATTRS_o_ai void
+vec_xst(vector signed short __vec, ptrdiff_t __offset, signed short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sshort *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned short __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned short *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ushort *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector signed int __vec,
- signed long long __offset,
- signed int *__ptr) {
+ ptrdiff_t __offset, signed int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sint *)__addr = __vec;
}
-static inline __ATTRS_o_ai void vec_xst(vector unsigned int __vec,
- signed long long __offset,
- unsigned int *__ptr) {
+static inline __ATTRS_o_ai void
+vec_xst(vector unsigned int __vec, ptrdiff_t __offset, unsigned int *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_uint *)__addr = __vec;
}
-static inline __ATTRS_o_ai void vec_xst(vector float __vec,
- signed long long __offset,
+static inline __ATTRS_o_ai void vec_xst(vector float __vec, ptrdiff_t __offset,
float *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_float *)__addr = __vec;
@@ -17168,37 +17866,37 @@ static inline __ATTRS_o_ai void vec_xst(vector float __vec,
#ifdef __VSX__
static inline __ATTRS_o_ai void vec_xst(vector signed long long __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
signed long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_sll *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned long long __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned long long *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ull *)__addr = __vec;
}
-static inline __ATTRS_o_ai void vec_xst(vector double __vec,
- signed long long __offset,
+static inline __ATTRS_o_ai void vec_xst(vector double __vec, ptrdiff_t __offset,
double *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_double *)__addr = __vec;
}
#endif
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
static inline __ATTRS_o_ai void vec_xst(vector signed __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
signed __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_si128 *)__addr = __vec;
}
static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned __int128 *__ptr) {
signed char *__addr = (signed char *)__ptr + __offset;
*(unaligned_vec_ui128 *)__addr = __vec;
@@ -17207,51 +17905,52 @@ static inline __ATTRS_o_ai void vec_xst(vector unsigned __int128 __vec,
/* vec_xst_trunc */
-#if defined(__POWER10_VECTOR__) && defined(__VSX__)
+#if defined(__POWER10_VECTOR__) && defined(__VSX__) && \
+ defined(__SIZEOF_INT128__)
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
signed char *__ptr) {
*(__ptr + __offset) = (signed char)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned char *__ptr) {
*(__ptr + __offset) = (unsigned char)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
signed short *__ptr) {
*(__ptr + __offset) = (signed short)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned short *__ptr) {
*(__ptr + __offset) = (unsigned short)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
signed int *__ptr) {
*(__ptr + __offset) = (signed int)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned int *__ptr) {
*(__ptr + __offset) = (unsigned int)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector signed __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
signed long long *__ptr) {
*(__ptr + __offset) = (signed long long)__vec[0];
}
static inline __ATTRS_o_ai void vec_xst_trunc(vector unsigned __int128 __vec,
- signed long long __offset,
+ ptrdiff_t __offset,
unsigned long long *__ptr) {
*(__ptr + __offset) = (unsigned long long)__vec[0];
}
@@ -17336,7 +18035,8 @@ static __inline__ void __ATTRS_o_ai vec_xst_be(vector double __vec,
}
#endif
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#if defined(__POWER8_VECTOR__) && defined(__powerpc64__) && \
+ defined(__SIZEOF_INT128__)
static __inline__ void __ATTRS_o_ai vec_xst_be(vector signed __int128 __vec,
signed long long __offset,
signed __int128 *__ptr) {
@@ -17375,7 +18075,7 @@ static vector double __ATTRS_o_ai vec_neg(vector double __a) {
#endif
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __VSX__
static vector long long __ATTRS_o_ai vec_neg(vector long long __a) {
return -__a;
}
@@ -17404,7 +18104,7 @@ static vector double __ATTRS_o_ai vec_nabs(vector double __a) {
#endif
-#if defined(__POWER8_VECTOR__) && defined(__powerpc64__)
+#ifdef __POWER8_VECTOR__
static vector long long __ATTRS_o_ai vec_nabs(vector long long __a) {
return __builtin_altivec_vminsd(__a, -__a);
}
@@ -17422,6 +18122,18 @@ static vector signed char __ATTRS_o_ai vec_nabs(vector signed char __a) {
return __builtin_altivec_vminsb(__a, -__a);
}
+static vector float __ATTRS_o_ai vec_recipdiv(vector float __a,
+ vector float __b) {
+ return __builtin_ppc_recipdivf(__a, __b);
+}
+
+#ifdef __VSX__
+static vector double __ATTRS_o_ai vec_recipdiv(vector double __a,
+ vector double __b) {
+ return __builtin_ppc_recipdivd(__a, __b);
+}
+#endif
+
#ifdef __POWER10_VECTOR__
/* vec_extractm */
@@ -17446,10 +18158,12 @@ vec_extractm(vector unsigned long long __a) {
return __builtin_altivec_vextractdm(__a);
}
+#ifdef __SIZEOF_INT128__
static __inline__ unsigned int __ATTRS_o_ai
vec_extractm(vector unsigned __int128 __a) {
return __builtin_altivec_vextractqm(__a);
}
+#endif
/* vec_expandm */
@@ -17473,10 +18187,12 @@ vec_expandm(vector unsigned long long __a) {
return __builtin_altivec_vexpanddm(__a);
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_expandm(vector unsigned __int128 __a) {
return __builtin_altivec_vexpandqm(__a);
}
+#endif
/* vec_cntm */
@@ -17512,10 +18228,12 @@ vec_gendm(unsigned long long __bm) {
return __builtin_altivec_mtvsrdm(__bm);
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_genqm(unsigned long long __bm) {
return __builtin_altivec_mtvsrqm(__bm);
}
+#endif
/* vec_pdep */
@@ -17544,6 +18262,7 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
/* vec_ternarylogic */
#ifdef __VSX__
+#ifdef __SIZEOF_INT128__
#define vec_ternarylogic(__a, __b, __c, __imm) \
_Generic((__a), vector unsigned char \
: __builtin_vsx_xxeval((vector unsigned long long)(__a), \
@@ -17565,6 +18284,25 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
: __builtin_vsx_xxeval((vector unsigned long long)(__a), \
(vector unsigned long long)(__b), \
(vector unsigned long long)(__c), (__imm)))
+#else
+#define vec_ternarylogic(__a, __b, __c, __imm) \
+ _Generic((__a), vector unsigned char \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
+#endif /* __SIZEOF_INT128__ */
#endif /* __VSX__ */
/* vec_genpcvm */
@@ -17657,6 +18395,7 @@ vec_mod(vector unsigned long long __a, vector unsigned long long __b) {
return __a % __b;
}
+#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_mod(vector signed __int128 __a, vector signed __int128 __b) {
return __a % __b;
@@ -17666,6 +18405,7 @@ static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __a % __b;
}
+#endif
/* vec_sldbi */
@@ -18188,6 +18928,7 @@ static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed short __a) {
/* vs[l | r | ra] */
+#ifdef __SIZEOF_INT128__
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_sl(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return __a << (__b % (vector unsigned __int128)(sizeof(unsigned __int128) *
@@ -18232,6 +18973,7 @@ vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
__CHAR_BIT__));
}
+#endif /* __SIZEOF_INT128__ */
#endif /* __POWER10_VECTOR__ */
#undef __ATTRS_o_ai
diff --git a/clang/lib/Headers/amxintrin.h b/clang/lib/Headers/amxintrin.h
index 823c7ca1f076..ec601a58e7c3 100644
--- a/clang/lib/Headers/amxintrin.h
+++ b/clang/lib/Headers/amxintrin.h
@@ -15,8 +15,13 @@
#define __AMXINTRIN_H
#ifdef __x86_64__
+/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS_TILE \
__attribute__((__always_inline__, __nodebug__, __target__("amx-tile")))
+#define __DEFAULT_FN_ATTRS_INT8 \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
+#define __DEFAULT_FN_ATTRS_BF16 \
+ __attribute__((__always_inline__, __nodebug__, __target__("amx-bf16")))
/// Load tile configuration from a 64-byte memory location specified by
/// "mem_addr". The tile configuration includes the tile type palette, the
@@ -25,7 +30,7 @@
/// config and the tile data, and the tiles are zeroed. Any invalid
/// configurations will result in #GP fault.
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> LDTILECFG </c> instruction.
///
@@ -41,7 +46,7 @@ _tile_loadconfig(const void *__config) {
/// palette, the number of bytes per row, and the number of rows. If tiles
/// are not configured, all zeroes will be stored to memory.
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> STTILECFG </c> instruction.
///
@@ -55,7 +60,7 @@ _tile_storeconfig(void *__config) {
/// Release the tile configuration to return to the init state, which
/// releases all storage it currently holds.
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILERELEASE </c> instruction.
static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
@@ -66,7 +71,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// destination tile "dst" using the tile configuration previously configured
/// via "_tile_loadconfig".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
///
@@ -86,7 +91,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// that the data will likely not be reused in the near future and the data
/// caching can be optimized accordingly.
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
///
@@ -104,7 +109,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// "stride" using the tile configuration previously configured via
/// "_tile_loadconfig".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
///
@@ -119,7 +124,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// Zero the tile specified by "tdest".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
///
@@ -133,7 +138,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
/// and store the 32-bit result back to tile "dst".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
///
@@ -152,7 +157,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
/// in "dst", and store the 32-bit result back to tile "dst".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
///
@@ -171,7 +176,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
/// and store the 32-bit result back to tile "dst".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
///
@@ -190,7 +195,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
/// "dst", and store the 32-bit result back to tile "dst".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
///
@@ -208,7 +213,7 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
/// elements with elements in "dst", and store the 32-bit result back to tile
/// "dst".
///
-/// \headerfile <x86intrin.h>
+/// \headerfile <immintrin.h>
///
/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
///
@@ -221,10 +226,12 @@ static __inline__ void __DEFAULT_FN_ATTRS_TILE _tile_release(void) {
#define _tile_dpbf16ps(dst, src0, src1) \
__builtin_ia32_tdpbf16ps((dst), (src0), (src1))
-#define __DEFAULT_FN_ATTRS_INT8 \
- __attribute__((__always_inline__, __nodebug__, __target__("amx-int8")))
-
+/// The AMX tile register size can be configured; the maximum size is
+/// 16x64=1024 bytes. Since there is no 2D type in LLVM IR, we use a vector
+/// type to represent the 2D tile; its fixed size is the maximum tile size.
typedef int _tile1024i __attribute__((__vector_size__(1024), __aligned__(64)));
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
__SIZE_TYPE__ stride) {
@@ -232,12 +239,43 @@ _tile_loadd_internal(unsigned short m, unsigned short n, const void *base,
(__SIZE_TYPE__)(stride));
}
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_loaddt1_internal(unsigned short m, unsigned short n, const void *base,
+ __SIZE_TYPE__ stride) {
+ return __builtin_ia32_tileloaddt164_internal(m, n, base,
+ (__SIZE_TYPE__)(stride));
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
_tile_dpbssd_internal(unsigned short m, unsigned short n, unsigned short k,
_tile1024i dst, _tile1024i src1, _tile1024i src2) {
return __builtin_ia32_tdpbssd_internal(m, n, k, dst, src1, src2);
}
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbsud_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tdpbsud_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbusd_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tdpbusd_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_INT8
+_tile_dpbuud_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tdpbuud_internal(m, n, k, dst, src1, src2);
+}
+
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
static __inline__ void __DEFAULT_FN_ATTRS_INT8
_tile_stored_internal(unsigned short m, unsigned short n, void *base,
__SIZE_TYPE__ stride, _tile1024i tile) {
@@ -245,34 +283,211 @@ _tile_stored_internal(unsigned short m, unsigned short n, void *base,
(__SIZE_TYPE__)(stride), tile);
}
+/// This is an internal intrinsic. C/C++ users should avoid calling it directly.
+static __inline__ _tile1024i __DEFAULT_FN_ATTRS_BF16
+_tile_dpbf16ps_internal(unsigned short m, unsigned short n, unsigned short k,
+ _tile1024i dst, _tile1024i src1, _tile1024i src2) {
+ return __builtin_ia32_tdpbf16ps_internal(m, n, k, dst, src1, src2);
+}
+
+/// This struct packs the shape and tile data together for the user. We
+/// suggest initializing the struct as early as possible, because the compiler
+/// depends on the shape information to do the configuration. Constant values
+/// are preferred for optimization by the compiler.
typedef struct __tile1024i_str {
const unsigned short row;
const unsigned short col;
_tile1024i tile;
} __tile1024i;
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADD </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be loaded in memory.
__DEFAULT_FN_ATTRS_TILE
static void __tile_loadd(__tile1024i *dst, const void *base,
__SIZE_TYPE__ stride) {
dst->tile = _tile_loadd_internal(dst->row, dst->col, base, stride);
}
+/// Load tile rows from memory specified by "base" address and "stride" into
+/// destination tile "dst". This intrinsic provides a hint to the implementation
+/// that the data will likely not be reused in the near future and the data
+/// caching can be optimized accordingly.
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TILELOADDT1 </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be loaded in memory.
+__DEFAULT_FN_ATTRS_TILE
+static void __tile_stream_loadd(__tile1024i *dst, const void *base,
+ __SIZE_TYPE__ stride) {
+ dst->tile = _tile_loaddt1_internal(dst->row, dst->col, base, stride);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSSD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_INT8
-static void __tile_dpbssd(__tile1024i *dst, __tile1024i src1,
- __tile1024i src2) {
- dst->tile = _tile_dpbssd_internal(src1.row, src2.col, src1.col, dst->tile,
- src1.tile, src2.tile);
+static void __tile_dpbssd(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_dpbssd_internal(src0.row, src1.col, src0.col, dst->tile,
+ src0.tile, src1.tile);
}
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of signed 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer
+/// in "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBSUD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbsud(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_dpbsud_internal(src0.row, src1.col, src0.col, dst->tile,
+ src0.tile, src1.tile);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding signed 8-bit integers in src1, producing 4 intermediate 32-bit
+/// results. Sum these 4 results with the corresponding 32-bit integer in "dst",
+/// and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUSD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbusd(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_dpbusd_internal(src0.row, src1.col, src0.col, dst->tile,
+ src0.tile, src1.tile);
+}
+
+/// Compute dot-product of bytes in tiles with a source/destination accumulator.
+/// Multiply groups of 4 adjacent pairs of unsigned 8-bit integers in src0 with
+/// corresponding unsigned 8-bit integers in src1, producing 4 intermediate
+/// 32-bit results. Sum these 4 results with the corresponding 32-bit integer in
+/// "dst", and store the 32-bit result back to tile "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBUUD </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_INT8
+static void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_dpbuud_internal(src0.row, src1.col, src0.col, dst->tile,
+ src0.tile, src1.tile);
+}
+
+/// Store the tile specified by "src" to memory specified by "base" address and
+/// "stride".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
+///
+/// \param dst
+/// A destination tile. Max size is 1024 Bytes.
+/// \param base
+/// A pointer to base address.
+/// \param stride
+/// The stride between the rows' data to be stored in memory.
__DEFAULT_FN_ATTRS_TILE
static void __tile_stored(void *base, __SIZE_TYPE__ stride, __tile1024i src) {
_tile_stored_internal(src.row, src.col, base, stride, src.tile);
}
+/// Zero the tile specified by "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TILEZERO </c> instruction.
+///
+/// \param dst
+/// The destination tile to be zero. Max size is 1024 Bytes.
__DEFAULT_FN_ATTRS_TILE
static void __tile_zero(__tile1024i *dst) {
dst->tile = __builtin_ia32_tilezero_internal(dst->row, dst->col);
}
+/// Compute dot-product of BF16 (16-bit) floating-point pairs in tiles src0 and
+/// src1, accumulating the intermediate single-precision (32-bit) floating-point
+/// elements with elements in "dst", and store the 32-bit result back to tile
+/// "dst".
+///
+/// \headerfile <immintrin.h>
+///
+/// This intrinsic corresponds to the <c> TDPBF16PS </c> instruction.
+///
+/// \param dst
+/// The destination tile. Max size is 1024 Bytes.
+/// \param src0
+/// The 1st source tile. Max size is 1024 Bytes.
+/// \param src1
+/// The 2nd source tile. Max size is 1024 Bytes.
+__DEFAULT_FN_ATTRS_BF16
+static void __tile_dpbf16ps(__tile1024i *dst, __tile1024i src0,
+ __tile1024i src1) {
+ dst->tile = _tile_dpbf16ps_internal(src0.row, src1.col, src0.col, dst->tile,
+ src0.tile, src1.tile);
+}
+
+#undef __DEFAULT_FN_ATTRS_TILE
+#undef __DEFAULT_FN_ATTRS_INT8
+#undef __DEFAULT_FN_ATTRS_BF16
+
#endif /* __x86_64__ */
#endif /* __AMXINTRIN_H */
diff --git a/clang/lib/Headers/arm_acle.h b/clang/lib/Headers/arm_acle.h
index c156d89c1f84..45fac248dadb 100644
--- a/clang/lib/Headers/arm_acle.h
+++ b/clang/lib/Headers/arm_acle.h
@@ -639,6 +639,49 @@ __jcvt(double __a) {
}
#endif
+/* Armv8.5-A FP rounding intrinsics */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_FRINT)
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint32zf(float __a) {
+ return __builtin_arm_frint32zf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint32z(double __a) {
+ return __builtin_arm_frint32z(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint64zf(float __a) {
+ return __builtin_arm_frint64zf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint64z(double __a) {
+ return __builtin_arm_frint64z(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint32xf(float __a) {
+ return __builtin_arm_frint32xf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint32x(double __a) {
+ return __builtin_arm_frint32x(__a);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+__frint64xf(float __a) {
+ return __builtin_arm_frint64xf(__a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+__frint64x(double __a) {
+ return __builtin_arm_frint64x(__a);
+}
+#endif
+
/* Armv8.7-A load/store 64-byte intrinsics */
#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_LS64)
typedef struct {
@@ -709,6 +752,18 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#endif /* __ARM_FEATURE_TME */
+/* Armv8.5-A Random number generation intrinsics */
+#if __ARM_64BIT_STATE && defined(__ARM_FEATURE_RNG)
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__rndr(uint64_t *__p) {
+ return __builtin_arm_rndr(__p);
+}
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+__rndrrs(uint64_t *__p) {
+ return __builtin_arm_rndrrs(__p);
+}
+#endif
+
#if defined(__cplusplus)
}
#endif
diff --git a/clang/lib/Headers/avx512fintrin.h b/clang/lib/Headers/avx512fintrin.h
index 2ee4350b14d4..010bcadab019 100644
--- a/clang/lib/Headers/avx512fintrin.h
+++ b/clang/lib/Headers/avx512fintrin.h
@@ -9297,9 +9297,15 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
/* Vector-reduction arithmetic accepts vectors as inputs and produces scalars as
* outputs. This class of vector operation forms the basis of many scientific
- * computations. In vector-reduction arithmetic, the evaluation off is
+ * computations. In vector-reduction arithmetic, the evaluation order is
* independent of the order of the input elements of V.
+ * For floating-point intrinsics:
+ * 1. When using fadd/fmul intrinsics, the order of operations within the
+ * vector is unspecified (associative math).
+ * 2. When using fmin/fmax intrinsics, NaN or -0.0 elements within the vector
+ * produce unspecified results.
+ *
* Used bisection method. At each step, we partition the vector with previous
* step in half, and the operation is performed on its two halves.
* This takes log2(n) steps where n is the number of elements in the vector.
@@ -9345,8 +9351,11 @@ _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) {
return __builtin_ia32_reduce_or_q512(__W);
}
+// -0.0 is used to ignore the start value since it is the neutral value of
+// floating point addition. For more information, please refer to
+// https://llvm.org/docs/LangRef.html#llvm-vector-reduce-fadd-intrinsic
static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_add_pd(__m512d __W) {
- return __builtin_ia32_reduce_fadd_pd512(0.0, __W);
+ return __builtin_ia32_reduce_fadd_pd512(-0.0, __W);
}
static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W) {
@@ -9356,7 +9365,7 @@ static __inline__ double __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_pd(__m512d __W)
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) {
__W = _mm512_maskz_mov_pd(__M, __W);
- return __builtin_ia32_reduce_fadd_pd512(0.0, __W);
+ return __builtin_ia32_reduce_fadd_pd512(-0.0, __W);
}
static __inline__ double __DEFAULT_FN_ATTRS512
@@ -9411,7 +9420,7 @@ _mm512_mask_reduce_or_epi32(__mmask16 __M, __m512i __W) {
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_add_ps(__m512 __W) {
- return __builtin_ia32_reduce_fadd_ps512(0.0f, __W);
+ return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9422,7 +9431,7 @@ _mm512_reduce_mul_ps(__m512 __W) {
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_ps(__mmask16 __M, __m512 __W) {
__W = _mm512_maskz_mov_ps(__M, __W);
- return __builtin_ia32_reduce_fadd_ps512(0.0f, __W);
+ return __builtin_ia32_reduce_fadd_ps512(-0.0f, __W);
}
static __inline__ float __DEFAULT_FN_ATTRS512
@@ -9518,75 +9527,49 @@ _mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
return __builtin_ia32_reduce_umin_d512((__v16si)__V);
}
-#define _mm512_mask_reduce_operator(op) \
- __m256d __t1 = _mm512_extractf64x4_pd(__V, 0); \
- __m256d __t2 = _mm512_extractf64x4_pd(__V, 1); \
- __m256d __t3 = _mm256_##op(__t1, __t2); \
- __m128d __t4 = _mm256_extractf128_pd(__t3, 0); \
- __m128d __t5 = _mm256_extractf128_pd(__t3, 1); \
- __m128d __t6 = _mm_##op(__t4, __t5); \
- __m128d __t7 = __builtin_shufflevector(__t6, __t6, 1, 0); \
- __m128d __t8 = _mm_##op(__t6, __t7); \
- return __t8[0]
-
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_reduce_max_pd(__m512d __V) {
- _mm512_mask_reduce_operator(max_pd);
+ return __builtin_ia32_reduce_fmax_pd512(__V);
}
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_reduce_min_pd(__m512d __V) {
- _mm512_mask_reduce_operator(min_pd);
+ return __builtin_ia32_reduce_fmin_pd512(__V);
}
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_pd(__mmask8 __M, __m512d __V) {
__V = _mm512_mask_mov_pd(_mm512_set1_pd(-__builtin_inf()), __M, __V);
- _mm512_mask_reduce_operator(max_pd);
+ return __builtin_ia32_reduce_fmax_pd512(__V);
}
static __inline__ double __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_pd(__mmask8 __M, __m512d __V) {
__V = _mm512_mask_mov_pd(_mm512_set1_pd(__builtin_inf()), __M, __V);
- _mm512_mask_reduce_operator(min_pd);
-}
-#undef _mm512_mask_reduce_operator
-
-#define _mm512_mask_reduce_operator(op) \
- __m256 __t1 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 0); \
- __m256 __t2 = (__m256)_mm512_extractf64x4_pd((__m512d)__V, 1); \
- __m256 __t3 = _mm256_##op(__t1, __t2); \
- __m128 __t4 = _mm256_extractf128_ps(__t3, 0); \
- __m128 __t5 = _mm256_extractf128_ps(__t3, 1); \
- __m128 __t6 = _mm_##op(__t4, __t5); \
- __m128 __t7 = __builtin_shufflevector(__t6, __t6, 2, 3, 0, 1); \
- __m128 __t8 = _mm_##op(__t6, __t7); \
- __m128 __t9 = __builtin_shufflevector(__t8, __t8, 1, 0, 3, 2); \
- __m128 __t10 = _mm_##op(__t8, __t9); \
- return __t10[0]
+ return __builtin_ia32_reduce_fmin_pd512(__V);
+}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_max_ps(__m512 __V) {
- _mm512_mask_reduce_operator(max_ps);
+ return __builtin_ia32_reduce_fmax_ps512(__V);
}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_reduce_min_ps(__m512 __V) {
- _mm512_mask_reduce_operator(min_ps);
+ return __builtin_ia32_reduce_fmin_ps512(__V);
}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_max_ps(__mmask16 __M, __m512 __V) {
__V = _mm512_mask_mov_ps(_mm512_set1_ps(-__builtin_inff()), __M, __V);
- _mm512_mask_reduce_operator(max_ps);
+ return __builtin_ia32_reduce_fmax_ps512(__V);
}
static __inline__ float __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_ps(__mmask16 __M, __m512 __V) {
__V = _mm512_mask_mov_ps(_mm512_set1_ps(__builtin_inff()), __M, __V);
- _mm512_mask_reduce_operator(min_ps);
+ return __builtin_ia32_reduce_fmin_ps512(__V);
}
-#undef _mm512_mask_reduce_operator
/// Moves the least significant 32 bits of a vector of [16 x i32] to a
/// 32-bit signed integer value.
@@ -9605,6 +9588,169 @@ _mm512_cvtsi512_si32(__m512i __A) {
return __b[0];
}
+/// Loads 8 double-precision (64-bit) floating-point elements from memory
+/// locations starting at location \a base_addr at packed 32-bit integer indices
+/// stored in the lower half of \a vindex scaled by \a scale, and stores them in dst.
+///
+/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// dst[i+63:i] := MEM[addr+63:addr]
+/// ENDFOR
+/// dst[MAX:512] := 0
+/// \endoperation
+#define _mm512_i32logather_pd(vindex, base_addr, scale) \
+ _mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
+
+/// Loads 8 double-precision (64-bit) floating-point elements from memory
+/// starting at location \a base_addr at packed 32-bit integer indices stored in
+/// the lower half of \a vindex scaled by \a scale into dst using writemask
+/// \a mask (elements are copied from \a src when the corresponding mask bit is
+/// not set).
+///
+/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// IF mask[j]
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// dst[i+63:i] := MEM[addr+63:addr]
+/// ELSE
+/// dst[i+63:i] := src[i+63:i]
+/// FI
+/// ENDFOR
+/// dst[MAX:512] := 0
+/// \endoperation
+#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale) \
+ _mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex), \
+ (base_addr), (scale))
+
+/// Loads 8 64-bit integer elements from memory starting at location \a base_addr
+/// at packed 32-bit integer indices stored in the lower half of \a vindex
+/// scaled by \a scale and stores them in dst.
+///
+/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// dst[i+63:i] := MEM[addr+63:addr]
+/// ENDFOR
+/// dst[MAX:512] := 0
+/// \endoperation
+#define _mm512_i32logather_epi64(vindex, base_addr, scale) \
+ _mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
+
+/// Loads 8 64-bit integer elements from memory starting at location \a base_addr
+/// at packed 32-bit integer indices stored in the lower half of \a vindex
+/// scaled by \a scale and stores them in dst using writemask \a mask (elements
+/// are copied from \a src when the corresponding mask bit is not set).
+///
+/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// IF mask[j]
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// dst[i+63:i] := MEM[addr+63:addr]
+/// ELSE
+/// dst[i+63:i] := src[i+63:i]
+/// FI
+/// ENDFOR
+/// dst[MAX:512] := 0
+/// \endoperation
+#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale) \
+ _mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex), \
+ (base_addr), (scale))
+
+/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1
+/// to memory locations starting at location \a base_addr at packed 32-bit
+/// integer indices stored in \a vindex scaled by \a scale.
+///
+/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// MEM[addr+63:addr] := v1[i+63:i]
+/// ENDFOR
+/// \endoperation
+#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale) \
+ _mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
+
+/// Stores 8 packed double-precision (64-bit) floating-point elements in \a v1
+/// to memory locations starting at location \a base_addr at packed 32-bit
+/// integer indices stored in \a vindex scaled by \a scale. Only those elements
+/// whose corresponding mask bit is set in writemask \a mask are written to
+/// memory.
+///
+/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// IF mask[j]
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// MEM[addr+63:addr] := v1[i+63:i]
+/// FI
+/// ENDFOR
+/// \endoperation
+#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale) \
+ _mm512_mask_i32scatter_pd((base_addr), (mask), \
+ _mm512_castsi512_si256(vindex), (v1), (scale))
+
+/// Stores 8 packed 64-bit integer elements located in \a v1 in memory
+/// locations starting at location \a base_addr at packed 32-bit integer
+/// indices stored in \a vindex scaled by \a scale.
+///
+/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// MEM[addr+63:addr] := v1[i+63:i]
+/// ENDFOR
+/// \endoperation
+#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale) \
+ _mm512_i32scatter_epi64((base_addr), \
+ _mm512_castsi512_si256(vindex), (v1), (scale))
+
+/// Stores 8 packed 64-bit integer elements located in \a v1 in memory
+/// locations starting at location \a base_addr at packed 32-bit integer
+/// indices stored in \a vindex scaled by \a scale using writemask \a mask (elements
+/// whose corresponding mask bit is not set are not written to memory).
+///
+/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
+///
+/// \operation
+/// FOR j := 0 to 7
+/// i := j*64
+/// m := j*32
+/// IF mask[j]
+/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
+/// MEM[addr+63:addr] := v1[i+63:i]
+/// FI
+/// ENDFOR
+/// \endoperation
+#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale) \
+ _mm512_mask_i32scatter_epi64((base_addr), (mask), \
+ _mm512_castsi512_si256(vindex), (v1), (scale))
+
#undef __DEFAULT_FN_ATTRS512
#undef __DEFAULT_FN_ATTRS128
#undef __DEFAULT_FN_ATTRS
diff --git a/clang/lib/Headers/builtins.h b/clang/lib/Headers/builtins.h
new file mode 100644
index 000000000000..65095861ca9b
--- /dev/null
+++ b/clang/lib/Headers/builtins.h
@@ -0,0 +1,16 @@
+/*===---- builtins.h - Standard header for extra builtins -----------------===*\
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+\*===----------------------------------------------------------------------===*/
+
+/// Some legacy compilers have builtin definitions in a file named builtins.h.
+/// This header file has been added to allow compatibility with code that was
+/// written for those compilers. Such code may contain an include line for this
+/// file; to avoid an include error, an empty file with this name is provided.
+#ifndef __BUILTINS_H
+#define __BUILTINS_H
+
+#endif /* __BUILTINS_H */
diff --git a/clang/lib/Headers/cuda_wrappers/complex b/clang/lib/Headers/cuda_wrappers/complex
index 11d40a82a8f6..e6805b6044e9 100644
--- a/clang/lib/Headers/cuda_wrappers/complex
+++ b/clang/lib/Headers/cuda_wrappers/complex
@@ -72,8 +72,16 @@
#define _GLIBCXX_USE_C99_COMPLEX 0
#define _GLIBCXX_USE_C99_COMPLEX_TR1 0
+// Work around a compatibility issue with libstdc++ 11.1.0
+// https://bugs.llvm.org/show_bug.cgi?id=50383
+#pragma push_macro("__failed_assertion")
+#if _GLIBCXX_RELEASE == 11
+#define __failed_assertion __cuda_failed_assertion
+#endif
+
#include_next <complex>
+#pragma pop_macro("__failed_assertion")
#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX_TR1")
#pragma pop_macro("_GLIBCXX_USE_C99_COMPLEX")
diff --git a/clang/lib/Headers/hexagon_circ_brev_intrinsics.h b/clang/lib/Headers/hexagon_circ_brev_intrinsics.h
new file mode 100644
index 000000000000..c53786d3c37b
--- /dev/null
+++ b/clang/lib/Headers/hexagon_circ_brev_intrinsics.h
@@ -0,0 +1,298 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_
+#define _HEXAGON_CIRC_BREV_INTRINSICS_H_ 1
+
+#include <hexagon_protos.h>
+#include <stdint.h>
+
+/* Circular Load */
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_load_update_D(Word64 dst, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_load_update_D(dest,ptr,incr,bufsize,K) \
+ { ptr = (int64_t *) HEXAGON_circ_ldd (ptr, &(dest), ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_load_update_W(Word32 dst, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_load_update_W(dest,ptr,incr,bufsize,K) \
+ { ptr = (int *) HEXAGON_circ_ldw (ptr, &(dest), (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_load_update_H(Word16 dst, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_load_update_H(dest,ptr,incr,bufsize,K) \
+ { ptr = (int16_t *) HEXAGON_circ_ldh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_load_update_UH( UWord16 dst, UWord16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_load_update_UH(dest,ptr,incr,bufsize,K) \
+ { ptr = (uint16_t *) HEXAGON_circ_lduh (ptr, &(dest), ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_load_update_B(Word8 dst, Word8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_load_update_B(dest,ptr,incr,bufsize,K) \
+ { ptr = (int8_t *) HEXAGON_circ_ldb (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_load_update_UB(dest,ptr,incr,bufsize,K) \
+ { ptr = (uint8_t *) HEXAGON_circ_ldub (ptr, &(dest), ((((K)-2)<<24)|(bufsize)), incr); }
+
+/* Circular Store */
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_store_update_D(Word64 *src, Word64 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_store_update_D(src,ptr,incr,bufsize,K) \
+ { ptr = (int64_t *) HEXAGON_circ_std (ptr, src, ((((K)+1)<<24)|((bufsize)<<3)), ((incr)*8)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_store_update_W(Word32 *src, Word32 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_store_update_W(src,ptr,incr,bufsize,K) \
+ { ptr = (int *) HEXAGON_circ_stw (ptr, src, (((K)<<24)|((bufsize)<<2)), ((incr)*4)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_store_update_HL(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_store_update_HL(src,ptr,incr,bufsize,K) \
+ { ptr = (int16_t *) HEXAGON_circ_sth (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_store_update_HH(Word16 *src, Word16 *ptr, UWord32 incr, UWord32 bufsize, UWord32 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_store_update_HH(src,ptr,incr,bufsize,K) \
+ { ptr = (int16_t *) HEXAGON_circ_sthhi (ptr, src, ((((K)-1)<<24)|((bufsize)<<1)), ((incr)*2)); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_circ_store_update_B(Word8 *src, Word8 *ptr, UWord32 I4, UWord32 bufsize, UWord64 K)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_circ_store_update_B(src,ptr,incr,bufsize,K) \
+ { ptr = (int8_t *) HEXAGON_circ_stb (ptr, src, ((((K)-2)<<24)|(bufsize)), incr); }
+
+
+/* Bit Reverse Load */
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_load_update_D(Word64 dst, Word64 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_load_update_D(dest,ptr,log2bufsize) \
+ { ptr = (int64_t *) HEXAGON_brev_ldd (ptr, &(dest), (1<<(16-((log2bufsize) + 3)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_load_update_W(Word32 dst, Word32 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_load_update_W(dest,ptr,log2bufsize) \
+ { ptr = (int *) HEXAGON_brev_ldw (ptr, &(dest), (1<<(16-((log2bufsize) + 2)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_load_update_H(Word16 dst, Word16 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_load_update_H(dest,ptr,log2bufsize) \
+ { ptr = (int16_t *) HEXAGON_brev_ldh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_load_update_UH(UWord16 dst, UWord16 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_load_update_UH(dest,ptr,log2bufsize) \
+ { ptr = (uint16_t *) HEXAGON_brev_lduh (ptr, &(dest), (1<<(16-((log2bufsize) + 1)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_load_update_B(Word8 dst, Word8 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_load_update_B(dest,ptr,log2bufsize) \
+ { ptr = (int8_t *) HEXAGON_brev_ldb (ptr, &(dest), (1<<(16-((log2bufsize))))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_load_update_UB(UWord8 dst, UWord8 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_load_update_UB(dest,ptr,log2bufsize) \
+ { ptr = (uint8_t *) HEXAGON_brev_ldub (ptr, &(dest), (1<<(16-((log2bufsize))))); }
+
+/* Bit Reverse Store */
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_store_update_D(Word64 *src, Word64 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_store_update_D(src,ptr,log2bufsize) \
+ { ptr = (int64_t *) HEXAGON_brev_std (ptr, src, (1<<(16-((log2bufsize) + 3)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_store_update_W(Word32 *src, Word32 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_store_update_W(src,ptr,log2bufsize) \
+ { ptr = (int *) HEXAGON_brev_stw (ptr, src, (1<<(16-((log2bufsize) + 2)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_store_update_HL(Word16 *src, Word16 *ptr, Word32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_store_update_HL(src,ptr,log2bufsize) \
+ { ptr = (int16_t *) HEXAGON_brev_sth (ptr, src, (1<<(16-((log2bufsize) + 1)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_store_update_HH(Word16 *src, Word16 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_store_update_HH(src,ptr,log2bufsize) \
+ { ptr = (int16_t *) HEXAGON_brev_sthhi (ptr, src, (1<<(16-((log2bufsize) + 1)))); }
+
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: void Q6_bitrev_store_update_B(Word8 *src, Word8 *ptr, UWord32 Iu4)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#define Q6_bitrev_store_update_B(src,ptr,log2bufsize) \
+ { ptr = (int8_t *) HEXAGON_brev_stb (ptr, src, (1<<(16-((log2bufsize))))); }
+
+
+#define HEXAGON_circ_ldd __builtin_circ_ldd
+#define HEXAGON_circ_ldw __builtin_circ_ldw
+#define HEXAGON_circ_ldh __builtin_circ_ldh
+#define HEXAGON_circ_lduh __builtin_circ_lduh
+#define HEXAGON_circ_ldb __builtin_circ_ldb
+#define HEXAGON_circ_ldub __builtin_circ_ldub
+
+
+#define HEXAGON_circ_std __builtin_circ_std
+#define HEXAGON_circ_stw __builtin_circ_stw
+#define HEXAGON_circ_sth __builtin_circ_sth
+#define HEXAGON_circ_sthhi __builtin_circ_sthhi
+#define HEXAGON_circ_stb __builtin_circ_stb
+
+
+#define HEXAGON_brev_ldd __builtin_brev_ldd
+#define HEXAGON_brev_ldw __builtin_brev_ldw
+#define HEXAGON_brev_ldh __builtin_brev_ldh
+#define HEXAGON_brev_lduh __builtin_brev_lduh
+#define HEXAGON_brev_ldb __builtin_brev_ldb
+#define HEXAGON_brev_ldub __builtin_brev_ldub
+
+#define HEXAGON_brev_std __builtin_brev_std
+#define HEXAGON_brev_stw __builtin_brev_stw
+#define HEXAGON_brev_sth __builtin_brev_sth
+#define HEXAGON_brev_sthhi __builtin_brev_sthhi
+#define HEXAGON_brev_stb __builtin_brev_stb
+
+#ifdef __HVX__
+/* ==========================================================================
+ Assembly Syntax: if (Qt) vmem(Rt+#0) = Vs
+ C Intrinsic Prototype: void Q6_vmaskedstoreq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
+ Instruction Type: COPROC_VMEM
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmaskedstoreq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstoreq)
+
+/* ==========================================================================
+ Assembly Syntax: if (!Qt) vmem(Rt+#0) = Vs
+ C Intrinsic Prototype: void Q6_vmaskedstorenq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
+ Instruction Type: COPROC_VMEM
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmaskedstorenq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorenq)
+
+/* ==========================================================================
+ Assembly Syntax: if (Qt) vmem(Rt+#0):nt = Vs
+ C Intrinsic Prototype: void Q6_vmaskedstorentq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
+ Instruction Type: COPROC_VMEM
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmaskedstorentq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentq)
+
+/* ==========================================================================
+ Assembly Syntax: if (!Qt) vmem(Rt+#0):nt = Vs
+ C Intrinsic Prototype: void Q6_vmaskedstorentnq_QAV(HVX_VectorPred Qt, HVX_VectorAddress A, HVX_Vector Vs)
+ Instruction Type: COPROC_VMEM
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmaskedstorentnq_QAV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaskedstorentnq)
+
+#endif
+
+
+#endif /* #ifndef _HEXAGON_CIRC_BREV_INTRINSICS_H_ */
+
+#ifdef __NOT_DEFINED__
+/*** comment block template ***/
+/* ==========================================================================
+ Assembly Syntax: Return=instruction()
+ C Intrinsic Prototype: ReturnType Intrinsic(ParamType Rs, ParamType Rt)
+ Instruction Type: InstructionType
+ Execution Slots: SLOT0123
+ ========================================================================== */
+#endif /*** __NOT_DEFINED__ ***/
diff --git a/clang/lib/Headers/hexagon_protos.h b/clang/lib/Headers/hexagon_protos.h
new file mode 100644
index 000000000000..cdffd93bb859
--- /dev/null
+++ b/clang/lib/Headers/hexagon_protos.h
@@ -0,0 +1,8450 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Automatically generated file, do not edit!
+//===----------------------------------------------------------------------===//
+
+
+
+#ifndef __HEXAGON_PROTOS_H_
+#define __HEXAGON_PROTOS_H_ 1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=abs(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_abs_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_abs_R __builtin_HEXAGON_A2_abs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=abs(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_abs_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_abs_P __builtin_HEXAGON_A2_absp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=abs(Rs32):sat
+ C Intrinsic Prototype: Word32 Q6_R_abs_R_sat(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_abs_R_sat __builtin_HEXAGON_A2_abssat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_add_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_add_RR __builtin_HEXAGON_A2_add
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RhRh_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RhRh_s16 __builtin_HEXAGON_A2_addh_h16_hh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RhRl_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RhRl_s16 __builtin_HEXAGON_A2_addh_h16_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRh_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRh_s16 __builtin_HEXAGON_A2_addh_h16_lh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRl_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRl_s16 __builtin_HEXAGON_A2_addh_h16_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RhRh_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RhRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RhRl_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RhRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_lh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.h)
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRh(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRh __builtin_HEXAGON_A2_addh_l16_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.l)
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRl(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRl __builtin_HEXAGON_A2_addh_l16_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRh_sat __builtin_HEXAGON_A2_addh_l16_sat_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_RlRl_sat __builtin_HEXAGON_A2_addh_l16_sat_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rs32,#s16)
+ C Intrinsic Prototype: Word32 Q6_R_add_RI(Word32 Rs, Word32 Is16)
+ Instruction Type: ALU32_ADDI
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_add_RI __builtin_HEXAGON_A2_addi
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=add(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_add_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_add_PP __builtin_HEXAGON_A2_addp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=add(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_add_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_add_PP_sat __builtin_HEXAGON_A2_addpsat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_add_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_add_RR_sat __builtin_HEXAGON_A2_addsat
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=add(Rs32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_add_RP(Word32 Rs, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_add_RP __builtin_HEXAGON_A2_addsp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=and(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_and_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_and_RR __builtin_HEXAGON_A2_and
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=and(Rs32,#s10)
+ C Intrinsic Prototype: Word32 Q6_R_and_RI(Word32 Rs, Word32 Is10)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_and_RI __builtin_HEXAGON_A2_andir
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=and(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_and_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_and_PP __builtin_HEXAGON_A2_andp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=aslh(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_aslh_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_aslh_R __builtin_HEXAGON_A2_aslh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asrh(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_asrh_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_asrh_R __builtin_HEXAGON_A2_asrh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=combine(Rt32.h,Rs32.h)
+ C Intrinsic Prototype: Word32 Q6_R_combine_RhRh(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_combine_RhRh __builtin_HEXAGON_A2_combine_hh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=combine(Rt32.h,Rs32.l)
+ C Intrinsic Prototype: Word32 Q6_R_combine_RhRl(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_combine_RhRl __builtin_HEXAGON_A2_combine_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=combine(Rt32.l,Rs32.h)
+ C Intrinsic Prototype: Word32 Q6_R_combine_RlRh(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_combine_RlRh __builtin_HEXAGON_A2_combine_lh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=combine(Rt32.l,Rs32.l)
+ C Intrinsic Prototype: Word32 Q6_R_combine_RlRl(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_combine_RlRl __builtin_HEXAGON_A2_combine_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=combine(#s8,#S8)
+ C Intrinsic Prototype: Word64 Q6_P_combine_II(Word32 Is8, Word32 IS8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_combine_II __builtin_HEXAGON_A2_combineii
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=combine(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_combine_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_combine_RR __builtin_HEXAGON_A2_combinew
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=max(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_max_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_max_RR __builtin_HEXAGON_A2_max
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=max(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_max_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_max_PP __builtin_HEXAGON_A2_maxp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=maxu(Rs32,Rt32)
+ C Intrinsic Prototype: UWord32 Q6_R_maxu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_maxu_RR __builtin_HEXAGON_A2_maxu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=maxu(Rss32,Rtt32)
+ C Intrinsic Prototype: UWord64 Q6_P_maxu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_maxu_PP __builtin_HEXAGON_A2_maxup
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=min(Rt32,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_min_RR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_min_RR __builtin_HEXAGON_A2_min
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=min(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_min_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_min_PP __builtin_HEXAGON_A2_minp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=minu(Rt32,Rs32)
+ C Intrinsic Prototype: UWord32 Q6_R_minu_RR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_minu_RR __builtin_HEXAGON_A2_minu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=minu(Rtt32,Rss32)
+ C Intrinsic Prototype: UWord64 Q6_P_minu_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_minu_PP __builtin_HEXAGON_A2_minup
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=neg(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_neg_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_neg_R __builtin_HEXAGON_A2_neg
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=neg(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_neg_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_neg_P __builtin_HEXAGON_A2_negp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=neg(Rs32):sat
+ C Intrinsic Prototype: Word32 Q6_R_neg_R_sat(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_neg_R_sat __builtin_HEXAGON_A2_negsat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=not(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_not_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_not_R __builtin_HEXAGON_A2_not
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=not(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_not_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_not_P __builtin_HEXAGON_A2_notp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=or(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_or_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_or_RR __builtin_HEXAGON_A2_or
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=or(Rs32,#s10)
+ C Intrinsic Prototype: Word32 Q6_R_or_RI(Word32 Rs, Word32 Is10)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_or_RI __builtin_HEXAGON_A2_orir
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=or(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_or_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_or_PP __builtin_HEXAGON_A2_orp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=round(Rss32):sat
+ C Intrinsic Prototype: Word32 Q6_R_round_P_sat(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_round_P_sat __builtin_HEXAGON_A2_roundsat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sat(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_sat_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sat_P __builtin_HEXAGON_A2_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=satb(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_satb_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_satb_R __builtin_HEXAGON_A2_satb
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sath(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_sath_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sath_R __builtin_HEXAGON_A2_sath
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=satub(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_satub_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_satub_R __builtin_HEXAGON_A2_satub
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=satuh(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_satuh_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_satuh_R __builtin_HEXAGON_A2_satuh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_sub_RR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_sub_RR __builtin_HEXAGON_A2_sub
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RhRh_s16 __builtin_HEXAGON_A2_subh_h16_hh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RhRl_s16 __builtin_HEXAGON_A2_subh_h16_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRh_s16 __builtin_HEXAGON_A2_subh_h16_lh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRl_s16 __builtin_HEXAGON_A2_subh_h16_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RhRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RhRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_lh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat:<<16
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat_s16(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h)
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRh(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRh __builtin_HEXAGON_A2_subh_l16_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l)
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRl(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRl __builtin_HEXAGON_A2_subh_l16_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRh_sat __builtin_HEXAGON_A2_subh_l16_sat_hl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_RlRl_sat __builtin_HEXAGON_A2_subh_l16_sat_ll
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=sub(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_sub_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_sub_PP __builtin_HEXAGON_A2_subp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(#s10,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_sub_IR(Word32 Is10, Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_sub_IR __builtin_HEXAGON_A2_subri
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sub(Rt32,Rs32):sat
+ C Intrinsic Prototype: Word32 Q6_R_sub_RR_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_sub_RR_sat __builtin_HEXAGON_A2_subsat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vaddh(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_vaddh_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vaddh_RR __builtin_HEXAGON_A2_svaddh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vaddh(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_vaddh_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vaddh_RR_sat __builtin_HEXAGON_A2_svaddhs
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vadduh(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_vadduh_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vadduh_RR_sat __builtin_HEXAGON_A2_svadduhs
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vavgh(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_vavgh_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vavgh_RR __builtin_HEXAGON_A2_svavgh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vavgh(Rs32,Rt32):rnd
+ C Intrinsic Prototype: Word32 Q6_R_vavgh_RR_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vavgh_RR_rnd __builtin_HEXAGON_A2_svavghs
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vnavgh(Rt32,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_vnavgh_RR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vnavgh_RR __builtin_HEXAGON_A2_svnavgh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsubh(Rt32,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_vsubh_RR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vsubh_RR __builtin_HEXAGON_A2_svsubh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsubh(Rt32,Rs32):sat
+ C Intrinsic Prototype: Word32 Q6_R_vsubh_RR_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vsubh_RR_sat __builtin_HEXAGON_A2_svsubhs
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsubuh(Rt32,Rs32):sat
+ C Intrinsic Prototype: Word32 Q6_R_vsubuh_RR_sat(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vsubuh_RR_sat __builtin_HEXAGON_A2_svsubuhs
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=swiz(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_swiz_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_swiz_R __builtin_HEXAGON_A2_swiz
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sxtb(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_sxtb_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_sxtb_R __builtin_HEXAGON_A2_sxtb
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sxth(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_sxth_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_sxth_R __builtin_HEXAGON_A2_sxth
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=sxtw(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_sxtw_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_sxtw_R __builtin_HEXAGON_A2_sxtw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=Rs32
+ C Intrinsic Prototype: Word32 Q6_R_equals_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_equals_R __builtin_HEXAGON_A2_tfr
+
+/* ==========================================================================
+ Assembly Syntax: Rx32.h=#u16
+ C Intrinsic Prototype: Word32 Q6_Rh_equals_I(Word32 Rx, Word32 Iu16)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Rh_equals_I __builtin_HEXAGON_A2_tfrih
+
+/* ==========================================================================
+ Assembly Syntax: Rx32.l=#u16
+ C Intrinsic Prototype: Word32 Q6_Rl_equals_I(Word32 Rx, Word32 Iu16)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Rl_equals_I __builtin_HEXAGON_A2_tfril
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=Rss32
+ C Intrinsic Prototype: Word64 Q6_P_equals_P(Word64 Rss)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_equals_P __builtin_HEXAGON_A2_tfrp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=#s8
+ C Intrinsic Prototype: Word64 Q6_P_equals_I(Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_equals_I __builtin_HEXAGON_A2_tfrpi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=#s16
+ C Intrinsic Prototype: Word32 Q6_R_equals_I(Word32 Is16)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_equals_I __builtin_HEXAGON_A2_tfrsi
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsh(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsh_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsh_P __builtin_HEXAGON_A2_vabsh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsh(Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vabsh_P_sat(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsh_P_sat __builtin_HEXAGON_A2_vabshsat
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsw(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsw_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsw_P __builtin_HEXAGON_A2_vabsw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsw(Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vabsw_P_sat(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsw_P_sat __builtin_HEXAGON_A2_vabswsat
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vaddb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_vaddb_PP __builtin_HEXAGON_A2_vaddb_map
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vaddh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaddh_PP __builtin_HEXAGON_A2_vaddh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vaddh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaddh_PP_sat __builtin_HEXAGON_A2_vaddhs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vaddub_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaddub_PP __builtin_HEXAGON_A2_vaddub
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vaddub_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaddub_PP_sat __builtin_HEXAGON_A2_vaddubs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vadduh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vadduh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vadduh_PP_sat __builtin_HEXAGON_A2_vadduhs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vaddw_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaddw_PP __builtin_HEXAGON_A2_vaddw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vaddw_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaddw_PP_sat __builtin_HEXAGON_A2_vaddws
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vavgh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgh_PP __builtin_HEXAGON_A2_vavgh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):crnd
+ C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_crnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgh_PP_crnd __builtin_HEXAGON_A2_vavghcr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):rnd
+ C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_rnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgh_PP_rnd __builtin_HEXAGON_A2_vavghr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vavgub_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgub_PP __builtin_HEXAGON_A2_vavgub
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32):rnd
+ C Intrinsic Prototype: Word64 Q6_P_vavgub_PP_rnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgub_PP_rnd __builtin_HEXAGON_A2_vavgubr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vavguh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavguh_PP __builtin_HEXAGON_A2_vavguh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32):rnd
+ C Intrinsic Prototype: Word64 Q6_P_vavguh_PP_rnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavguh_PP_rnd __builtin_HEXAGON_A2_vavguhr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vavguw_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavguw_PP __builtin_HEXAGON_A2_vavguw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32):rnd
+ C Intrinsic Prototype: Word64 Q6_P_vavguw_PP_rnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavguw_PP_rnd __builtin_HEXAGON_A2_vavguwr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vavgw_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgw_PP __builtin_HEXAGON_A2_vavgw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):crnd
+ C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_crnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgw_PP_crnd __builtin_HEXAGON_A2_vavgwcr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):rnd
+ C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_rnd(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vavgw_PP_rnd __builtin_HEXAGON_A2_vavgwr
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpb.eq(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpb_eq_PP __builtin_HEXAGON_A2_vcmpbeq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpb.gtu(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpb_gtu_PP __builtin_HEXAGON_A2_vcmpbgtu
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmph.eq(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmph_eq_PP __builtin_HEXAGON_A2_vcmpheq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmph.gt(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmph_gt_PP __builtin_HEXAGON_A2_vcmphgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmph.gtu(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmph_gtu_PP __builtin_HEXAGON_A2_vcmphgtu
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpw.eq(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpw_eq_PP __builtin_HEXAGON_A2_vcmpweq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpw.gt(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpw_gt_PP __builtin_HEXAGON_A2_vcmpwgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpw.gtu(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpw_gtu_PP __builtin_HEXAGON_A2_vcmpwgtu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vconj(Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vconj_P_sat(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vconj_P_sat __builtin_HEXAGON_A2_vconj
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmaxb(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vmaxb_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmaxb_PP __builtin_HEXAGON_A2_vmaxb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmaxh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vmaxh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmaxh_PP __builtin_HEXAGON_A2_vmaxh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmaxub(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vmaxub_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmaxub_PP __builtin_HEXAGON_A2_vmaxub
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmaxuh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vmaxuh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmaxuh_PP __builtin_HEXAGON_A2_vmaxuh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmaxuw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vmaxuw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmaxuw_PP __builtin_HEXAGON_A2_vmaxuw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmaxw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vmaxw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmaxw_PP __builtin_HEXAGON_A2_vmaxw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vminb(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vminb_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vminb_PP __builtin_HEXAGON_A2_vminb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vminh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vminh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vminh_PP __builtin_HEXAGON_A2_vminh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vminub(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vminub_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vminub_PP __builtin_HEXAGON_A2_vminub
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vminuh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vminuh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vminuh_PP __builtin_HEXAGON_A2_vminuh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vminuw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vminuw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vminuw_PP __builtin_HEXAGON_A2_vminuw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vminw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vminw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vminw_PP __builtin_HEXAGON_A2_vminw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vnavgh_PP __builtin_HEXAGON_A2_vnavgh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):crnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_crnd_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vnavgh_PP_crnd_sat __builtin_HEXAGON_A2_vnavghcr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_rnd_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vnavgh_PP_rnd_sat __builtin_HEXAGON_A2_vnavghr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vnavgw_PP __builtin_HEXAGON_A2_vnavgw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):crnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_crnd_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vnavgw_PP_crnd_sat __builtin_HEXAGON_A2_vnavgwcr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_rnd_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vnavgw_PP_rnd_sat __builtin_HEXAGON_A2_vnavgwr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vraddub(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vraddub_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vraddub_PP __builtin_HEXAGON_A2_vraddub
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vraddub(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vraddubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vraddubacc_PP __builtin_HEXAGON_A2_vraddub_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrsadub(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrsadub_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrsadub_PP __builtin_HEXAGON_A2_vrsadub
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrsadub(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrsadubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrsadubacc_PP __builtin_HEXAGON_A2_vrsadub_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vsubb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_vsubb_PP __builtin_HEXAGON_A2_vsubb_map
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsubh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubh_PP __builtin_HEXAGON_A2_vsubh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vsubh_PP_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubh_PP_sat __builtin_HEXAGON_A2_vsubhs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsubub_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubub_PP __builtin_HEXAGON_A2_vsubub
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vsubub_PP_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubub_PP_sat __builtin_HEXAGON_A2_vsububs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubuh(Rtt32,Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vsubuh_PP_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubuh_PP_sat __builtin_HEXAGON_A2_vsubuhs
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsubw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubw_PP __builtin_HEXAGON_A2_vsubw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vsubw_PP_sat(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsubw_PP_sat __builtin_HEXAGON_A2_vsubws
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=xor(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_xor_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_xor_RR __builtin_HEXAGON_A2_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=xor(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_xor_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_xor_PP __builtin_HEXAGON_A2_xorp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=zxtb(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_zxtb_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_zxtb_R __builtin_HEXAGON_A2_zxtb
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=zxth(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_zxth_R(Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_zxth_R __builtin_HEXAGON_A2_zxth
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=and(Rt32,~Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_and_RnR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_and_RnR __builtin_HEXAGON_A4_andn
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=and(Rtt32,~Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_and_PnP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_and_PnP __builtin_HEXAGON_A4_andnp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=bitsplit(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_bitsplit_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_bitsplit_RR __builtin_HEXAGON_A4_bitsplit
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=bitsplit(Rs32,#u5)
+ C Intrinsic Prototype: Word64 Q6_P_bitsplit_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_bitsplit_RI __builtin_HEXAGON_A4_bitspliti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=boundscheck(Rs32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_boundscheck_RP(Word32 Rs, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_boundscheck_RP __builtin_HEXAGON_A4_boundscheck
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmpb.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmpb_eq_RR __builtin_HEXAGON_A4_cmpbeq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmpb.eq(Rs32,#u8)
+ C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RI(Word32 Rs, Word32 Iu8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmpb_eq_RI __builtin_HEXAGON_A4_cmpbeqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmpb.gt(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmpb_gt_RR __builtin_HEXAGON_A4_cmpbgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmpb.gt(Rs32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmpb_gt_RI __builtin_HEXAGON_A4_cmpbgti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmpb.gtu(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmpb_gtu_RR __builtin_HEXAGON_A4_cmpbgtu
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmpb.gtu(Rs32,#u7)
+ C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RI(Word32 Rs, Word32 Iu7)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmpb_gtu_RI __builtin_HEXAGON_A4_cmpbgtui
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmph.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmph_eq_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmph_eq_RR __builtin_HEXAGON_A4_cmpheq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmph.eq(Rs32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_cmph_eq_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmph_eq_RI __builtin_HEXAGON_A4_cmpheqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmph.gt(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmph_gt_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmph_gt_RR __builtin_HEXAGON_A4_cmphgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmph.gt(Rs32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_cmph_gt_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmph_gt_RI __builtin_HEXAGON_A4_cmphgti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmph.gtu(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmph_gtu_RR __builtin_HEXAGON_A4_cmphgtu
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmph.gtu(Rs32,#u7)
+ C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RI(Word32 Rs, Word32 Iu7)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmph_gtu_RI __builtin_HEXAGON_A4_cmphgtui
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=combine(#s8,Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_combine_IR(Word32 Is8, Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_combine_IR __builtin_HEXAGON_A4_combineir
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=combine(Rs32,#s8)
+ C Intrinsic Prototype: Word64 Q6_P_combine_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_combine_RI __builtin_HEXAGON_A4_combineri
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cround(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_cround_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cround_RI __builtin_HEXAGON_A4_cround_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cround(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_cround_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cround_RR __builtin_HEXAGON_A4_cround_rr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=modwrap(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_modwrap_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_modwrap_RR __builtin_HEXAGON_A4_modwrapu
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=or(Rt32,~Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_or_RnR(Word32 Rt, Word32 Rs)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_or_RnR __builtin_HEXAGON_A4_orn
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=or(Rtt32,~Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_or_PnP(Word64 Rtt, Word64 Rss)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_or_PnP __builtin_HEXAGON_A4_ornp
+
+/* The Q6_* macros in this section alias __builtin_HEXAGON_A4_* / A5_*
+ builtins: scalar compares (cmp.eq and negated forms), rounding, TLB match,
+ 64-bit vector byte/half/word compares, vector reduce min/max, and vaddhub.
+ Each entry's comment lists the assembly syntax, C prototype, instruction
+ class, and the execution slots the instruction may issue to.
+ NOTE(review): this table appears machine-generated — prefer regenerating
+ it over hand-editing individual entries. */
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmp.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_cmp_eq_RR __builtin_HEXAGON_A4_rcmpeq
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmp.eq(Rs32,#s8)
+ C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_cmp_eq_RI __builtin_HEXAGON_A4_rcmpeqi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=!cmp.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_not_cmp_eq_RR __builtin_HEXAGON_A4_rcmpneq
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=!cmp.eq(Rs32,#s8)
+ C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_not_cmp_eq_RI __builtin_HEXAGON_A4_rcmpneqi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=round(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_round_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_round_RI __builtin_HEXAGON_A4_round_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=round(Rs32,#u5):sat
+ C Intrinsic Prototype: Word32 Q6_R_round_RI_sat(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_round_RI_sat __builtin_HEXAGON_A4_round_ri_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=round(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_round_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_round_RR __builtin_HEXAGON_A4_round_rr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=round(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_round_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_round_RR_sat __builtin_HEXAGON_A4_round_rr_sat
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=tlbmatch(Rss32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_tlbmatch_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_tlbmatch_PR __builtin_HEXAGON_A4_tlbmatch
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=any8(vcmpb.eq(Rss32,Rtt32))
+ C Intrinsic Prototype: Byte Q6_p_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_any8_vcmpb_eq_PP __builtin_HEXAGON_A4_vcmpbeq_any
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpb.eq(Rss32,#u8)
+ C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PI(Word64 Rss, Word32 Iu8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpb_eq_PI __builtin_HEXAGON_A4_vcmpbeqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpb.gt(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpb_gt_PP __builtin_HEXAGON_A4_vcmpbgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpb.gt(Rss32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PI(Word64 Rss, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpb_gt_PI __builtin_HEXAGON_A4_vcmpbgti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpb.gtu(Rss32,#u7)
+ C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PI(Word64 Rss, Word32 Iu7)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpb_gtu_PI __builtin_HEXAGON_A4_vcmpbgtui
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmph.eq(Rss32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PI(Word64 Rss, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmph_eq_PI __builtin_HEXAGON_A4_vcmpheqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmph.gt(Rss32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PI(Word64 Rss, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmph_gt_PI __builtin_HEXAGON_A4_vcmphgti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmph.gtu(Rss32,#u7)
+ C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PI(Word64 Rss, Word32 Iu7)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmph_gtu_PI __builtin_HEXAGON_A4_vcmphgtui
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpw.eq(Rss32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PI(Word64 Rss, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpw_eq_PI __builtin_HEXAGON_A4_vcmpweqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpw.gt(Rss32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PI(Word64 Rss, Word32 Is8)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpw_gt_PI __builtin_HEXAGON_A4_vcmpwgti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=vcmpw.gtu(Rss32,#u7)
+ C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PI(Word64 Rss, Word32 Iu7)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_vcmpw_gtu_PI __builtin_HEXAGON_A4_vcmpwgtui
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrmaxh(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmaxh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmaxh_PR __builtin_HEXAGON_A4_vrmaxh
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrmaxuh(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmaxuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmaxuh_PR __builtin_HEXAGON_A4_vrmaxuh
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrmaxuw(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmaxuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmaxuw_PR __builtin_HEXAGON_A4_vrmaxuw
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrmaxw(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmaxw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmaxw_PR __builtin_HEXAGON_A4_vrmaxw
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrminh(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrminh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrminh_PR __builtin_HEXAGON_A4_vrminh
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrminuh(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrminuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrminuh_PR __builtin_HEXAGON_A4_vrminuh
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrminuw(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrminuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrminuw_PR __builtin_HEXAGON_A4_vrminuw
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=vrminw(Rss32,Ru32)
+ C Intrinsic Prototype: Word64 Q6_P_vrminw_PR(Word64 Rxx, Word64 Rss, Word32 Ru)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrminw_PR __builtin_HEXAGON_A4_vrminw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vaddhub(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_vaddhub_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vaddhub_PP_sat __builtin_HEXAGON_A5_vaddhubs
+
+/* The Q6_* macros in this section alias __builtin_HEXAGON_C2_* / C4_*
+ builtins: predicate-register (Pd4/Ps4/Pt4/Pu4) logical operations and
+ compound forms, scalar and register-pair compares, mux/vmux selects, and
+ predicate<->register transfers. Predicate operands and results use the
+ Byte type in the C prototypes — presumably a typedef provided earlier in
+ this header; confirm there.
+ NOTE(review): this table appears machine-generated — prefer regenerating
+ it over hand-editing individual entries. */
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=all8(Ps4)
+ C Intrinsic Prototype: Byte Q6_p_all8_p(Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_all8_p __builtin_HEXAGON_C2_all8
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=and(Pt4,Ps4)
+ C Intrinsic Prototype: Byte Q6_p_and_pp(Byte Pt, Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_and_pp __builtin_HEXAGON_C2_and
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=and(Pt4,!Ps4)
+ C Intrinsic Prototype: Byte Q6_p_and_pnp(Byte Pt, Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_and_pnp __builtin_HEXAGON_C2_andn
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=any8(Ps4)
+ C Intrinsic Prototype: Byte Q6_p_any8_p(Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_any8_p __builtin_HEXAGON_C2_any8
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=bitsclr(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_bitsclr_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_bitsclr_RR __builtin_HEXAGON_C2_bitsclr
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=bitsclr(Rs32,#u6)
+ C Intrinsic Prototype: Byte Q6_p_bitsclr_RI(Word32 Rs, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_bitsclr_RI __builtin_HEXAGON_C2_bitsclri
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=bitsset(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_bitsset_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_bitsset_RR __builtin_HEXAGON_C2_bitsset
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_eq_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_eq_RR __builtin_HEXAGON_C2_cmpeq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.eq(Rs32,#s10)
+ C Intrinsic Prototype: Byte Q6_p_cmp_eq_RI(Word32 Rs, Word32 Is10)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_eq_RI __builtin_HEXAGON_C2_cmpeqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.eq(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmp_eq_PP __builtin_HEXAGON_C2_cmpeqp
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.ge(Rs32,#s8)
+ C Intrinsic Prototype: Byte Q6_p_cmp_ge_RI(Word32 Rs, Word32 Is8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_ge_RI __builtin_HEXAGON_C2_cmpgei
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.geu(Rs32,#u8)
+ C Intrinsic Prototype: Byte Q6_p_cmp_geu_RI(Word32 Rs, Word32 Iu8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_geu_RI __builtin_HEXAGON_C2_cmpgeui
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.gt(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_gt_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_gt_RR __builtin_HEXAGON_C2_cmpgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.gt(Rs32,#s10)
+ C Intrinsic Prototype: Byte Q6_p_cmp_gt_RI(Word32 Rs, Word32 Is10)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_gt_RI __builtin_HEXAGON_C2_cmpgti
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.gt(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_gt_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmp_gt_PP __builtin_HEXAGON_C2_cmpgtp
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.gtu(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_gtu_RR __builtin_HEXAGON_C2_cmpgtu
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.gtu(Rs32,#u9)
+ C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RI(Word32 Rs, Word32 Iu9)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_gtu_RI __builtin_HEXAGON_C2_cmpgtui
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.gtu(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_gtu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_cmp_gtu_PP __builtin_HEXAGON_C2_cmpgtup
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.lt(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_lt_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_lt_RR __builtin_HEXAGON_C2_cmplt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=cmp.ltu(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_cmp_ltu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_cmp_ltu_RR __builtin_HEXAGON_C2_cmpltu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mask(Pt4)
+ C Intrinsic Prototype: Word64 Q6_P_mask_p(Byte Pt)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mask_p __builtin_HEXAGON_C2_mask
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mux(Pu4,Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mux_pRR(Byte Pu, Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_mux_pRR __builtin_HEXAGON_C2_mux
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mux(Pu4,#s8,#S8)
+ C Intrinsic Prototype: Word32 Q6_R_mux_pII(Byte Pu, Word32 Is8, Word32 IS8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_mux_pII __builtin_HEXAGON_C2_muxii
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mux(Pu4,Rs32,#s8)
+ C Intrinsic Prototype: Word32 Q6_R_mux_pRI(Byte Pu, Word32 Rs, Word32 Is8)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_mux_pRI __builtin_HEXAGON_C2_muxir
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mux(Pu4,#s8,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_mux_pIR(Byte Pu, Word32 Is8, Word32 Rs)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_mux_pIR __builtin_HEXAGON_C2_muxri
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=not(Ps4)
+ C Intrinsic Prototype: Byte Q6_p_not_p(Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_p __builtin_HEXAGON_C2_not
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=or(Pt4,Ps4)
+ C Intrinsic Prototype: Byte Q6_p_or_pp(Byte Pt, Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_or_pp __builtin_HEXAGON_C2_or
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=or(Pt4,!Ps4)
+ C Intrinsic Prototype: Byte Q6_p_or_pnp(Byte Pt, Byte Ps)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_or_pnp __builtin_HEXAGON_C2_orn
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=Ps4
+ C Intrinsic Prototype: Byte Q6_p_equals_p(Byte Ps)
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_equals_p __builtin_HEXAGON_C2_pxfer_map
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=Ps4
+ C Intrinsic Prototype: Word32 Q6_R_equals_p(Byte Ps)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_equals_p __builtin_HEXAGON_C2_tfrpr
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=Rs32
+ C Intrinsic Prototype: Byte Q6_p_equals_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_equals_R __builtin_HEXAGON_C2_tfrrp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vitpack(Ps4,Pt4)
+ C Intrinsic Prototype: Word32 Q6_R_vitpack_pp(Byte Ps, Byte Pt)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vitpack_pp __builtin_HEXAGON_C2_vitpack
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmux(Pu4,Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmux_pPP(Byte Pu, Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmux_pPP __builtin_HEXAGON_C2_vmux
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=xor(Ps4,Pt4)
+ C Intrinsic Prototype: Byte Q6_p_xor_pp(Byte Ps, Byte Pt)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_xor_pp __builtin_HEXAGON_C2_xor
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=and(Ps4,and(Pt4,Pu4))
+ C Intrinsic Prototype: Byte Q6_p_and_and_ppp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_and_and_ppp __builtin_HEXAGON_C4_and_and
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=and(Ps4,and(Pt4,!Pu4))
+ C Intrinsic Prototype: Byte Q6_p_and_and_ppnp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_and_and_ppnp __builtin_HEXAGON_C4_and_andn
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=and(Ps4,or(Pt4,Pu4))
+ C Intrinsic Prototype: Byte Q6_p_and_or_ppp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_and_or_ppp __builtin_HEXAGON_C4_and_or
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=and(Ps4,or(Pt4,!Pu4))
+ C Intrinsic Prototype: Byte Q6_p_and_or_ppnp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_and_or_ppnp __builtin_HEXAGON_C4_and_orn
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!cmp.gt(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_not_cmp_gt_RR __builtin_HEXAGON_C4_cmplte
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!cmp.gt(Rs32,#s10)
+ C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RI(Word32 Rs, Word32 Is10)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_not_cmp_gt_RI __builtin_HEXAGON_C4_cmpltei
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!cmp.gtu(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_not_cmp_gtu_RR __builtin_HEXAGON_C4_cmplteu
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!cmp.gtu(Rs32,#u9)
+ C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RI(Word32 Rs, Word32 Iu9)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_not_cmp_gtu_RI __builtin_HEXAGON_C4_cmplteui
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!cmp.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_not_cmp_eq_RR __builtin_HEXAGON_C4_cmpneq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!cmp.eq(Rs32,#s10)
+ C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RI(Word32 Rs, Word32 Is10)
+ Instruction Type: ALU32_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_p_not_cmp_eq_RI __builtin_HEXAGON_C4_cmpneqi
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=fastcorner9(Ps4,Pt4)
+ C Intrinsic Prototype: Byte Q6_p_fastcorner9_pp(Byte Ps, Byte Pt)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!fastcorner9(Ps4,Pt4)
+ C Intrinsic Prototype: Byte Q6_p_not_fastcorner9_pp(Byte Ps, Byte Pt)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9_not
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!bitsclr(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_bitsclr_RR __builtin_HEXAGON_C4_nbitsclr
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!bitsclr(Rs32,#u6)
+ C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RI(Word32 Rs, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_bitsclr_RI __builtin_HEXAGON_C4_nbitsclri
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!bitsset(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_not_bitsset_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_bitsset_RR __builtin_HEXAGON_C4_nbitsset
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=or(Ps4,and(Pt4,Pu4))
+ C Intrinsic Prototype: Byte Q6_p_or_and_ppp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_or_and_ppp __builtin_HEXAGON_C4_or_and
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=or(Ps4,and(Pt4,!Pu4))
+ C Intrinsic Prototype: Byte Q6_p_or_and_ppnp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_or_and_ppnp __builtin_HEXAGON_C4_or_andn
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=or(Ps4,or(Pt4,Pu4))
+ C Intrinsic Prototype: Byte Q6_p_or_or_ppp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_or_or_ppp __builtin_HEXAGON_C4_or_or
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=or(Ps4,or(Pt4,!Pu4))
+ C Intrinsic Prototype: Byte Q6_p_or_or_ppnp(Byte Ps, Byte Pt, Byte Pu)
+ Instruction Type: CR
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_or_or_ppnp __builtin_HEXAGON_C4_or_orn
+
+/* The Q6_* macros in this section alias __builtin_HEXAGON_F2_conv_*
+ builtins: conversions between Float32/Float64 and signed/unsigned
+ 32/64-bit integer values. NOTE(review): the :chop variants presumably
+ round toward zero (truncate) while the plain variants use the current
+ rounding mode — inferred from the mnemonic only; confirm against the
+ Hexagon Programmer's Reference Manual.
+ NOTE(review): this table appears machine-generated — prefer regenerating
+ it over hand-editing individual entries. */
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_d2df(Rss32)
+ C Intrinsic Prototype: Float64 Q6_P_convert_d2df_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_d2df_P __builtin_HEXAGON_F2_conv_d2df
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_d2sf(Rss32)
+ C Intrinsic Prototype: Float32 Q6_R_convert_d2sf_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_d2sf_P __builtin_HEXAGON_F2_conv_d2sf
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_df2d(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_df2d_P __builtin_HEXAGON_F2_conv_df2d
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_df2d(Rss32):chop
+ C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P_chop(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_df2d_P_chop __builtin_HEXAGON_F2_conv_df2d_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_df2sf(Rss32)
+ C Intrinsic Prototype: Float32 Q6_R_convert_df2sf_P(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_df2sf_P __builtin_HEXAGON_F2_conv_df2sf
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_df2ud(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_df2ud_P __builtin_HEXAGON_F2_conv_df2ud
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_df2ud(Rss32):chop
+ C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P_chop(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_df2ud_P_chop __builtin_HEXAGON_F2_conv_df2ud_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_df2uw(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_df2uw_P __builtin_HEXAGON_F2_conv_df2uw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_df2uw(Rss32):chop
+ C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P_chop(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_df2uw_P_chop __builtin_HEXAGON_F2_conv_df2uw_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_df2w(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_df2w_P __builtin_HEXAGON_F2_conv_df2w
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_df2w(Rss32):chop
+ C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P_chop(Float64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_df2w_P_chop __builtin_HEXAGON_F2_conv_df2w_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_sf2d(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_sf2d_R __builtin_HEXAGON_F2_conv_sf2d
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_sf2d(Rs32):chop
+ C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R_chop(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_sf2d_R_chop __builtin_HEXAGON_F2_conv_sf2d_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_sf2df(Rs32)
+ C Intrinsic Prototype: Float64 Q6_P_convert_sf2df_R(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_sf2df_R __builtin_HEXAGON_F2_conv_sf2df
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_sf2ud(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_sf2ud_R __builtin_HEXAGON_F2_conv_sf2ud
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_sf2ud(Rs32):chop
+ C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R_chop(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_sf2ud_R_chop __builtin_HEXAGON_F2_conv_sf2ud_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_sf2uw(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_sf2uw_R __builtin_HEXAGON_F2_conv_sf2uw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_sf2uw(Rs32):chop
+ C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R_chop(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_sf2uw_R_chop __builtin_HEXAGON_F2_conv_sf2uw_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_sf2w(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_sf2w_R __builtin_HEXAGON_F2_conv_sf2w
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_sf2w(Rs32):chop
+ C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R_chop(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_sf2w_R_chop __builtin_HEXAGON_F2_conv_sf2w_chop
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_ud2df(Rss32)
+ C Intrinsic Prototype: Float64 Q6_P_convert_ud2df_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_ud2df_P __builtin_HEXAGON_F2_conv_ud2df
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_ud2sf(Rss32)
+ C Intrinsic Prototype: Float32 Q6_R_convert_ud2sf_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_ud2sf_P __builtin_HEXAGON_F2_conv_ud2sf
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_uw2df(Rs32)
+ C Intrinsic Prototype: Float64 Q6_P_convert_uw2df_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_uw2df_R __builtin_HEXAGON_F2_conv_uw2df
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_uw2sf(Rs32)
+ C Intrinsic Prototype: Float32 Q6_R_convert_uw2sf_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_uw2sf_R __builtin_HEXAGON_F2_conv_uw2sf
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=convert_w2df(Rs32)
+ C Intrinsic Prototype: Float64 Q6_P_convert_w2df_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_convert_w2df_R __builtin_HEXAGON_F2_conv_w2df
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=convert_w2sf(Rs32)
+ C Intrinsic Prototype: Float32 Q6_R_convert_w2sf_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_convert_w2sf_R __builtin_HEXAGON_F2_conv_w2sf
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=dfclass(Rss32,#u5)
+ C Intrinsic Prototype: Byte Q6_p_dfclass_PI(Float64 Rss, Word32 Iu5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_dfclass_PI __builtin_HEXAGON_F2_dfclass
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=dfcmp.eq(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_dfcmp_eq_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_dfcmp_eq_PP __builtin_HEXAGON_F2_dfcmpeq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=dfcmp.ge(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_dfcmp_ge_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_dfcmp_ge_PP __builtin_HEXAGON_F2_dfcmpge
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=dfcmp.gt(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_dfcmp_gt_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_dfcmp_gt_PP __builtin_HEXAGON_F2_dfcmpgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=dfcmp.uo(Rss32,Rtt32)
+ C Intrinsic Prototype: Byte Q6_p_dfcmp_uo_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_dfcmp_uo_PP __builtin_HEXAGON_F2_dfcmpuo
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfmake(#u10):neg
+ C Intrinsic Prototype: Float64 Q6_P_dfmake_I_neg(Word32 Iu10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmake_I_neg __builtin_HEXAGON_F2_dfimm_n
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfmake(#u10):pos
+ C Intrinsic Prototype: Float64 Q6_P_dfmake_I_pos(Word32 Iu10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmake_I_pos __builtin_HEXAGON_F2_dfimm_p
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfadd(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfadd_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfadd_RR __builtin_HEXAGON_F2_sfadd
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=sfclass(Rs32,#u5)
+ C Intrinsic Prototype: Byte Q6_p_sfclass_RI(Float32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_sfclass_RI __builtin_HEXAGON_F2_sfclass
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=sfcmp.eq(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_sfcmp_eq_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_sfcmp_eq_RR __builtin_HEXAGON_F2_sfcmpeq
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=sfcmp.ge(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_sfcmp_ge_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_sfcmp_ge_RR __builtin_HEXAGON_F2_sfcmpge
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=sfcmp.gt(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_sfcmp_gt_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_sfcmp_gt_RR __builtin_HEXAGON_F2_sfcmpgt
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=sfcmp.uo(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_sfcmp_uo_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_sfcmp_uo_RR __builtin_HEXAGON_F2_sfcmpuo
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sffixupd(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sffixupd_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sffixupd_RR __builtin_HEXAGON_F2_sffixupd
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sffixupn(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sffixupn_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sffixupn_RR __builtin_HEXAGON_F2_sffixupn
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sffixupr(Rs32)
+ C Intrinsic Prototype: Float32 Q6_R_sffixupr_R(Float32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sffixupr_R __builtin_HEXAGON_F2_sffixupr
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR(Float32 Rx, Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmpyacc_RR __builtin_HEXAGON_F2_sffma
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32):lib
+ C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmpyacc_RR_lib __builtin_HEXAGON_F2_sffma_lib
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32,Pu4):scale
+ C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RRp_scale(Float32 Rx, Float32 Rs, Float32 Rt, Byte Pu)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmpyacc_RRp_scale __builtin_HEXAGON_F2_sffma_sc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR(Float32 Rx, Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmpynac_RR __builtin_HEXAGON_F2_sffms
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32):lib
+ C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmpynac_RR_lib __builtin_HEXAGON_F2_sffms_lib
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfmake(#u10):neg
+ C Intrinsic Prototype: Float32 Q6_R_sfmake_I_neg(Word32 Iu10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmake_I_neg __builtin_HEXAGON_F2_sfimm_n
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfmake(#u10):pos
+ C Intrinsic Prototype: Float32 Q6_R_sfmake_I_pos(Word32 Iu10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmake_I_pos __builtin_HEXAGON_F2_sfimm_p
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfmax(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfmax_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmax_RR __builtin_HEXAGON_F2_sfmax
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfmin(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfmin_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmin_RR __builtin_HEXAGON_F2_sfmin
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfmpy(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfmpy_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfmpy_RR __builtin_HEXAGON_F2_sfmpy
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=sfsub(Rs32,Rt32)
+ C Intrinsic Prototype: Float32 Q6_R_sfsub_RR(Float32 Rs, Float32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sfsub_RR __builtin_HEXAGON_F2_sfsub
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memb(Rx32++#s4:0:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memb_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memb_IM_circ __builtin_HEXAGON_L2_loadrb_pci
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memb(Rx32++I:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memb_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memb_M_circ __builtin_HEXAGON_L2_loadrb_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=memd(Rx32++#s4:3:circ(Mu2))
+ C Intrinsic Prototype: Word64 Q6_P_memd_IM_circ(void** Rx, Word32 Is4_3, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_P_memd_IM_circ __builtin_HEXAGON_L2_loadrd_pci
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=memd(Rx32++I:circ(Mu2))
+ C Intrinsic Prototype: Word64 Q6_P_memd_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_P_memd_M_circ __builtin_HEXAGON_L2_loadrd_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memh(Rx32++#s4:1:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memh_IM_circ __builtin_HEXAGON_L2_loadrh_pci
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memh(Rx32++I:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memh_M_circ __builtin_HEXAGON_L2_loadrh_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memw(Rx32++#s4:2:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memw_IM_circ(void** Rx, Word32 Is4_2, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memw_IM_circ __builtin_HEXAGON_L2_loadri_pci
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memw(Rx32++I:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memw_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memw_M_circ __builtin_HEXAGON_L2_loadri_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memub(Rx32++#s4:0:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memub_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memub_IM_circ __builtin_HEXAGON_L2_loadrub_pci
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memub(Rx32++I:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memub_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memub_M_circ __builtin_HEXAGON_L2_loadrub_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memuh(Rx32++#s4:1:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memuh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memuh_IM_circ __builtin_HEXAGON_L2_loadruh_pci
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=memuh(Rx32++I:circ(Mu2))
+ C Intrinsic Prototype: Word32 Q6_R_memuh_M_circ(void** Rx, Word32 Mu, void* BaseAddress)
+ Instruction Type: LD
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_R_memuh_M_circ __builtin_HEXAGON_L2_loadruh_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=add(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_addacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_addacc_RR __builtin_HEXAGON_M2_acci
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=add(Rs32,#s8)
+ C Intrinsic Prototype: Word32 Q6_R_addacc_RI(Word32 Rx, Word32 Rs, Word32 Is8)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_addacc_RI __builtin_HEXAGON_M2_accii
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpyi(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyiacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyiacc_RR __builtin_HEXAGON_M2_cmaci_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpyr(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyracc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyracc_RR __builtin_HEXAGON_M2_cmacr_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyacc_RR_sat __builtin_HEXAGON_M2_cmacs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyacc_RR_s1_sat __builtin_HEXAGON_M2_cmacs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyacc_RR_conj_sat __builtin_HEXAGON_M2_cmacsc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyacc_RR_conj_s1_sat __builtin_HEXAGON_M2_cmacsc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpyi(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyi_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyi_RR __builtin_HEXAGON_M2_cmpyi_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpyr(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyr_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpyr_RR __builtin_HEXAGON_M2_cmpyr_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpy(Rs32,Rt32):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpy_RR_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpy_RR_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpy_RR_conj_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpy_RR_conj_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpy_RR_sat __builtin_HEXAGON_M2_cmpys_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpy_RR_s1_sat __builtin_HEXAGON_M2_cmpys_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpy_RR_conj_sat __builtin_HEXAGON_M2_cmpysc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpy_RR_conj_s1_sat __builtin_HEXAGON_M2_cmpysc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpynac_RR_sat __builtin_HEXAGON_M2_cnacs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpynac_RR_s1_sat __builtin_HEXAGON_M2_cnacs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpynac_RR_conj_sat __builtin_HEXAGON_M2_cnacsc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cmpynac_RR_conj_s1_sat __builtin_HEXAGON_M2_cnacsc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RR __builtin_HEXAGON_M2_dpmpyss_acc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RR __builtin_HEXAGON_M2_dpmpyss_nac_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32):rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RR_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ NOTE(review): unlike Q6_P_mpy_RR below (Word64 result), the :rnd form
+ yields a single Word32 -- presumably the rounded high word of the
+ 64-bit product; confirm against the Hexagon PRM before relying on it.
+ ========================================================================== */
+
+#define Q6_R_mpy_RR_rnd __builtin_HEXAGON_M2_dpmpyss_rnd_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RR __builtin_HEXAGON_M2_dpmpyss_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RR __builtin_HEXAGON_M2_dpmpyuu_acc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RR __builtin_HEXAGON_M2_dpmpyuu_nac_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32,Rt32)
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ NOTE(review): operands are declared Word32 (signed) but the result is
+ UWord64 -- presumably mpyu reinterprets the operand bits as unsigned;
+ verify against the Hexagon PRM before depending on sign behavior.
+ ========================================================================== */
+
+#define Q6_P_mpyu_RR __builtin_HEXAGON_M2_dpmpyuu_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RRh_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyh_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RRh_s1_sat __builtin_HEXAGON_M2_hmmpyh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RRl_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyl_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RRl_s1_sat __builtin_HEXAGON_M2_hmmpyl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyi(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyiacc_RR __builtin_HEXAGON_M2_maci
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyi(Rs32,#u8)
+ C Intrinsic Prototype: Word32 Q6_R_mpyinac_RI(Word32 Rx, Word32 Rs, Word32 Iu8)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyinac_RI __builtin_HEXAGON_M2_macsin
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyi(Rs32,#u8)
+ C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RI(Word32 Rx, Word32 Rs, Word32 Iu8)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyiacc_RI __builtin_HEXAGON_M2_macsip
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywohacc_PP_rnd_sat __builtin_HEXAGON_M2_mmachs_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywohacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmachs_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywohacc_PP_sat __builtin_HEXAGON_M2_mmachs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywohacc_PP_s1_sat __builtin_HEXAGON_M2_mmachs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywehacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacls_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywehacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacls_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywehacc_PP_sat __builtin_HEXAGON_M2_mmacls_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywehacc_PP_s1_sat __builtin_HEXAGON_M2_mmacls_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouhacc_PP_sat __builtin_HEXAGON_M2_mmacuhs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouhacc_PP_s1_sat __builtin_HEXAGON_M2_mmacuhs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuhacc_PP_sat __builtin_HEXAGON_M2_mmaculs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuhacc_PP_s1_sat __builtin_HEXAGON_M2_mmaculs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywoh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywoh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywoh_PP_sat __builtin_HEXAGON_M2_mmpyh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywoh_PP_s1_sat __builtin_HEXAGON_M2_mmpyh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweh_PP_sat __builtin_HEXAGON_M2_mmpyl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweh_PP_s1_sat __builtin_HEXAGON_M2_mmpyl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouh_PP_sat __builtin_HEXAGON_M2_mmpyuh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpywouh_PP_s1_sat __builtin_HEXAGON_M2_mmpyuh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuh_PP_sat __builtin_HEXAGON_M2_mmpyul_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyweuh_PP_s1_sat __builtin_HEXAGON_M2_mmpyul_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRh __builtin_HEXAGON_M2_mpy_acc_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpy_acc_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRl __builtin_HEXAGON_M2_mpy_acc_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpy_acc_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRh __builtin_HEXAGON_M2_mpy_acc_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpy_acc_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRl __builtin_HEXAGON_M2_mpy_acc_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpy_acc_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh __builtin_HEXAGON_M2_mpy_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpy_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl __builtin_HEXAGON_M2_mpy_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpy_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh __builtin_HEXAGON_M2_mpy_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpy_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl __builtin_HEXAGON_M2_mpy_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpy_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRh __builtin_HEXAGON_M2_mpy_nac_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpy_nac_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRl __builtin_HEXAGON_M2_mpy_nac_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpy_nac_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRh __builtin_HEXAGON_M2_mpy_nac_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpy_nac_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRl __builtin_HEXAGON_M2_mpy_nac_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpy_nac_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_sat __builtin_HEXAGON_M2_mpy_sat_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_sat __builtin_HEXAGON_M2_mpy_sat_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_sat __builtin_HEXAGON_M2_mpy_sat_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_sat __builtin_HEXAGON_M2_mpy_sat_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RhRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RlRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RR __builtin_HEXAGON_M2_mpy_up
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RR_s1 __builtin_HEXAGON_M2_mpy_up_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpy_RR_s1_sat __builtin_HEXAGON_M2_mpy_up_s1_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RhRh __builtin_HEXAGON_M2_mpyd_acc_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpyd_acc_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RhRl __builtin_HEXAGON_M2_mpyd_acc_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpyd_acc_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RlRh __builtin_HEXAGON_M2_mpyd_acc_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpyd_acc_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RlRl __builtin_HEXAGON_M2_mpyd_acc_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpyd_acc_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRh __builtin_HEXAGON_M2_mpyd_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpyd_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRl __builtin_HEXAGON_M2_mpyd_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpyd_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRh __builtin_HEXAGON_M2_mpyd_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpyd_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRl __builtin_HEXAGON_M2_mpyd_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpyd_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RhRh __builtin_HEXAGON_M2_mpyd_nac_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpyd_nac_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RhRl __builtin_HEXAGON_M2_mpyd_nac_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpyd_nac_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RlRh __builtin_HEXAGON_M2_mpyd_nac_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpyd_nac_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RlRl __builtin_HEXAGON_M2_mpyd_nac_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpyd_nac_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd
+ C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyi(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpyi_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyi_RR __builtin_HEXAGON_M2_mpyi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyi(Rs32,#m9)
+ C Intrinsic Prototype: Word32 Q6_R_mpyi_RI(Word32 Rs, Word32 Im9)
+ Instruction Type: M
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_mpyi_RI __builtin_HEXAGON_M2_mpysmi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpysu(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpysu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpysu_RR __builtin_HEXAGON_M2_mpysu_up
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyu_acc_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyu_acc_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyu_acc_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyu_acc_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyu_acc_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyu_acc_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyu_acc_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyu_acc_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RhRh __builtin_HEXAGON_M2_mpyu_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyu_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RhRl __builtin_HEXAGON_M2_mpyu_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyu_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RlRh __builtin_HEXAGON_M2_mpyu_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyu_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RlRl __builtin_HEXAGON_M2_mpyu_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyu_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RhRh __builtin_HEXAGON_M2_mpyu_nac_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyu_nac_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RhRl __builtin_HEXAGON_M2_mpyu_nac_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyu_nac_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RlRh __builtin_HEXAGON_M2_mpyu_nac_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyu_nac_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RlRl __builtin_HEXAGON_M2_mpyu_nac_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyu_nac_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyu(Rs32,Rt32)
+ C Intrinsic Prototype: UWord32 Q6_R_mpyu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyu_RR __builtin_HEXAGON_M2_mpyu_up
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyud_acc_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyud_acc_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyud_acc_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyud_acc_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyud_acc_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyud_acc_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyud_acc_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyud_acc_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RhRh __builtin_HEXAGON_M2_mpyud_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyud_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RhRl __builtin_HEXAGON_M2_mpyud_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyud_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RlRh __builtin_HEXAGON_M2_mpyud_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyud_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RlRl __builtin_HEXAGON_M2_mpyud_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl_s1(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyud_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RhRh __builtin_HEXAGON_M2_mpyud_nac_hh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyud_nac_hh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RhRl __builtin_HEXAGON_M2_mpyud_nac_hl_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyud_nac_hl_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h)
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RlRh __builtin_HEXAGON_M2_mpyud_nac_lh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyud_nac_lh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l)
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RlRl __builtin_HEXAGON_M2_mpyud_nac_ll_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l):<<1
+ C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyud_nac_ll_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=mpyui(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpyui_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_mpyui_RR __builtin_HEXAGON_M2_mpyui
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=add(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_addnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_addnac_RR __builtin_HEXAGON_M2_nacci
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=add(Rs32,#s8)
+ C Intrinsic Prototype: Word32 Q6_R_addnac_RI(Word32 Rx, Word32 Rs, Word32 Is8)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_addnac_RI __builtin_HEXAGON_M2_naccii
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=sub(Rt32,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_subacc_RR(Word32 Rx, Word32 Rt, Word32 Rs)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_subacc_RR __builtin_HEXAGON_M2_subacc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsdiffh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsdiffh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsdiffh_PP __builtin_HEXAGON_M2_vabsdiffh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsdiffw(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsdiffw_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsdiffw_PP __builtin_HEXAGON_M2_vabsdiffw
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vcmpyi(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vcmpyiacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcmpyiacc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_i
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vcmpyr(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vcmpyracc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcmpyracc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_r
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcmpyi_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_i
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcmpyr_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_r
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcmpyi_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_i
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcmpyr_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_r
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vdmpyacc_PP_sat __builtin_HEXAGON_M2_vdmacs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vdmpyacc_PP_s1_sat __builtin_HEXAGON_M2_vdmacs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vdmpy_PP_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vdmpy_PP_s1_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vdmpy_PP_sat __builtin_HEXAGON_M2_vdmpys_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vdmpy_PP_s1_sat __builtin_HEXAGON_M2_vdmpys_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhacc_RR __builtin_HEXAGON_M2_vmac2
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyehacc_PP __builtin_HEXAGON_M2_vmac2es
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyehacc_PP_sat __builtin_HEXAGON_M2_vmac2es_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyehacc_PP_s1_sat __builtin_HEXAGON_M2_vmac2es_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhacc_RR_sat __builtin_HEXAGON_M2_vmac2s_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2s_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhsuacc_RR_sat __builtin_HEXAGON_M2_vmac2su_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhsuacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2su_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyeh_PP_sat __builtin_HEXAGON_M2_vmpy2es_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyeh_PP_s1_sat __builtin_HEXAGON_M2_vmpy2es_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyh_RR_sat __builtin_HEXAGON_M2_vmpy2s_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vmpyh_RR_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s0pack
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyh_RR_s1_sat __builtin_HEXAGON_M2_vmpy2s_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_s1_rnd_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vmpyh_RR_s1_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s1pack
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhsu_RR_sat __builtin_HEXAGON_M2_vmpy2su_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_s1_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpyhsu_RR_s1_sat __builtin_HEXAGON_M2_vmpy2su_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vraddh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word32 Q6_R_vraddh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vraddh_PP __builtin_HEXAGON_M2_vraddh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vradduh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word32 Q6_R_vradduh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vradduh_PP __builtin_HEXAGON_M2_vradduh
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyiacc_PP __builtin_HEXAGON_M2_vrcmaci_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyiacc_PP_conj __builtin_HEXAGON_M2_vrcmaci_s0c
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyracc_PP __builtin_HEXAGON_M2_vrcmacr_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyracc_PP_conj __builtin_HEXAGON_M2_vrcmacr_s0c
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyi_PP __builtin_HEXAGON_M2_vrcmpyi_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP_conj(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyi_PP_conj __builtin_HEXAGON_M2_vrcmpyi_s0c
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyr_PP __builtin_HEXAGON_M2_vrcmpyr_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP_conj(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcmpyr_PP_conj __builtin_HEXAGON_M2_vrcmpyr_s0c
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpysacc_PR_s1_sat(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_vrcmpysacc_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_acc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrcmpys(Rss32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vrcmpys_PR_s1_sat(Word64 Rss, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_vrcmpys_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_vrcmpys_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vrcmpys_PR_s1_rnd_sat __builtin_HEXAGON_M2_vrcmpys_s1rp
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpyh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpyhacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpyh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpyh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=xor(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_xorxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_xorxacc_RR __builtin_HEXAGON_M2_xor_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=and(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_andand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andand_RR __builtin_HEXAGON_M4_and_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=and(Rs32,~Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_andand_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andand_RnR __builtin_HEXAGON_M4_and_andn
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=or(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_orand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_orand_RR __builtin_HEXAGON_M4_and_or
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=xor(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_xorand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_xorand_RR __builtin_HEXAGON_M4_and_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpyiwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_wh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpyiwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_whc
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpyrwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_wh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cmpyrwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_whc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=mpy(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpyacc_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyacc_RR_s1_sat __builtin_HEXAGON_M4_mac_up_s1_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,#U6))
+ C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRI(Word32 Iu6, Word32 Rs, Word32 IU6)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_mpyi_IRI __builtin_HEXAGON_M4_mpyri_addi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Ru32,mpyi(Rs32,#u6))
+ C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRI(Word32 Ru, Word32 Rs, Word32 Iu6)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_mpyi_RRI __builtin_HEXAGON_M4_mpyri_addr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Ru32,mpyi(#u6:2,Rs32))
+ C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RIR(Word32 Ru, Word32 Iu6_2, Word32 Rs)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_mpyi_RIR __builtin_HEXAGON_M4_mpyri_addr_u2
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,Rt32))
+ C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRR(Word32 Iu6, Word32 Rs, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_mpyi_IRR __builtin_HEXAGON_M4_mpyrr_addi
+
+/* ==========================================================================
+ Assembly Syntax: Ry32=add(Ru32,mpyi(Ry32,Rs32))
+ C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRR(Word32 Ru, Word32 Ry, Word32 Rs)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_mpyi_RRR __builtin_HEXAGON_M4_mpyrr_addr
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpy(Rs32,Rt32):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_mpynac_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpynac_RR_s1_sat __builtin_HEXAGON_M4_nac_up_s1_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=and(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_andor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andor_RR __builtin_HEXAGON_M4_or_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=and(Rs32,~Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_andor_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andor_RnR __builtin_HEXAGON_M4_or_andn
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=or(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_oror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_oror_RR __builtin_HEXAGON_M4_or_or
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=xor(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_xoror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_xoror_RR __builtin_HEXAGON_M4_or_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=pmpyw(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_pmpyw_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_pmpyw_RR __builtin_HEXAGON_M4_pmpyw
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=pmpyw(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_pmpywxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_pmpywxacc_RR __builtin_HEXAGON_M4_pmpyw_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vpmpyh(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vpmpyh_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vpmpyh_RR __builtin_HEXAGON_M4_vpmpyh
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=vpmpyh(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vpmpyhxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vpmpyhxacc_RR __builtin_HEXAGON_M4_vpmpyh_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpywehacc_PP __builtin_HEXAGON_M4_vrmpyeh_acc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32):<<1
+ C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpywehacc_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_acc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpyweh_PP __builtin_HEXAGON_M4_vrmpyeh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32):<<1
+ C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP_s1(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpyweh_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpywohacc_PP __builtin_HEXAGON_M4_vrmpyoh_acc_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32):<<1
+ C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpywohacc_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_acc_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpywoh_PP __builtin_HEXAGON_M4_vrmpyoh_s0
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32):<<1
+ C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP_s1(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpywoh_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_s1
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=and(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_andxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andxacc_RR __builtin_HEXAGON_M4_xor_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=and(Rs32,~Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_andxacc_RnR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andxacc_RnR __builtin_HEXAGON_M4_xor_andn
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=or(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_orxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_orxacc_RR __builtin_HEXAGON_M4_xor_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=xor(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_xorxacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_xorxacc_PP __builtin_HEXAGON_M4_xor_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vdmpybsu(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vdmpybsuacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vdmpybsuacc_PP_sat __builtin_HEXAGON_M5_vdmacbsu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vdmpybsu(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vdmpybsu_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vdmpybsu_PP_sat __builtin_HEXAGON_M5_vdmpybsu
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpybsu(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmpybsuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpybsuacc_RR __builtin_HEXAGON_M5_vmacbsu
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vmpybu(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmpybuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpybuacc_RR __builtin_HEXAGON_M5_vmacbuu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpybsu(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmpybsu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpybsu_RR __builtin_HEXAGON_M5_vmpybsu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vmpybu(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vmpybu_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vmpybu_RR __builtin_HEXAGON_M5_vmpybuu
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpybsu(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpybsuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpybsuacc_PP __builtin_HEXAGON_M5_vrmacbsu
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrmpybu(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpybuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpybuacc_PP __builtin_HEXAGON_M5_vrmacbuu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpybsu(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpybsu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpybsu_PP __builtin_HEXAGON_M5_vrmpybsu
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrmpybu(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrmpybu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrmpybu_PP __builtin_HEXAGON_M5_vrmpybuu
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=addasl(Rt32,Rs32,#u3)
+ C Intrinsic Prototype: Word32 Q6_R_addasl_RRI(Word32 Rt, Word32 Rs, Word32 Iu3)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_addasl_RRI __builtin_HEXAGON_S2_addasl_rrri
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=asl(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asl_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asl_PI __builtin_HEXAGON_S2_asl_i_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=asl(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_aslacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslacc_PI __builtin_HEXAGON_S2_asl_i_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=asl(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asland_PI __builtin_HEXAGON_S2_asl_i_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=asl(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_aslnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslnac_PI __builtin_HEXAGON_S2_asl_i_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=asl(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_aslor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslor_PI __builtin_HEXAGON_S2_asl_i_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=asl(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_aslxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslxacc_PI __builtin_HEXAGON_S2_asl_i_p_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asl(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asl_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asl_RI __builtin_HEXAGON_S2_asl_i_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=asl(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_aslacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslacc_RI __builtin_HEXAGON_S2_asl_i_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=asl(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asland_RI __builtin_HEXAGON_S2_asl_i_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=asl(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_aslnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslnac_RI __builtin_HEXAGON_S2_asl_i_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=asl(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_aslor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslor_RI __builtin_HEXAGON_S2_asl_i_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asl(Rs32,#u5):sat
+ C Intrinsic Prototype: Word32 Q6_R_asl_RI_sat(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asl_RI_sat __builtin_HEXAGON_S2_asl_i_r_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=asl(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_aslxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslxacc_RI __builtin_HEXAGON_S2_asl_i_r_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaslh(Rss32,#u4)
+ C Intrinsic Prototype: Word64 Q6_P_vaslh_PI(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaslh_PI __builtin_HEXAGON_S2_asl_i_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaslw(Rss32,#u5)
+ C Intrinsic Prototype: Word64 Q6_P_vaslw_PI(Word64 Rss, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaslw_PI __builtin_HEXAGON_S2_asl_i_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=asl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asl_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asl_PR __builtin_HEXAGON_S2_asl_r_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=asl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_aslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslacc_PR __builtin_HEXAGON_S2_asl_r_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=asl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asland_PR __builtin_HEXAGON_S2_asl_r_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=asl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_aslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslnac_PR __builtin_HEXAGON_S2_asl_r_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=asl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_aslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslor_PR __builtin_HEXAGON_S2_asl_r_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=asl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_aslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_aslxacc_PR __builtin_HEXAGON_S2_asl_r_p_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asl_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asl_RR __builtin_HEXAGON_S2_asl_r_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=asl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_aslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslacc_RR __builtin_HEXAGON_S2_asl_r_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=asl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asland_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asland_RR __builtin_HEXAGON_S2_asl_r_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=asl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_aslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslnac_RR __builtin_HEXAGON_S2_asl_r_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=asl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_aslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_aslor_RR __builtin_HEXAGON_S2_asl_r_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asl(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_asl_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asl_RR_sat __builtin_HEXAGON_S2_asl_r_r_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaslh(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vaslh_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaslh_PR __builtin_HEXAGON_S2_asl_r_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vaslw(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vaslw_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vaslw_PR __builtin_HEXAGON_S2_asl_r_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=asr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asr_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asr_PI __builtin_HEXAGON_S2_asr_i_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=asr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asracc_PI __builtin_HEXAGON_S2_asr_i_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=asr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asrand_PI __builtin_HEXAGON_S2_asr_i_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=asr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asrnac_PI __builtin_HEXAGON_S2_asr_i_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=asr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asror_PI __builtin_HEXAGON_S2_asr_i_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=asr(Rss32,#u6):rnd
+ C Intrinsic Prototype: Word64 Q6_P_asr_PI_rnd(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asr_PI_rnd __builtin_HEXAGON_S2_asr_i_p_rnd
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=asrrnd(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_asrrnd_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_asrrnd_PI __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asr_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asr_RI __builtin_HEXAGON_S2_asr_i_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=asr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asracc_RI __builtin_HEXAGON_S2_asr_i_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=asr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asrand_RI __builtin_HEXAGON_S2_asr_i_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=asr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asrnac_RI __builtin_HEXAGON_S2_asr_i_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=asr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asror_RI __builtin_HEXAGON_S2_asr_i_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asr(Rs32,#u5):rnd
+ C Intrinsic Prototype: Word32 Q6_R_asr_RI_rnd(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asr_RI_rnd __builtin_HEXAGON_S2_asr_i_r_rnd
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asrrnd(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_asrrnd_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_asrrnd_RI __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vasrw(Rss32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_vasrw_PI(Word64 Rss, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vasrw_PI __builtin_HEXAGON_S2_asr_i_svw_trun
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vasrh(Rss32,#u4)
+ C Intrinsic Prototype: Word64 Q6_P_vasrh_PI(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vasrh_PI __builtin_HEXAGON_S2_asr_i_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vasrw(Rss32,#u5)
+ C Intrinsic Prototype: Word64 Q6_P_vasrw_PI(Word64 Rss, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vasrw_PI __builtin_HEXAGON_S2_asr_i_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=asr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asr_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asr_PR __builtin_HEXAGON_S2_asr_r_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=asr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asracc_PR __builtin_HEXAGON_S2_asr_r_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=asr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asrand_PR __builtin_HEXAGON_S2_asr_r_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=asr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asrnac_PR __builtin_HEXAGON_S2_asr_r_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=asr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asror_PR __builtin_HEXAGON_S2_asr_r_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=asr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_asrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_asrxacc_PR __builtin_HEXAGON_S2_asr_r_p_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asr_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asr_RR __builtin_HEXAGON_S2_asr_r_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=asr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asracc_RR __builtin_HEXAGON_S2_asr_r_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=asr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asrand_RR __builtin_HEXAGON_S2_asr_r_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=asr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asrnac_RR __builtin_HEXAGON_S2_asr_r_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=asr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_asror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asror_RR __builtin_HEXAGON_S2_asr_r_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=asr(Rs32,Rt32):sat
+ C Intrinsic Prototype: Word32 Q6_R_asr_RR_sat(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_asr_RR_sat __builtin_HEXAGON_S2_asr_r_r_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vasrw(Rss32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_vasrw_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vasrw_PR __builtin_HEXAGON_S2_asr_r_svw_trun
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vasrh(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vasrh_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vasrh_PR __builtin_HEXAGON_S2_asr_r_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vasrw(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vasrw_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vasrw_PR __builtin_HEXAGON_S2_asr_r_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=brev(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_brev_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_brev_R __builtin_HEXAGON_S2_brev
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=brev(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_brev_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_brev_P __builtin_HEXAGON_S2_brevp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cl0(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_cl0_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cl0_R __builtin_HEXAGON_S2_cl0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cl0(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_cl0_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cl0_P __builtin_HEXAGON_S2_cl0p
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cl1(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_cl1_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cl1_R __builtin_HEXAGON_S2_cl1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=cl1(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_cl1_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_cl1_P __builtin_HEXAGON_S2_cl1p
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=clb(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_clb_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_clb_R __builtin_HEXAGON_S2_clb
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=normamt(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_normamt_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_normamt_R __builtin_HEXAGON_S2_clbnorm
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=clb(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_clb_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_clb_P __builtin_HEXAGON_S2_clbp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=clrbit(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_clrbit_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_clrbit_RI __builtin_HEXAGON_S2_clrbit_i
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=clrbit(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_clrbit_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_clrbit_RR __builtin_HEXAGON_S2_clrbit_r
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=ct0(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_ct0_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_ct0_R __builtin_HEXAGON_S2_ct0
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=ct0(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_ct0_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_ct0_P __builtin_HEXAGON_S2_ct0p
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=ct1(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_ct1_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_ct1_R __builtin_HEXAGON_S2_ct1
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=ct1(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_ct1_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_ct1_P __builtin_HEXAGON_S2_ct1p
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=deinterleave(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_deinterleave_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_deinterleave_P __builtin_HEXAGON_S2_deinterleave
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=extractu(Rs32,#u5,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_extractu_RII(Word32 Rs, Word32 Iu5, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_extractu_RII __builtin_HEXAGON_S2_extractu
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=extractu(Rs32,Rtt32)
+ C Intrinsic Prototype: Word32 Q6_R_extractu_RP(Word32 Rs, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_extractu_RP __builtin_HEXAGON_S2_extractu_rp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=extractu(Rss32,#u6,#U6)
+ C Intrinsic Prototype: Word64 Q6_P_extractu_PII(Word64 Rss, Word32 Iu6, Word32 IU6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_extractu_PII __builtin_HEXAGON_S2_extractup
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=extractu(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_extractu_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_extractu_PP __builtin_HEXAGON_S2_extractup_rp
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=insert(Rs32,#u5,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_insert_RII(Word32 Rx, Word32 Rs, Word32 Iu5, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_insert_RII __builtin_HEXAGON_S2_insert
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=insert(Rs32,Rtt32)
+ C Intrinsic Prototype: Word32 Q6_R_insert_RP(Word32 Rx, Word32 Rs, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_insert_RP __builtin_HEXAGON_S2_insert_rp
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=insert(Rss32,#u6,#U6)
+ C Intrinsic Prototype: Word64 Q6_P_insert_PII(Word64 Rxx, Word64 Rss, Word32 Iu6, Word32 IU6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_insert_PII __builtin_HEXAGON_S2_insertp
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32=insert(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_insert_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_insert_PP __builtin_HEXAGON_S2_insertp_rp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=interleave(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_interleave_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_interleave_P __builtin_HEXAGON_S2_interleave
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=lfs(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_lfs_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lfs_PP __builtin_HEXAGON_S2_lfsp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=lsl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsl_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsl_PR __builtin_HEXAGON_S2_lsl_r_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=lsl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lslacc_PR __builtin_HEXAGON_S2_lsl_r_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=lsl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsland_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsland_PR __builtin_HEXAGON_S2_lsl_r_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=lsl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lslnac_PR __builtin_HEXAGON_S2_lsl_r_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=lsl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lslor_PR __builtin_HEXAGON_S2_lsl_r_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=lsl(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lslxacc_PR __builtin_HEXAGON_S2_lsl_r_p_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=lsl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsl_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsl_RR __builtin_HEXAGON_S2_lsl_r_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=lsl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lslacc_RR __builtin_HEXAGON_S2_lsl_r_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=lsl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsland_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsland_RR __builtin_HEXAGON_S2_lsl_r_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=lsl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lslnac_RR __builtin_HEXAGON_S2_lsl_r_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=lsl(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lslor_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lslor_RR __builtin_HEXAGON_S2_lsl_r_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlslh(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vlslh_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlslh_PR __builtin_HEXAGON_S2_lsl_r_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlslw(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vlslw_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlslw_PR __builtin_HEXAGON_S2_lsl_r_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsr_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsr_PI __builtin_HEXAGON_S2_lsr_i_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsracc_PI __builtin_HEXAGON_S2_lsr_i_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrand_PI __builtin_HEXAGON_S2_lsr_i_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrnac_PI __builtin_HEXAGON_S2_lsr_i_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsror_PI __builtin_HEXAGON_S2_lsr_i_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=lsr(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrxacc_PI __builtin_HEXAGON_S2_lsr_i_p_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsr_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsr_RI __builtin_HEXAGON_S2_lsr_i_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsracc_RI __builtin_HEXAGON_S2_lsr_i_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrand_RI __builtin_HEXAGON_S2_lsr_i_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrnac_RI __builtin_HEXAGON_S2_lsr_i_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsror_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsror_RI __builtin_HEXAGON_S2_lsr_i_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rx32^=lsr(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_lsrxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrxacc_RI __builtin_HEXAGON_S2_lsr_i_r_xacc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlsrh(Rss32,#u4)
+ C Intrinsic Prototype: Word64 Q6_P_vlsrh_PI(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlsrh_PI __builtin_HEXAGON_S2_lsr_i_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlsrw(Rss32,#u5)
+ C Intrinsic Prototype: Word64 Q6_P_vlsrw_PI(Word64 Rss, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlsrw_PI __builtin_HEXAGON_S2_lsr_i_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsr_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsr_PR __builtin_HEXAGON_S2_lsr_r_p
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsracc_PR __builtin_HEXAGON_S2_lsr_r_p_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrand_PR __builtin_HEXAGON_S2_lsr_r_p_and
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrnac_PR __builtin_HEXAGON_S2_lsr_r_p_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsror_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsror_PR __builtin_HEXAGON_S2_lsr_r_p_or
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=lsr(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_lsrxacc_PR __builtin_HEXAGON_S2_lsr_r_p_xor
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=lsr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsr_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsr_RR __builtin_HEXAGON_S2_lsr_r_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32+=lsr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsracc_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsracc_RR __builtin_HEXAGON_S2_lsr_r_r_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rx32&=lsr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsrand_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrand_RR __builtin_HEXAGON_S2_lsr_r_r_and
+
+/* ==========================================================================
+ Assembly Syntax: Rx32-=lsr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsrnac_RR __builtin_HEXAGON_S2_lsr_r_r_nac
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=lsr(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsror_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsror_RR __builtin_HEXAGON_S2_lsr_r_r_or
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlsrh(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vlsrh_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlsrh_PR __builtin_HEXAGON_S2_lsr_r_vh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vlsrw(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vlsrw_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vlsrw_PR __builtin_HEXAGON_S2_lsr_r_vw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=packhl(Rs32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_packhl_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU32_3op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_packhl_RR __builtin_HEXAGON_S2_packhl
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=parity(Rss32,Rtt32)
+ C Intrinsic Prototype: Word32 Q6_R_parity_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_parity_PP __builtin_HEXAGON_S2_parityp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=setbit(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_setbit_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_setbit_RI __builtin_HEXAGON_S2_setbit_i
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=setbit(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_setbit_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_setbit_RR __builtin_HEXAGON_S2_setbit_r
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=shuffeb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_shuffeb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_shuffeb_PP __builtin_HEXAGON_S2_shuffeb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=shuffeh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_shuffeh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_shuffeh_PP __builtin_HEXAGON_S2_shuffeh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=shuffob(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_shuffob_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_shuffob_PP __builtin_HEXAGON_S2_shuffob
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=shuffoh(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_shuffoh_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_shuffoh_PP __builtin_HEXAGON_S2_shuffoh
+
+/* ==========================================================================
+ Assembly Syntax: memb(Rx32++#s4:0:circ(Mu2))=Rt32
+ C Intrinsic Prototype: void Q6_memb_IMR_circ(void** Rx, Word32 Is4_0, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memb_IMR_circ __builtin_HEXAGON_S2_storerb_pci
+
+/* ==========================================================================
+ Assembly Syntax: memb(Rx32++I:circ(Mu2))=Rt32
+ C Intrinsic Prototype: void Q6_memb_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memb_MR_circ __builtin_HEXAGON_S2_storerb_pcr
+
+/* ==========================================================================
+ Assembly Syntax: memd(Rx32++#s4:3:circ(Mu2))=Rtt32
+ C Intrinsic Prototype: void Q6_memd_IMP_circ(void** Rx, Word32 Is4_3, Word32 Mu, Word64 Rtt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memd_IMP_circ __builtin_HEXAGON_S2_storerd_pci
+
+/* ==========================================================================
+ Assembly Syntax: memd(Rx32++I:circ(Mu2))=Rtt32
+ C Intrinsic Prototype: void Q6_memd_MP_circ(void** Rx, Word32 Mu, Word64 Rtt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memd_MP_circ __builtin_HEXAGON_S2_storerd_pcr
+
+/* ==========================================================================
+ Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32.h
+ C Intrinsic Prototype: void Q6_memh_IMRh_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memh_IMRh_circ __builtin_HEXAGON_S2_storerf_pci
+
+/* ==========================================================================
+ Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32.h
+ C Intrinsic Prototype: void Q6_memh_MRh_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memh_MRh_circ __builtin_HEXAGON_S2_storerf_pcr
+
+/* ==========================================================================
+ Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32
+ C Intrinsic Prototype: void Q6_memh_IMR_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memh_IMR_circ __builtin_HEXAGON_S2_storerh_pci
+
+/* ==========================================================================
+ Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32
+ C Intrinsic Prototype: void Q6_memh_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memh_MR_circ __builtin_HEXAGON_S2_storerh_pcr
+
+/* ==========================================================================
+ Assembly Syntax: memw(Rx32++#s4:2:circ(Mu2))=Rt32
+ C Intrinsic Prototype: void Q6_memw_IMR_circ(void** Rx, Word32 Is4_2, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memw_IMR_circ __builtin_HEXAGON_S2_storeri_pci
+
+/* ==========================================================================
+ Assembly Syntax: memw(Rx32++I:circ(Mu2))=Rt32
+ C Intrinsic Prototype: void Q6_memw_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress)
+ Instruction Type: ST
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_memw_MR_circ __builtin_HEXAGON_S2_storeri_pcr
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsathb(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_vsathb_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsathb_R __builtin_HEXAGON_S2_svsathb
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsathub(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_vsathub_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsathub_R __builtin_HEXAGON_S2_svsathub
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=tableidxb(Rs32,#u4,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_tableidxb_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_tableidxb_RII __builtin_HEXAGON_S2_tableidxb_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=tableidxd(Rs32,#u4,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_tableidxd_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_tableidxd_RII __builtin_HEXAGON_S2_tableidxd_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=tableidxh(Rs32,#u4,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_tableidxh_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_tableidxh_RII __builtin_HEXAGON_S2_tableidxh_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=tableidxw(Rs32,#u4,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_tableidxw_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_tableidxw_RII __builtin_HEXAGON_S2_tableidxw_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=togglebit(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_togglebit_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_togglebit_RI __builtin_HEXAGON_S2_togglebit_i
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=togglebit(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_togglebit_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_togglebit_RR __builtin_HEXAGON_S2_togglebit_r
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=tstbit(Rs32,#u5)
+ C Intrinsic Prototype: Byte Q6_p_tstbit_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_tstbit_RI __builtin_HEXAGON_S2_tstbit_i
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=tstbit(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_tstbit_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_tstbit_RR __builtin_HEXAGON_S2_tstbit_r
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,#u3)
+ C Intrinsic Prototype: Word64 Q6_P_valignb_PPI(Word64 Rtt, Word64 Rss, Word32 Iu3)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_valignb_PPI __builtin_HEXAGON_S2_valignib
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,Pu4)
+ C Intrinsic Prototype: Word64 Q6_P_valignb_PPp(Word64 Rtt, Word64 Rss, Byte Pu)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_valignb_PPp __builtin_HEXAGON_S2_valignrb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vcnegh(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vcnegh_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcnegh_PR __builtin_HEXAGON_S2_vcnegh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vcrotate(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vcrotate_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vcrotate_PR __builtin_HEXAGON_S2_vcrotate
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcnegh(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_vrcneghacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcneghacc_PR __builtin_HEXAGON_S2_vrcnegh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vrndwh(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vrndwh_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vrndwh_P __builtin_HEXAGON_S2_vrndpackwh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vrndwh(Rss32):sat
+ C Intrinsic Prototype: Word32 Q6_R_vrndwh_P_sat(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vrndwh_P_sat __builtin_HEXAGON_S2_vrndpackwhs
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsathb(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vsathb_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsathb_P __builtin_HEXAGON_S2_vsathb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsathb(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsathb_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsathb_P __builtin_HEXAGON_S2_vsathb_nopack
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsathub(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vsathub_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsathub_P __builtin_HEXAGON_S2_vsathub
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsathub(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsathub_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsathub_P __builtin_HEXAGON_S2_vsathub_nopack
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsatwh(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vsatwh_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsatwh_P __builtin_HEXAGON_S2_vsatwh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsatwh(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsatwh_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsatwh_P __builtin_HEXAGON_S2_vsatwh_nopack
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsatwuh(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vsatwuh_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsatwuh(Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vsatwuh_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh_nopack
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vsplatb(Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_vsplatb_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vsplatb_R __builtin_HEXAGON_S2_vsplatrb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsplath(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vsplath_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsplath_R __builtin_HEXAGON_S2_vsplatrh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,#u3)
+ C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPI(Word64 Rss, Word64 Rtt, Word32 Iu3)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vspliceb_PPI __builtin_HEXAGON_S2_vspliceib
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,Pu4)
+ C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPp(Word64 Rss, Word64 Rtt, Byte Pu)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vspliceb_PPp __builtin_HEXAGON_S2_vsplicerb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsxtbh(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vsxtbh_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsxtbh_R __builtin_HEXAGON_S2_vsxtbh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsxthw(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vsxthw_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsxthw_R __builtin_HEXAGON_S2_vsxthw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vtrunehb(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vtrunehb_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vtrunehb_P __builtin_HEXAGON_S2_vtrunehb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vtrunewh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vtrunewh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vtrunewh_PP __builtin_HEXAGON_S2_vtrunewh
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vtrunohb(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_vtrunohb_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vtrunohb_P __builtin_HEXAGON_S2_vtrunohb
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vtrunowh(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vtrunowh_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vtrunowh_PP __builtin_HEXAGON_S2_vtrunowh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vzxtbh(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vzxtbh_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vzxtbh_R __builtin_HEXAGON_S2_vzxtbh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vzxthw(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vzxthw_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vzxthw_R __builtin_HEXAGON_S2_vzxthw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rs32,add(Ru32,#s6))
+ C Intrinsic Prototype: Word32 Q6_R_add_add_RRI(Word32 Rs, Word32 Ru, Word32 Is6)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_add_RRI __builtin_HEXAGON_S4_addaddi
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=add(#u8,asl(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_add_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_asl_IRI __builtin_HEXAGON_S4_addi_asl_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=add(#u8,lsr(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_add_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_lsr_IRI __builtin_HEXAGON_S4_addi_lsr_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=and(#u8,asl(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_and_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_and_asl_IRI __builtin_HEXAGON_S4_andi_asl_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=and(#u8,lsr(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_and_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_and_lsr_IRI __builtin_HEXAGON_S4_andi_lsr_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(clb(Rs32),#s6)
+ C Intrinsic Prototype: Word32 Q6_R_add_clb_RI(Word32 Rs, Word32 Is6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_clb_RI __builtin_HEXAGON_S4_clbaddi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(clb(Rss32),#s6)
+ C Intrinsic Prototype: Word32 Q6_R_add_clb_PI(Word64 Rss, Word32 Is6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_clb_PI __builtin_HEXAGON_S4_clbpaddi
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=normamt(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_normamt_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_normamt_P __builtin_HEXAGON_S4_clbpnorm
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=extract(Rs32,#u5,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_extract_RII(Word32 Rs, Word32 Iu5, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_extract_RII __builtin_HEXAGON_S4_extract
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=extract(Rs32,Rtt32)
+ C Intrinsic Prototype: Word32 Q6_R_extract_RP(Word32 Rs, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_extract_RP __builtin_HEXAGON_S4_extract_rp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=extract(Rss32,#u6,#U6)
+ C Intrinsic Prototype: Word64 Q6_P_extract_PII(Word64 Rss, Word32 Iu6, Word32 IU6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_extract_PII __builtin_HEXAGON_S4_extractp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=extract(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_extract_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_extract_PP __builtin_HEXAGON_S4_extractp_rp
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=lsl(#s6,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_lsl_IR(Word32 Is6, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_lsl_IR __builtin_HEXAGON_S4_lsli
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!tstbit(Rs32,#u5)
+ C Intrinsic Prototype: Byte Q6_p_not_tstbit_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_tstbit_RI __builtin_HEXAGON_S4_ntstbit_i
+
+/* ==========================================================================
+ Assembly Syntax: Pd4=!tstbit(Rs32,Rt32)
+ C Intrinsic Prototype: Byte Q6_p_not_tstbit_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_tstbit_RR __builtin_HEXAGON_S4_ntstbit_r
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=and(Rs32,#s10)
+ C Intrinsic Prototype: Word32 Q6_R_andor_RI(Word32 Rx, Word32 Rs, Word32 Is10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_andor_RI __builtin_HEXAGON_S4_or_andi
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=or(Ru32,and(Rx32,#s10))
+ C Intrinsic Prototype: Word32 Q6_R_or_and_RRI(Word32 Ru, Word32 Rx, Word32 Is10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_or_and_RRI __builtin_HEXAGON_S4_or_andix
+
+/* ==========================================================================
+ Assembly Syntax: Rx32|=or(Rs32,#s10)
+ C Intrinsic Prototype: Word32 Q6_R_oror_RI(Word32 Rx, Word32 Rs, Word32 Is10)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_oror_RI __builtin_HEXAGON_S4_or_ori
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=or(#u8,asl(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_or_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_or_asl_IRI __builtin_HEXAGON_S4_ori_asl_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=or(#u8,lsr(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_or_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_or_lsr_IRI __builtin_HEXAGON_S4_ori_lsr_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=parity(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_parity_RR(Word32 Rs, Word32 Rt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_parity_RR __builtin_HEXAGON_S4_parity
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=add(Rs32,sub(#s6,Ru32))
+ C Intrinsic Prototype: Word32 Q6_R_add_sub_RIR(Word32 Rs, Word32 Is6, Word32 Ru)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_add_sub_RIR __builtin_HEXAGON_S4_subaddi
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=sub(#u8,asl(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_sub_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_asl_IRI __builtin_HEXAGON_S4_subi_asl_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rx32=sub(#u8,lsr(Rx32,#U5))
+ C Intrinsic Prototype: Word32 Q6_R_sub_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_sub_lsr_IRI __builtin_HEXAGON_S4_subi_lsr_ri
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vrcrotate(Rss32,Rt32,#u2)
+ C Intrinsic Prototype: Word64 Q6_P_vrcrotate_PRI(Word64 Rss, Word32 Rt, Word32 Iu2)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcrotate_PRI __builtin_HEXAGON_S4_vrcrotate
+
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vrcrotate(Rss32,Rt32,#u2)
+ C Intrinsic Prototype: Word64 Q6_P_vrcrotateacc_PRI(Word64 Rxx, Word64 Rss, Word32 Rt, Word32 Iu2)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vrcrotateacc_PRI __builtin_HEXAGON_S4_vrcrotate_acc
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vxaddsubh_PP_sat __builtin_HEXAGON_S4_vxaddsubh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vxaddsubh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxaddsubhr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vxaddsubw(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vxaddsubw_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vxaddsubw_PP_sat __builtin_HEXAGON_S4_vxaddsubw
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vxsubaddh_PP_sat __builtin_HEXAGON_S4_vxsubaddh
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat
+ C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vxsubaddh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxsubaddhr
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vxsubaddw(Rss32,Rtt32):sat
+ C Intrinsic Prototype: Word64 Q6_P_vxsubaddw_PP_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vxsubaddw_PP_sat __builtin_HEXAGON_S4_vxsubaddw
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vasrhub(Rss32,#u4):rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_rnd_sat(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_R_vasrhub_PI_rnd_sat __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=vasrhub(Rss32,#u4):sat
+ C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_sat(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_vasrhub_PI_sat __builtin_HEXAGON_S5_asrhub_sat
+
+/* ==========================================================================
+ Assembly Syntax: Rd32=popcount(Rss32)
+ C Intrinsic Prototype: Word32 Q6_R_popcount_P(Word64 Rss)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_popcount_P __builtin_HEXAGON_S5_popcountp
+
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vasrh(Rss32,#u4):rnd
+ C Intrinsic Prototype: Word64 Q6_P_vasrh_PI_rnd(Word64 Rss, Word32 Iu4)
+ Instruction Type: S_2op
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_P_vasrh_PI_rnd __builtin_HEXAGON_S5_vasrhrnd_goodsyntax
+
+/* ==========================================================================
+ Assembly Syntax: dccleana(Rs32)
+ C Intrinsic Prototype: void Q6_dccleana_A(Address Rs)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dccleana_A __builtin_HEXAGON_Y2_dccleana
+
+/* ==========================================================================
+ Assembly Syntax: dccleaninva(Rs32)
+ C Intrinsic Prototype: void Q6_dccleaninva_A(Address Rs)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dccleaninva_A __builtin_HEXAGON_Y2_dccleaninva
+
+/* ==========================================================================
+ Assembly Syntax: dcfetch(Rs32)
+ C Intrinsic Prototype: void Q6_dcfetch_A(Address Rs)
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_dcfetch_A __builtin_HEXAGON_Y2_dcfetch
+
+/* ==========================================================================
+ Assembly Syntax: dcinva(Rs32)
+ C Intrinsic Prototype: void Q6_dcinva_A(Address Rs)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dcinva_A __builtin_HEXAGON_Y2_dcinva
+
+/* ==========================================================================
+ Assembly Syntax: dczeroa(Rs32)
+ C Intrinsic Prototype: void Q6_dczeroa_A(Address Rs)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dczeroa_A __builtin_HEXAGON_Y2_dczeroa
+
+/* ==========================================================================
+ Assembly Syntax: l2fetch(Rs32,Rt32)
+ C Intrinsic Prototype: void Q6_l2fetch_AR(Address Rs, Word32 Rt)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_l2fetch_AR __builtin_HEXAGON_Y4_l2fetch
+
+/* ==========================================================================
+ Assembly Syntax: l2fetch(Rs32,Rtt32)
+ C Intrinsic Prototype: void Q6_l2fetch_AP(Address Rs, Word64 Rtt)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_l2fetch_AP __builtin_HEXAGON_Y5_l2fetch
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rdd32=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rol_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rol_PI __builtin_HEXAGON_S6_rol_i_p
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolacc_PI __builtin_HEXAGON_S6_rol_i_p_acc
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32&=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_roland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_roland_PI __builtin_HEXAGON_S6_rol_i_p_and
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32-=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolnac_PI __builtin_HEXAGON_S6_rol_i_p_nac
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32|=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolor_PI __builtin_HEXAGON_S6_rol_i_p_or
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rxx32^=rol(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_rolxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_rolxacc_PI __builtin_HEXAGON_S6_rol_i_p_xacc
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rd32=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rol_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rol_RI __builtin_HEXAGON_S6_rol_i_r
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32+=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolacc_RI __builtin_HEXAGON_S6_rol_i_r_acc
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32&=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_roland_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_roland_RI __builtin_HEXAGON_S6_rol_i_r_and
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32-=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolnac_RI __builtin_HEXAGON_S6_rol_i_r_nac
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32|=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolor_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolor_RI __builtin_HEXAGON_S6_rol_i_r_or
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rx32^=rol(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_rolxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_rolxacc_RI __builtin_HEXAGON_S6_rol_i_r_xacc
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsdiffb(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsdiffb_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsdiffb_PP __builtin_HEXAGON_M6_vabsdiffb
+#endif /* __HEXAGON_ARCH___ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vabsdiffub(Rtt32,Rss32)
+ C Intrinsic Prototype: Word64 Q6_P_vabsdiffub_PP(Word64 Rtt, Word64 Rss)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vabsdiffub_PP __builtin_HEXAGON_M6_vabsdiffub
+#endif /* __HEXAGON_ARCH___ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vsplatb(Rs32)
+ C Intrinsic Prototype: Word64 Q6_P_vsplatb_R(Word32 Rs)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vsplatb_R __builtin_HEXAGON_S6_vsplatrbp
+#endif /* __HEXAGON_ARCH___ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vtrunehb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vtrunehb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vtrunehb_PP __builtin_HEXAGON_S6_vtrunehb_ppp
+#endif /* __HEXAGON_ARCH___ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vtrunohb(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vtrunohb_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp
+#endif /* __HEXAGON_ARCH___ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 62
+/* ==========================================================================
+ Assembly Syntax: Vd32=vmem(Rt32):nt
+ C Intrinsic Prototype: HVX_Vector Q6_V_vmem_R_nt(Word32 Rt)
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vmem_R_nt __builtin_HEXAGON_V6_ldntnt0
+#endif /* __HEXAGON_ARCH___ >= 62 */
+
+#if __HEXAGON_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Pd4=!any8(vcmpb.eq(Rss32,Rtt32))
+ C Intrinsic Prototype: Byte Q6_p_not_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: ALU64
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_p_not_any8_vcmpb_eq_PP __builtin_HEXAGON_A6_vcmpbeq_notany
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfadd(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfadd_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfadd_PP __builtin_HEXAGON_F2_dfadd
+#endif /* __HEXAGON_ARCH___ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfsub(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfsub_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfsub_PP __builtin_HEXAGON_F2_dfsub
+#endif /* __HEXAGON_ARCH___ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rx32-=mpyi(Rs32,Rt32)
+ C Intrinsic Prototype: Word32 Q6_R_mpyinac_RR(Word32 Rx, Word32 Rs, Word32 Rt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mpyinac_RR __builtin_HEXAGON_M2_mnaci
+#endif /* __HEXAGON_ARCH___ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Rd32=mask(#u5,#U5)
+ C Intrinsic Prototype: Word32 Q6_R_mask_II(Word32 Iu5, Word32 IU5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_mask_II __builtin_HEXAGON_S2_mask
+#endif /* __HEXAGON_ARCH___ >= 66 */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=clip(Rs32,#u5)
+ C Intrinsic Prototype: Word32 Q6_R_clip_RI(Word32 Rs, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_R_clip_RI __builtin_HEXAGON_A7_clip
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cround(Rss32,#u6)
+ C Intrinsic Prototype: Word64 Q6_P_cround_PI(Word64 Rss, Word32 Iu6)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cround_PI __builtin_HEXAGON_A7_croundd_ri
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cround(Rss32,Rt32)
+ C Intrinsic Prototype: Word64 Q6_P_cround_PR(Word64 Rss, Word32 Rt)
+ Instruction Type: S_3op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_cround_PR __builtin_HEXAGON_A7_croundd_rr
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vclip(Rss32,#u5)
+ C Intrinsic Prototype: Word64 Q6_P_vclip_PI(Word64 Rss, Word32 Iu5)
+ Instruction Type: S_2op
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_vclip_PI __builtin_HEXAGON_A7_vclip
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfmax(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfmax_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmax_PP __builtin_HEXAGON_F2_dfmax
+#endif /* __HEXAGON_ARCH___ >= 67 */
+
+#if __HEXAGON_ARCH__ >= 67
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfmin(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfmin_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmin_PP __builtin_HEXAGON_F2_dfmin
+#endif /* __HEXAGON_ARCH___ >= 67 */
+
+#if __HEXAGON_ARCH__ >= 67
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfmpyfix(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfmpyfix_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmpyfix_PP __builtin_HEXAGON_F2_dfmpyfix
+#endif /* __HEXAGON_ARCH___ >= 67 */
+
+#if __HEXAGON_ARCH__ >= 67
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=dfmpyhh(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfmpyhhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmpyhhacc_PP __builtin_HEXAGON_F2_dfmpyhh
+#endif /* __HEXAGON_ARCH___ >= 67 */
+
+#if __HEXAGON_ARCH__ >= 67
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=dfmpylh(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfmpylhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmpylhacc_PP __builtin_HEXAGON_F2_dfmpylh
+#endif /* __HEXAGON_ARCH___ >= 67 */
+
+#if __HEXAGON_ARCH__ >= 67
+/* ==========================================================================
+ Assembly Syntax: Rdd32=dfmpyll(Rss32,Rtt32)
+ C Intrinsic Prototype: Float64 Q6_P_dfmpyll_PP(Float64 Rss, Float64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_P_dfmpyll_PP __builtin_HEXAGON_F2_dfmpyll
+#endif /* __HEXAGON_ARCH___ >= 67 */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyiw_PP __builtin_HEXAGON_M7_dcmpyiw
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyiwacc_PP __builtin_HEXAGON_M7_dcmpyiw_acc
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP_conj(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyiw_PP_conj __builtin_HEXAGON_M7_dcmpyiwc
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyiwacc_PP_conj __builtin_HEXAGON_M7_dcmpyiwc_acc
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyrw_PP __builtin_HEXAGON_M7_dcmpyrw
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyrwacc_PP __builtin_HEXAGON_M7_dcmpyrw_acc
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP_conj(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyrw_PP_conj __builtin_HEXAGON_M7_dcmpyrwc
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32*)
+ C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_cmpyrwacc_PP_conj __builtin_HEXAGON_M7_dcmpyrwc_acc
+#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rdd32=vdmpyw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vdmpyw_PP(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_vdmpyw_PP __builtin_HEXAGON_M7_vdmpy
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rxx32+=vdmpyw(Rss32,Rtt32)
+ C Intrinsic Prototype: Word64 Q6_P_vdmpywacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_P_vdmpywacc_PP __builtin_HEXAGON_M7_vdmpy_acc
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyiw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyiw
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyiw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiw_rnd
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyiw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyiwc
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyiw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiwc_rnd
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyrw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyrw
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyrw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrw_rnd
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyrw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyrwc
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__
+/* ==========================================================================
+ Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat
+ C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt)
+ Instruction Type: M
+ Execution Slots: SLOT3
+ ========================================================================== */
+
+#define Q6_R_cmpyrw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrwc_rnd
+#endif /* __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ */
+
+#if __HEXAGON_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: dmlink(Rs32,Rt32)
+ C Intrinsic Prototype: void Q6_dmlink_AA(Address Rs, Address Rt)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dmlink_AA __builtin_HEXAGON_Y6_dmlink
+#endif /* __HEXAGON_ARCH__ >= 68 */
+
+#if __HEXAGON_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Rd32=dmpause
+ C Intrinsic Prototype: Word32 Q6_R_dmpause()
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_R_dmpause __builtin_HEXAGON_Y6_dmpause
+#endif /* __HEXAGON_ARCH__ >= 68 */
+
+#if __HEXAGON_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Rd32=dmpoll
+ C Intrinsic Prototype: Word32 Q6_R_dmpoll()
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_R_dmpoll __builtin_HEXAGON_Y6_dmpoll
+#endif /* __HEXAGON_ARCH__ >= 68 */
+
+#if __HEXAGON_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: dmresume(Rs32)
+ C Intrinsic Prototype: void Q6_dmresume_A(Address Rs)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dmresume_A __builtin_HEXAGON_Y6_dmresume
+#endif /* __HEXAGON_ARCH__ >= 68 */
+
+#if __HEXAGON_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: dmstart(Rs32)
+ C Intrinsic Prototype: void Q6_dmstart_A(Address Rs)
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_dmstart_A __builtin_HEXAGON_Y6_dmstart
+#endif /* __HEXAGON_ARCH__ >= 68 */
+
+#if __HEXAGON_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Rd32=dmwait
+ C Intrinsic Prototype: Word32 Q6_R_dmwait()
+ Instruction Type: ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_R_dmwait __builtin_HEXAGON_Y6_dmwait
+#endif /* __HEXAGON_ARCH__ >= 68 */
+
+#include <hexagon_circ_brev_intrinsics.h>
+#ifdef __HVX__
+#include <hvx_hexagon_protos.h>
+#endif /* __HVX__ */
+#endif
diff --git a/clang/lib/Headers/hexagon_types.h b/clang/lib/Headers/hexagon_types.h
new file mode 100644
index 000000000000..6958809418d8
--- /dev/null
+++ b/clang/lib/Headers/hexagon_types.h
@@ -0,0 +1,2653 @@
+/******************************************************************************/
+/* (c) 2020 Qualcomm Innovation Center, Inc. All rights reserved. */
+/* */
+/******************************************************************************/
+#ifndef HEXAGON_TYPES_H
+#define HEXAGON_TYPES_H
+
+#include <hexagon_protos.h>
+
+/* Hexagon names */
+#define HEXAGON_Vect HEXAGON_Vect64
+#define HEXAGON_V_GET_D HEXAGON_V64_GET_D
+#define HEXAGON_V_GET_UD HEXAGON_V64_GET_UD
+#define HEXAGON_V_GET_W0 HEXAGON_V64_GET_W0
+#define HEXAGON_V_GET_W1 HEXAGON_V64_GET_W1
+#define HEXAGON_V_GET_UW0 HEXAGON_V64_GET_UW0
+#define HEXAGON_V_GET_UW1 HEXAGON_V64_GET_UW1
+#define HEXAGON_V_GET_H0 HEXAGON_V64_GET_H0
+#define HEXAGON_V_GET_H1 HEXAGON_V64_GET_H1
+#define HEXAGON_V_GET_H2 HEXAGON_V64_GET_H2
+#define HEXAGON_V_GET_H3 HEXAGON_V64_GET_H3
+#define HEXAGON_V_GET_UH0 HEXAGON_V64_GET_UH0
+#define HEXAGON_V_GET_UH1 HEXAGON_V64_GET_UH1
+#define HEXAGON_V_GET_UH2 HEXAGON_V64_GET_UH2
+#define HEXAGON_V_GET_UH3 HEXAGON_V64_GET_UH3
+#define HEXAGON_V_GET_B0 HEXAGON_V64_GET_B0
+#define HEXAGON_V_GET_B1 HEXAGON_V64_GET_B1
+#define HEXAGON_V_GET_B2 HEXAGON_V64_GET_B2
+#define HEXAGON_V_GET_B3 HEXAGON_V64_GET_B3
+#define HEXAGON_V_GET_B4 HEXAGON_V64_GET_B4
+#define HEXAGON_V_GET_B5 HEXAGON_V64_GET_B5
+#define HEXAGON_V_GET_B6 HEXAGON_V64_GET_B6
+#define HEXAGON_V_GET_B7 HEXAGON_V64_GET_B7
+#define HEXAGON_V_GET_UB0 HEXAGON_V64_GET_UB0
+#define HEXAGON_V_GET_UB1 HEXAGON_V64_GET_UB1
+#define HEXAGON_V_GET_UB2 HEXAGON_V64_GET_UB2
+#define HEXAGON_V_GET_UB3 HEXAGON_V64_GET_UB3
+#define HEXAGON_V_GET_UB4 HEXAGON_V64_GET_UB4
+#define HEXAGON_V_GET_UB5 HEXAGON_V64_GET_UB5
+#define HEXAGON_V_GET_UB6 HEXAGON_V64_GET_UB6
+#define HEXAGON_V_GET_UB7 HEXAGON_V64_GET_UB7
+#define HEXAGON_V_PUT_D HEXAGON_V64_PUT_D
+#define HEXAGON_V_PUT_W0 HEXAGON_V64_PUT_W0
+#define HEXAGON_V_PUT_W1 HEXAGON_V64_PUT_W1
+#define HEXAGON_V_PUT_H0 HEXAGON_V64_PUT_H0
+#define HEXAGON_V_PUT_H1 HEXAGON_V64_PUT_H1
+#define HEXAGON_V_PUT_H2 HEXAGON_V64_PUT_H2
+#define HEXAGON_V_PUT_H3 HEXAGON_V64_PUT_H3
+#define HEXAGON_V_PUT_B0 HEXAGON_V64_PUT_B0
+#define HEXAGON_V_PUT_B1 HEXAGON_V64_PUT_B1
+#define HEXAGON_V_PUT_B2 HEXAGON_V64_PUT_B2
+#define HEXAGON_V_PUT_B3 HEXAGON_V64_PUT_B3
+#define HEXAGON_V_PUT_B4 HEXAGON_V64_PUT_B4
+#define HEXAGON_V_PUT_B5 HEXAGON_V64_PUT_B5
+#define HEXAGON_V_PUT_B6 HEXAGON_V64_PUT_B6
+#define HEXAGON_V_PUT_B7 HEXAGON_V64_PUT_B7
+#define HEXAGON_V_CREATE_D HEXAGON_V64_CREATE_D
+#define HEXAGON_V_CREATE_W HEXAGON_V64_CREATE_W
+#define HEXAGON_V_CREATE_H HEXAGON_V64_CREATE_H
+#define HEXAGON_V_CREATE_B HEXAGON_V64_CREATE_B
+
+#ifdef __cplusplus
+#define HEXAGON_VectC HEXAGON_Vect64C
+#endif /* __cplusplus */
+
+/* 64 Bit Vectors */
+
+typedef long long __attribute__((__may_alias__)) HEXAGON_Vect64;
+
+/* Extract doubleword macros */
+
+#define HEXAGON_V64_GET_D(v) (v)
+#define HEXAGON_V64_GET_UD(v) ((unsigned long long)(v))
+
+/* Extract word macros */
+
+#define HEXAGON_V64_GET_W0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.w[0]; \
+ })
+#define HEXAGON_V64_GET_W1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.w[1]; \
+ })
+#define HEXAGON_V64_GET_UW0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned int uw[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.uw[0]; \
+ })
+#define HEXAGON_V64_GET_UW1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned int uw[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.uw[1]; \
+ })
+
+/* Extract half word macros */
+
+#define HEXAGON_V64_GET_H0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[0]; \
+ })
+#define HEXAGON_V64_GET_H1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[1]; \
+ })
+#define HEXAGON_V64_GET_H2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[2]; \
+ })
+#define HEXAGON_V64_GET_H3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[3]; \
+ })
+#define HEXAGON_V64_GET_UH0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.uh[0]; \
+ })
+#define HEXAGON_V64_GET_UH1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.uh[1]; \
+ })
+#define HEXAGON_V64_GET_UH2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.uh[2]; \
+ })
+#define HEXAGON_V64_GET_UH3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.uh[3]; \
+ })
+
+/* Extract byte macros */
+
+#define HEXAGON_V64_GET_B0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[0]; \
+ })
+#define HEXAGON_V64_GET_B1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[1]; \
+ })
+#define HEXAGON_V64_GET_B2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[2]; \
+ })
+#define HEXAGON_V64_GET_B3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[3]; \
+ })
+#define HEXAGON_V64_GET_B4(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[4]; \
+ })
+#define HEXAGON_V64_GET_B5(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[5]; \
+ })
+#define HEXAGON_V64_GET_B6(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[6]; \
+ })
+#define HEXAGON_V64_GET_B7(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[7]; \
+ })
+#define HEXAGON_V64_GET_UB0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[0]; \
+ })
+#define HEXAGON_V64_GET_UB1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[1]; \
+ })
+#define HEXAGON_V64_GET_UB2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[2]; \
+ })
+#define HEXAGON_V64_GET_UB3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[3]; \
+ })
+#define HEXAGON_V64_GET_UB4(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[4]; \
+ })
+#define HEXAGON_V64_GET_UB5(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[5]; \
+ })
+#define HEXAGON_V64_GET_UB6(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[6]; \
+ })
+#define HEXAGON_V64_GET_UB7(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.ub[7]; \
+ })
+
+/* NOTE: All set macros return a HEXAGON_Vect64 type */
+
+/* Set doubleword macro */
+
+#define HEXAGON_V64_PUT_D(v, new) (new)
+
+/* Set word macros */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V64_PUT_W0(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.w[0] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_W1(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.w[1] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V64_PUT_W0(v, new) \
+ (((v) & 0xffffffff00000000LL) | ((HEXAGON_Vect64)((unsigned int)(new))))
+#define HEXAGON_V64_PUT_W1(v, new) \
+ (((v) & 0x00000000ffffffffLL) | (((HEXAGON_Vect64)(new)) << 32LL))
+
+#endif /* !__hexagon__ */
+
+/* Set half word macros */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V64_PUT_H0(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[0] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_H1(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[1] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_H2(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[2] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_H3(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.h[3] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V64_PUT_H0(v, new) \
+ (((v) & 0xffffffffffff0000LL) | ((HEXAGON_Vect64)((unsigned short)(new))))
+#define HEXAGON_V64_PUT_H1(v, new) \
+ (((v) & 0xffffffff0000ffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 16LL))
+#define HEXAGON_V64_PUT_H2(v, new) \
+ (((v) & 0xffff0000ffffffffLL) | (((HEXAGON_Vect64)((unsigned short)(new))) << 32LL))
+#define HEXAGON_V64_PUT_H3(v, new) \
+ (((v) & 0x0000ffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 48LL))
+
+#endif /* !__hexagon__ */
+
+/* Set byte macros */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V64_PUT_B0(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[0] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B1(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[1] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B2(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[2] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B3(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[3] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B4(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[4] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B5(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[5] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B6(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[6] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+#define HEXAGON_V64_PUT_B7(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.d = (v); \
+ _HEXAGON_V64_internal_union.b[7] = (new); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V64_PUT_B0(v, new) \
+ (((v) & 0xffffffffffffff00LL) | ((HEXAGON_Vect64)((unsigned char)(new))))
+#define HEXAGON_V64_PUT_B1(v, new) \
+ (((v) & 0xffffffffffff00ffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 8LL))
+#define HEXAGON_V64_PUT_B2(v, new) \
+ (((v) & 0xffffffffff00ffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 16LL))
+#define HEXAGON_V64_PUT_B3(v, new) \
+ (((v) & 0xffffffff00ffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 24LL))
+#define HEXAGON_V64_PUT_B4(v, new) \
+ (((v) & 0xffffff00ffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 32LL))
+#define HEXAGON_V64_PUT_B5(v, new) \
+ (((v) & 0xffff00ffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 40LL))
+#define HEXAGON_V64_PUT_B6(v, new) \
+ (((v) & 0xff00ffffffffffffLL) | (((HEXAGON_Vect64)((unsigned char)(new))) << 48LL))
+#define HEXAGON_V64_PUT_B7(v, new) \
+ (((v) & 0x00ffffffffffffffLL) | (((HEXAGON_Vect64)(new)) << 56LL))
+
+#endif /* !__hexagon__ */
+
+/* NOTE: All create macros return a HEXAGON_Vect64 type */
+
+/* Create from a doubleword */
+
+#define HEXAGON_V64_CREATE_D(d) (d)
+
+/* Create from words */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V64_CREATE_W(w1, w0) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.w[0] = (w0); \
+ _HEXAGON_V64_internal_union.w[1] = (w1); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V64_CREATE_W(w1, w0) \
+ ((((HEXAGON_Vect64)(w1)) << 32LL) | ((HEXAGON_Vect64)((w0) & 0xffffffff)))
+
+#endif /* !__hexagon__ */
+
+/* Create from half words */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.h[0] = (h0); \
+ _HEXAGON_V64_internal_union.h[1] = (h1); \
+ _HEXAGON_V64_internal_union.h[2] = (h2); \
+ _HEXAGON_V64_internal_union.h[3] = (h3); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V64_CREATE_H(h3, h2, h1, h0) \
+ ((((HEXAGON_Vect64)(h3)) << 48LL) | (((HEXAGON_Vect64)((h2) & 0xffff)) << 32LL) | \
+ (((HEXAGON_Vect64)((h1) & 0xffff)) << 16LL) | ((HEXAGON_Vect64)((h0) & 0xffff)))
+
+#endif /* !__hexagon__ */
+
+/* Create from bytes */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _HEXAGON_V64_internal_union; \
+ _HEXAGON_V64_internal_union.b[0] = (b0); \
+ _HEXAGON_V64_internal_union.b[1] = (b1); \
+ _HEXAGON_V64_internal_union.b[2] = (b2); \
+ _HEXAGON_V64_internal_union.b[3] = (b3); \
+ _HEXAGON_V64_internal_union.b[4] = (b4); \
+ _HEXAGON_V64_internal_union.b[5] = (b5); \
+ _HEXAGON_V64_internal_union.b[6] = (b6); \
+ _HEXAGON_V64_internal_union.b[7] = (b7); \
+ _HEXAGON_V64_internal_union.d; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \
+ ((((HEXAGON_Vect64)(b7)) << 56LL) | (((HEXAGON_Vect64)((b6) & 0xff)) << 48LL) | \
+ (((HEXAGON_Vect64)((b5) & 0xff)) << 40LL) | (((HEXAGON_Vect64)((b4) & 0xff)) << 32LL) | \
+ (((HEXAGON_Vect64)((b3) & 0xff)) << 24LL) | (((HEXAGON_Vect64)((b2) & 0xff)) << 16LL) | \
+ (((HEXAGON_Vect64)((b1) & 0xff)) << 8LL) | ((HEXAGON_Vect64)((b0) & 0xff)))
+
+#endif /* !__hexagon__ */
+
+#ifdef __cplusplus
+
+class HEXAGON_Vect64C {
+public:
+ // Constructors
+ HEXAGON_Vect64C(long long d = 0) : data(d) {};
+ HEXAGON_Vect64C(int w1, int w0) : data(HEXAGON_V64_CREATE_W(w1, w0)) {};
+ HEXAGON_Vect64C(short h3, short h2, short h1, short h0)
+ : data(HEXAGON_V64_CREATE_H(h3, h2, h1, h0)) {};
+ HEXAGON_Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,
+ signed char b3, signed char b2, signed char b1, signed char b0)
+ : data(HEXAGON_V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {};
+ HEXAGON_Vect64C(const HEXAGON_Vect64C &v) : data(v.data) {};
+
+ HEXAGON_Vect64C &operator=(const HEXAGON_Vect64C &v) {
+ data = v.data;
+ return *this;
+ };
+
+ operator long long() {
+ return data;
+ };
+
+ // Extract doubleword methods
+ long long D(void) {
+ return HEXAGON_V64_GET_D(data);
+ };
+ unsigned long long UD(void) {
+ return HEXAGON_V64_GET_UD(data);
+ };
+
+ // Extract word methods
+ int W0(void) {
+ return HEXAGON_V64_GET_W0(data);
+ };
+ int W1(void) {
+ return HEXAGON_V64_GET_W1(data);
+ };
+ unsigned int UW0(void) {
+ return HEXAGON_V64_GET_UW0(data);
+ };
+ unsigned int UW1(void) {
+ return HEXAGON_V64_GET_UW1(data);
+ };
+
+ // Extract half word methods
+ short H0(void) {
+ return HEXAGON_V64_GET_H0(data);
+ };
+ short H1(void) {
+ return HEXAGON_V64_GET_H1(data);
+ };
+ short H2(void) {
+ return HEXAGON_V64_GET_H2(data);
+ };
+ short H3(void) {
+ return HEXAGON_V64_GET_H3(data);
+ };
+ unsigned short UH0(void) {
+ return HEXAGON_V64_GET_UH0(data);
+ };
+ unsigned short UH1(void) {
+ return HEXAGON_V64_GET_UH1(data);
+ };
+ unsigned short UH2(void) {
+ return HEXAGON_V64_GET_UH2(data);
+ };
+ unsigned short UH3(void) {
+ return HEXAGON_V64_GET_UH3(data);
+ };
+
+ // Extract byte methods
+ signed char B0(void) {
+ return HEXAGON_V64_GET_B0(data);
+ };
+ signed char B1(void) {
+ return HEXAGON_V64_GET_B1(data);
+ };
+ signed char B2(void) {
+ return HEXAGON_V64_GET_B2(data);
+ };
+ signed char B3(void) {
+ return HEXAGON_V64_GET_B3(data);
+ };
+ signed char B4(void) {
+ return HEXAGON_V64_GET_B4(data);
+ };
+ signed char B5(void) {
+ return HEXAGON_V64_GET_B5(data);
+ };
+ signed char B6(void) {
+ return HEXAGON_V64_GET_B6(data);
+ };
+ signed char B7(void) {
+ return HEXAGON_V64_GET_B7(data);
+ };
+ unsigned char UB0(void) {
+ return HEXAGON_V64_GET_UB0(data);
+ };
+ unsigned char UB1(void) {
+ return HEXAGON_V64_GET_UB1(data);
+ };
+ unsigned char UB2(void) {
+ return HEXAGON_V64_GET_UB2(data);
+ };
+ unsigned char UB3(void) {
+ return HEXAGON_V64_GET_UB3(data);
+ };
+ unsigned char UB4(void) {
+ return HEXAGON_V64_GET_UB4(data);
+ };
+ unsigned char UB5(void) {
+ return HEXAGON_V64_GET_UB5(data);
+ };
+ unsigned char UB6(void) {
+ return HEXAGON_V64_GET_UB6(data);
+ };
+ unsigned char UB7(void) {
+ return HEXAGON_V64_GET_UB7(data);
+ };
+
+ // NOTE: All set methods return a HEXAGON_Vect64C type
+
+ // Set doubleword method
+ HEXAGON_Vect64C D(long long d) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_D(data, d));
+ };
+
+ // Set word methods
+ HEXAGON_Vect64C W0(int w) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_W0(data, w));
+ };
+ HEXAGON_Vect64C W1(int w) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_W1(data, w));
+ };
+
+ // Set half word methods
+ HEXAGON_Vect64C H0(short h) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_H0(data, h));
+ };
+ HEXAGON_Vect64C H1(short h) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_H1(data, h));
+ };
+ HEXAGON_Vect64C H2(short h) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_H2(data, h));
+ };
+ HEXAGON_Vect64C H3(short h) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_H3(data, h));
+ };
+
+ // Set byte methods
+ HEXAGON_Vect64C B0(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B0(data, b));
+ };
+ HEXAGON_Vect64C B1(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B1(data, b));
+ };
+ HEXAGON_Vect64C B2(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B2(data, b));
+ };
+ HEXAGON_Vect64C B3(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B3(data, b));
+ };
+ HEXAGON_Vect64C B4(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B4(data, b));
+ };
+ HEXAGON_Vect64C B5(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B5(data, b));
+ };
+ HEXAGON_Vect64C B6(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B6(data, b));
+ };
+ HEXAGON_Vect64C B7(signed char b) {
+ return HEXAGON_Vect64C(HEXAGON_V64_PUT_B7(data, b));
+ };
+
+private:
+ long long data;
+};
+
+#endif /* __cplusplus */
+
+/* 32 Bit Vectors */
+
+/* A 32-bit vector is modeled as a plain int; the macros below reinterpret
+   its half-word and byte lanes. */
+typedef int HEXAGON_Vect32;
+
+/* Extract word macros */
+
+/* Whole-word extraction is the identity (optionally viewed as unsigned). */
+#define HEXAGON_V32_GET_W(v) (v)
+#define HEXAGON_V32_GET_UW(v) ((unsigned int)(v))
+
+/* Extract half word macros */
+
+/* Each GET macro copies the vector into a union and reads one lane back,
+   so lane numbering follows the target's in-memory byte order;
+   __extension__ suppresses pedantic warnings about the GNU statement
+   expression. */
+#define HEXAGON_V32_GET_H0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.h[0]; \
+ })
+#define HEXAGON_V32_GET_H1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.h[1]; \
+ })
+#define HEXAGON_V32_GET_UH0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned short uh[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.uh[0]; \
+ })
+#define HEXAGON_V32_GET_UH1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned short uh[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.uh[1]; \
+ })
+
+/* Extract byte macros */
+
+/* Byte lanes are read as signed char (B*) or unsigned char (UB*). */
+#define HEXAGON_V32_GET_B0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[0]; \
+ })
+#define HEXAGON_V32_GET_B1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[1]; \
+ })
+#define HEXAGON_V32_GET_B2(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[2]; \
+ })
+#define HEXAGON_V32_GET_B3(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[3]; \
+ })
+#define HEXAGON_V32_GET_UB0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.ub[0]; \
+ })
+#define HEXAGON_V32_GET_UB1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.ub[1]; \
+ })
+#define HEXAGON_V32_GET_UB2(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.ub[2]; \
+ })
+#define HEXAGON_V32_GET_UB3(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.ub[3]; \
+ })
+
+/* NOTE: All set macros return a HEXAGON_Vect32 type */
+
+/* Set word macro */
+
+/* Replacing the whole word ignores the old value entirely. */
+#define HEXAGON_V32_PUT_W(v, new) (new)
+
+/* Set half word macros */
+
+/* Two implementations: on Hexagon the lane store goes through a union
+   (native in-memory layout); the host fallback uses mask-and-shift.
+   The topmost lane (H1 below, B3 further down) needs no mask because its
+   shifted bits cannot collide with the preserved low bits. */
+#ifdef __hexagon__
+
+#define HEXAGON_V32_PUT_H0(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.h[0] = (new); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+#define HEXAGON_V32_PUT_H1(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.h[1] = (new); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V32_PUT_H0(v, new) \
+ (((v) & 0xffff0000) | ((HEXAGON_Vect32)((unsigned short)(new))))
+#define HEXAGON_V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((HEXAGON_Vect32)(new)) << 16))
+
+#endif /* !__hexagon__ */
+
+/* Set byte macros */
+
+#ifdef __hexagon__
+
+#define HEXAGON_V32_PUT_B0(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[0] = (new); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+#define HEXAGON_V32_PUT_B1(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[1] = (new); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+#define HEXAGON_V32_PUT_B2(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[2] = (new); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+#define HEXAGON_V32_PUT_B3(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.w = (v); \
+ _HEXAGON_V32_internal_union.b[3] = (new); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+
+#else /* !__hexagon__ */
+
+#define HEXAGON_V32_PUT_B0(v, new) \
+ (((v) & 0xffffff00) | ((HEXAGON_Vect32)((unsigned char)(new))))
+#define HEXAGON_V32_PUT_B1(v, new) \
+ (((v) & 0xffff00ff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 8))
+#define HEXAGON_V32_PUT_B2(v, new) \
+ (((v) & 0xff00ffff) | (((HEXAGON_Vect32)((unsigned char)(new))) << 16))
+#define HEXAGON_V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((HEXAGON_Vect32)(new)) << 24))
+
+#endif /* !__hexagon__ */
+
+/* NOTE: All create macros return a HEXAGON_Vect32 type */
+
+/* Create from a word */
+
+#define HEXAGON_V32_CREATE_W(w) (w)
+
+/* Create from half words */
+
+#ifdef __hexagon__
+
+/* Build a 32-bit vector from two half words via a union so lane placement
+   follows the target's in-memory layout.  The union intentionally pairs a
+   32-bit word with exactly two half words: a wider (64-bit) member here
+   would read back uninitialized bytes and produce a 64-bit result where
+   callers expect a HEXAGON_Vect32. */
+#define HEXAGON_V32_CREATE_H(h1, h0) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.h[0] = (h0); \
+ _HEXAGON_V32_internal_union.h[1] = (h1); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+
+#else /* !__hexagon__ */
+
+/* Host fallback: h1 goes to the high half, h0 (masked) to the low half. */
+#define HEXAGON_V32_CREATE_H(h1, h0) \
+ ((((HEXAGON_Vect32)(h1)) << 16) | ((HEXAGON_Vect32)((h0) & 0xffff)))
+
+#endif /* !__hexagon__ */
+
+/* Create from bytes */
+#ifdef __hexagon__
+
+/* Build a 32-bit vector from four bytes via a union.  As with
+   HEXAGON_V32_CREATE_H, the word member is a 32-bit int matching the four
+   byte lanes exactly: a 64-bit member would leave half the union
+   uninitialized when it is read back and would yield a 64-bit value for a
+   32-bit vector. */
+#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _HEXAGON_V32_internal_union; \
+ _HEXAGON_V32_internal_union.b[0] = (b0); \
+ _HEXAGON_V32_internal_union.b[1] = (b1); \
+ _HEXAGON_V32_internal_union.b[2] = (b2); \
+ _HEXAGON_V32_internal_union.b[3] = (b3); \
+ _HEXAGON_V32_internal_union.w; \
+ })
+
+#else /* !__hexagon__ */
+
+/* Host fallback: b3 is the most significant byte; lower lanes are masked
+   so sign extension of a negative byte cannot corrupt higher lanes. */
+#define HEXAGON_V32_CREATE_B(b3, b2, b1, b0) \
+ ((((HEXAGON_Vect32)(b3)) << 24) | (((HEXAGON_Vect32)((b2) & 0xff)) << 16) | \
+ (((HEXAGON_Vect32)((b1) & 0xff)) << 8) | ((HEXAGON_Vect32)((b0) & 0xff)))
+
+#endif /* !__hexagon__ */
+
+#ifdef __cplusplus
+
+/* Thin value wrapper around HEXAGON_Vect32 giving lane access as member
+   functions.  Readers return the lane value; writers are non-mutating and
+   return a new HEXAGON_Vect32C with the requested lane replaced. */
+class HEXAGON_Vect32C {
+public:
+  /* Construct from a whole word, two half words, four bytes, or a copy. */
+  HEXAGON_Vect32C(int w = 0) : data(w) {}
+  HEXAGON_Vect32C(short h1, short h0) : data(HEXAGON_V32_CREATE_H(h1, h0)) {}
+  HEXAGON_Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)
+      : data(HEXAGON_V32_CREATE_B(b3, b2, b1, b0)) {}
+  HEXAGON_Vect32C(const HEXAGON_Vect32C &v) : data(v.data) {}
+
+  HEXAGON_Vect32C &operator=(const HEXAGON_Vect32C &v) {
+    data = v.data;
+    return *this;
+  }
+
+  /* Implicit conversion back to the raw 32-bit representation. */
+  operator int() { return data; }
+
+  /* Word readers */
+  int W() { return HEXAGON_V32_GET_W(data); }
+  unsigned int UW() { return HEXAGON_V32_GET_UW(data); }
+
+  /* Half-word readers */
+  short H0() { return HEXAGON_V32_GET_H0(data); }
+  short H1() { return HEXAGON_V32_GET_H1(data); }
+  unsigned short UH0() { return HEXAGON_V32_GET_UH0(data); }
+  unsigned short UH1() { return HEXAGON_V32_GET_UH1(data); }
+
+  /* Byte readers */
+  signed char B0() { return HEXAGON_V32_GET_B0(data); }
+  signed char B1() { return HEXAGON_V32_GET_B1(data); }
+  signed char B2() { return HEXAGON_V32_GET_B2(data); }
+  signed char B3() { return HEXAGON_V32_GET_B3(data); }
+  unsigned char UB0() { return HEXAGON_V32_GET_UB0(data); }
+  unsigned char UB1() { return HEXAGON_V32_GET_UB1(data); }
+  unsigned char UB2() { return HEXAGON_V32_GET_UB2(data); }
+  unsigned char UB3() { return HEXAGON_V32_GET_UB3(data); }
+
+  /* NOTE: every writer below returns a fresh HEXAGON_Vect32C; *this is
+     left untouched. */
+
+  /* Word writer */
+  HEXAGON_Vect32C W(int w) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_W(data, w)); }
+
+  /* Half-word writers */
+  HEXAGON_Vect32C H0(short h) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_H0(data, h)); }
+  HEXAGON_Vect32C H1(short h) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_H1(data, h)); }
+
+  /* Byte writers */
+  HEXAGON_Vect32C B0(signed char b) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_B0(data, b)); }
+  HEXAGON_Vect32C B1(signed char b) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_B1(data, b)); }
+  HEXAGON_Vect32C B2(signed char b) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_B2(data, b)); }
+  HEXAGON_Vect32C B3(signed char b) { return HEXAGON_Vect32C(HEXAGON_V32_PUT_B3(data, b)); }
+
+private:
+  int data; /* raw 32-bit vector payload */
+};
+
+#endif /* __cplusplus */
+
+// V65 Silver types
+// NOTE(review): the "U" variants appear to be the unaligned flavors —
+// natural vector alignment relaxed to 4 bytes; confirm against the HVX
+// programmer's reference.
+#if __Q6S_ARCH__ >= 65
+  // Silver vector types are 128 bytes, and pairs are 256. The vector predicate
+  // types are 16 bytes and 32 bytes for pairs.
+  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(16)))
+    __attribute__((aligned(128)));
+
+  typedef long HEXAGON_VecPred256 __attribute__((__vector_size__(32)))
+    __attribute__((aligned(128)));
+
+  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(128)));
+
+  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
+    __attribute__((aligned(256)));
+
+  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(4)));
+
+  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
+    __attribute__((aligned(4)));
+
+  // Generic Q6S spellings map onto the sized HEXAGON_* types above.
+  #define Q6S_VectorPredPair HEXAGON_VecPred256
+  #define Q6S_VectorPred HEXAGON_VecPred128
+  #define Q6S_Vector HEXAGON_Vect1024
+  #define Q6S_VectorPair HEXAGON_Vect2048
+  #define Q6S_UVector HEXAGON_UVect1024
+  #define Q6S_UVectorPair HEXAGON_UVect2048
+
+#else /* __Q6S_ARCH__ >= 65 */
+
+// V65 Vector types
+#if __HVX_ARCH__ >= 65
+#if defined __HVX__ && (__HVX_LENGTH__ == 128)
+  // 128-byte HVX mode: vectors are 128 bytes, pairs 256, predicates 128.
+  typedef long HEXAGON_VecPred128 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(128)));
+
+  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(128)));
+
+  typedef long HEXAGON_Vect2048 __attribute__((__vector_size__(256)))
+    __attribute__((aligned(256)));
+
+  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(4)));
+
+  typedef long HEXAGON_UVect2048 __attribute__((__vector_size__(256)))
+    __attribute__((aligned(4)));
+
+  #define HVX_VectorPred HEXAGON_VecPred128
+  #define HVX_Vector HEXAGON_Vect1024
+  #define HVX_VectorPair HEXAGON_Vect2048
+  #define HVX_UVector HEXAGON_UVect1024
+  #define HVX_UVectorPair HEXAGON_UVect2048
+#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
+#if defined __HVX__ && (__HVX_LENGTH__ == 64)
+  // 64-byte HVX mode: vectors are 64 bytes, pairs 128, predicates 64.
+  typedef long HEXAGON_VecPred64 __attribute__((__vector_size__(64)))
+    __attribute__((aligned(64)));
+
+  typedef long HEXAGON_Vect512 __attribute__((__vector_size__(64)))
+    __attribute__((aligned(64)));
+
+  typedef long HEXAGON_Vect1024 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(128)));
+
+  typedef long HEXAGON_UVect512 __attribute__((__vector_size__(64)))
+    __attribute__((aligned(4)));
+
+  typedef long HEXAGON_UVect1024 __attribute__((__vector_size__(128)))
+    __attribute__((aligned(4)));
+
+  #define HVX_VectorPred HEXAGON_VecPred64
+  #define HVX_Vector HEXAGON_Vect512
+  #define HVX_VectorPair HEXAGON_Vect1024
+  #define HVX_UVector HEXAGON_UVect512
+  #define HVX_UVectorPair HEXAGON_UVect1024
+#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
+#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
+#endif /* __HVX_ARCH__ >= 65 */
+#endif /* __Q6S_ARCH__ >= 65 */
+
+/* Predicates */
+
+/* A scalar predicate value is modeled as a plain int. */
+typedef int HEXAGON_Pred;
+
+/***
+ *** backward compatibility aliases
+ ***/
+
+/* Old names */
+/* Legacy unsized Q6V_* names forward to the sized Q6V64_* macros.  These
+   are object-like macros expanded at the point of use, so it is fine that
+   the Q6V64_* definitions appear later in this header. */
+#define Q6Vect Q6Vect64
+#define Q6V_GET_D Q6V64_GET_D
+#define Q6V_GET_UD Q6V64_GET_UD
+#define Q6V_GET_W0 Q6V64_GET_W0
+#define Q6V_GET_W1 Q6V64_GET_W1
+#define Q6V_GET_UW0 Q6V64_GET_UW0
+#define Q6V_GET_UW1 Q6V64_GET_UW1
+#define Q6V_GET_H0 Q6V64_GET_H0
+#define Q6V_GET_H1 Q6V64_GET_H1
+#define Q6V_GET_H2 Q6V64_GET_H2
+#define Q6V_GET_H3 Q6V64_GET_H3
+#define Q6V_GET_UH0 Q6V64_GET_UH0
+#define Q6V_GET_UH1 Q6V64_GET_UH1
+#define Q6V_GET_UH2 Q6V64_GET_UH2
+#define Q6V_GET_UH3 Q6V64_GET_UH3
+#define Q6V_GET_B0 Q6V64_GET_B0
+#define Q6V_GET_B1 Q6V64_GET_B1
+#define Q6V_GET_B2 Q6V64_GET_B2
+#define Q6V_GET_B3 Q6V64_GET_B3
+#define Q6V_GET_B4 Q6V64_GET_B4
+#define Q6V_GET_B5 Q6V64_GET_B5
+#define Q6V_GET_B6 Q6V64_GET_B6
+#define Q6V_GET_B7 Q6V64_GET_B7
+#define Q6V_GET_UB0 Q6V64_GET_UB0
+#define Q6V_GET_UB1 Q6V64_GET_UB1
+#define Q6V_GET_UB2 Q6V64_GET_UB2
+#define Q6V_GET_UB3 Q6V64_GET_UB3
+#define Q6V_GET_UB4 Q6V64_GET_UB4
+#define Q6V_GET_UB5 Q6V64_GET_UB5
+#define Q6V_GET_UB6 Q6V64_GET_UB6
+#define Q6V_GET_UB7 Q6V64_GET_UB7
+#define Q6V_PUT_D Q6V64_PUT_D
+#define Q6V_PUT_W0 Q6V64_PUT_W0
+#define Q6V_PUT_W1 Q6V64_PUT_W1
+#define Q6V_PUT_H0 Q6V64_PUT_H0
+#define Q6V_PUT_H1 Q6V64_PUT_H1
+#define Q6V_PUT_H2 Q6V64_PUT_H2
+#define Q6V_PUT_H3 Q6V64_PUT_H3
+#define Q6V_PUT_B0 Q6V64_PUT_B0
+#define Q6V_PUT_B1 Q6V64_PUT_B1
+#define Q6V_PUT_B2 Q6V64_PUT_B2
+#define Q6V_PUT_B3 Q6V64_PUT_B3
+#define Q6V_PUT_B4 Q6V64_PUT_B4
+#define Q6V_PUT_B5 Q6V64_PUT_B5
+#define Q6V_PUT_B6 Q6V64_PUT_B6
+#define Q6V_PUT_B7 Q6V64_PUT_B7
+#define Q6V_CREATE_D Q6V64_CREATE_D
+#define Q6V_CREATE_W Q6V64_CREATE_W
+#define Q6V_CREATE_H Q6V64_CREATE_H
+#define Q6V_CREATE_B Q6V64_CREATE_B
+
+#ifdef __cplusplus
+/* The C++ wrapper class alias is only meaningful under C++. */
+#define Q6VectC Q6Vect64C
+#endif /* __cplusplus */
+
+/* 64 Bit Vectors */
+
+/* __may_alias__ lets a Q6Vect64 lvalue legally alias objects of other
+   types, so the union-based lane macros below cannot run afoul of
+   strict-aliasing optimization. */
+typedef long long __attribute__((__may_alias__)) Q6Vect64;
+
+/* Extract doubleword macros */
+
+/* Whole-doubleword extraction is the identity (optionally unsigned). */
+#define Q6V64_GET_D(v) (v)
+#define Q6V64_GET_UD(v) ((unsigned long long)(v))
+
+/* Extract word macros */
+
+/* Each GET macro copies the vector into a union and reads one lane back,
+   so lane numbering follows the target's in-memory byte order. */
+#define Q6V64_GET_W0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.w[0]; \
+ })
+#define Q6V64_GET_W1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.w[1]; \
+ })
+#define Q6V64_GET_UW0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned int uw[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.uw[0]; \
+ })
+#define Q6V64_GET_UW1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned int uw[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.uw[1]; \
+ })
+
+/* Extract half word macros */
+
+#define Q6V64_GET_H0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[0]; \
+ })
+#define Q6V64_GET_H1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[1]; \
+ })
+#define Q6V64_GET_H2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[2]; \
+ })
+#define Q6V64_GET_H3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[3]; \
+ })
+#define Q6V64_GET_UH0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.uh[0]; \
+ })
+#define Q6V64_GET_UH1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.uh[1]; \
+ })
+#define Q6V64_GET_UH2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.uh[2]; \
+ })
+#define Q6V64_GET_UH3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned short uh[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.uh[3]; \
+ })
+
+/* Extract byte macros */
+
+#define Q6V64_GET_B0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[0]; \
+ })
+#define Q6V64_GET_B1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[1]; \
+ })
+#define Q6V64_GET_B2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[2]; \
+ })
+#define Q6V64_GET_B3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[3]; \
+ })
+#define Q6V64_GET_B4(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[4]; \
+ })
+#define Q6V64_GET_B5(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[5]; \
+ })
+#define Q6V64_GET_B6(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[6]; \
+ })
+#define Q6V64_GET_B7(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ signed char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[7]; \
+ })
+#define Q6V64_GET_UB0(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[0]; \
+ })
+#define Q6V64_GET_UB1(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[1]; \
+ })
+#define Q6V64_GET_UB2(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[2]; \
+ })
+#define Q6V64_GET_UB3(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[3]; \
+ })
+#define Q6V64_GET_UB4(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[4]; \
+ })
+#define Q6V64_GET_UB5(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[5]; \
+ })
+#define Q6V64_GET_UB6(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[6]; \
+ })
+#define Q6V64_GET_UB7(v) \
+ __extension__({ \
+ union { \
+ long long d; \
+ unsigned char ub[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.ub[7]; \
+ })
+
+/* NOTE: All set macros return a Q6Vect64 type */
+
+/* Set doubleword macro */
+
+/* Replacing the whole doubleword ignores the old value entirely. */
+#define Q6V64_PUT_D(v, new) (new)
+
+/* Set word macros */
+
+/* Two implementations: on qdsp6 the lane store goes through a union
+   (native in-memory layout); the host fallback uses mask-and-shift.
+   The topmost lane of each width (W1 / H3 / B7) needs no mask because its
+   shifted bits cannot collide with the preserved low bits. */
+#ifdef __qdsp6__
+
+#define Q6V64_PUT_W0(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.w[0] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_W1(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.w[1] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V64_PUT_W0(v, new) \
+ (((v) & 0xffffffff00000000LL) | ((Q6Vect64)((unsigned int)(new))))
+#define Q6V64_PUT_W1(v, new) \
+ (((v) & 0x00000000ffffffffLL) | (((Q6Vect64)(new)) << 32LL))
+
+#endif /* !__qdsp6__ */
+
+/* Set half word macros */
+
+#ifdef __qdsp6__
+
+#define Q6V64_PUT_H0(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[0] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_H1(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[1] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_H2(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[2] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_H3(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.h[3] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V64_PUT_H0(v, new) \
+ (((v) & 0xffffffffffff0000LL) | ((Q6Vect64)((unsigned short)(new))))
+#define Q6V64_PUT_H1(v, new) \
+ (((v) & 0xffffffff0000ffffLL) | (((Q6Vect64)((unsigned short)(new))) << 16LL))
+#define Q6V64_PUT_H2(v, new) \
+ (((v) & 0xffff0000ffffffffLL) | (((Q6Vect64)((unsigned short)(new))) << 32LL))
+#define Q6V64_PUT_H3(v, new) \
+ (((v) & 0x0000ffffffffffffLL) | (((Q6Vect64)(new)) << 48LL))
+
+#endif /* !__qdsp6__ */
+
+/* Set byte macros */
+
+#ifdef __qdsp6__
+
+#define Q6V64_PUT_B0(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[0] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B1(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[1] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B2(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[2] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B3(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[3] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B4(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[4] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B5(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[5] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B6(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[6] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+#define Q6V64_PUT_B7(v, new) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.d = (v); \
+ _Q6V64_internal_union.b[7] = (new); \
+ _Q6V64_internal_union.d; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V64_PUT_B0(v, new) \
+ (((v) & 0xffffffffffffff00LL) | ((Q6Vect64)((unsigned char)(new))))
+#define Q6V64_PUT_B1(v, new) \
+ (((v) & 0xffffffffffff00ffLL) | (((Q6Vect64)((unsigned char)(new))) << 8LL))
+#define Q6V64_PUT_B2(v, new) \
+ (((v) & 0xffffffffff00ffffLL) | (((Q6Vect64)((unsigned char)(new))) << 16LL))
+#define Q6V64_PUT_B3(v, new) \
+ (((v) & 0xffffffff00ffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 24LL))
+#define Q6V64_PUT_B4(v, new) \
+ (((v) & 0xffffff00ffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 32LL))
+#define Q6V64_PUT_B5(v, new) \
+ (((v) & 0xffff00ffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 40LL))
+#define Q6V64_PUT_B6(v, new) \
+ (((v) & 0xff00ffffffffffffLL) | (((Q6Vect64)((unsigned char)(new))) << 48LL))
+#define Q6V64_PUT_B7(v, new) \
+ (((v) & 0x00ffffffffffffffLL) | (((Q6Vect64)(new)) << 56LL))
+
+#endif /* !__qdsp6__ */
+
+/* NOTE: All create macros return a Q6Vect64 type */
+
+/* Create from a doubleword */
+
+#define Q6V64_CREATE_D(d) (d)
+
+/* Create from words */
+
+/* On qdsp6 each CREATE macro fills every lane of the union before reading
+   back the full 64-bit doubleword, so no byte is left uninitialized; the
+   host fallback assembles the value with shifts, masking every lane but
+   the topmost one. */
+#ifdef __qdsp6__
+
+#define Q6V64_CREATE_W(w1, w0) \
+ __extension__({ \
+ union { \
+ long long d; \
+ int w[2]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.w[0] = (w0); \
+ _Q6V64_internal_union.w[1] = (w1); \
+ _Q6V64_internal_union.d; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V64_CREATE_W(w1, w0) \
+ ((((Q6Vect64)(w1)) << 32LL) | ((Q6Vect64)((w0) & 0xffffffff)))
+
+#endif /* !__qdsp6__ */
+
+/* Create from half words */
+
+#ifdef __qdsp6__
+
+#define Q6V64_CREATE_H(h3, h2, h1, h0) \
+ __extension__({ \
+ union { \
+ long long d; \
+ short h[4]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.h[0] = (h0); \
+ _Q6V64_internal_union.h[1] = (h1); \
+ _Q6V64_internal_union.h[2] = (h2); \
+ _Q6V64_internal_union.h[3] = (h3); \
+ _Q6V64_internal_union.d; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V64_CREATE_H(h3, h2, h1, h0) \
+ ((((Q6Vect64)(h3)) << 48LL) | (((Q6Vect64)((h2) & 0xffff)) << 32LL) | \
+ (((Q6Vect64)((h1) & 0xffff)) << 16LL) | ((Q6Vect64)((h0) & 0xffff)))
+
+#endif /* !__qdsp6__ */
+
+/* Create from bytes */
+
+#ifdef __qdsp6__
+
+#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \
+ __extension__({ \
+ union { \
+ long long d; \
+ char b[8]; \
+ } _Q6V64_internal_union; \
+ _Q6V64_internal_union.b[0] = (b0); \
+ _Q6V64_internal_union.b[1] = (b1); \
+ _Q6V64_internal_union.b[2] = (b2); \
+ _Q6V64_internal_union.b[3] = (b3); \
+ _Q6V64_internal_union.b[4] = (b4); \
+ _Q6V64_internal_union.b[5] = (b5); \
+ _Q6V64_internal_union.b[6] = (b6); \
+ _Q6V64_internal_union.b[7] = (b7); \
+ _Q6V64_internal_union.d; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0) \
+ ((((Q6Vect64)(b7)) << 56LL) | (((Q6Vect64)((b6) & 0xff)) << 48LL) | \
+ (((Q6Vect64)((b5) & 0xff)) << 40LL) | (((Q6Vect64)((b4) & 0xff)) << 32LL) | \
+ (((Q6Vect64)((b3) & 0xff)) << 24LL) | (((Q6Vect64)((b2) & 0xff)) << 16LL) | \
+ (((Q6Vect64)((b1) & 0xff)) << 8LL) | ((Q6Vect64)((b0) & 0xff)))
+
+#endif /* !__qdsp6__ */
+
+#ifdef __cplusplus
+
+/* Thin value wrapper around Q6Vect64 giving lane access as member
+   functions.  Readers return the lane value; writers are non-mutating and
+   return a new Q6Vect64C with the requested lane replaced. */
+class Q6Vect64C {
+public:
+  /* Construct from a doubleword, two words, four half words, eight bytes,
+     or a copy. */
+  Q6Vect64C(long long d = 0) : data(d) {}
+  Q6Vect64C(int w1, int w0) : data(Q6V64_CREATE_W(w1, w0)) {}
+  Q6Vect64C(short h3, short h2, short h1, short h0)
+      : data(Q6V64_CREATE_H(h3, h2, h1, h0)) {}
+  Q6Vect64C(signed char b7, signed char b6, signed char b5, signed char b4,
+            signed char b3, signed char b2, signed char b1, signed char b0)
+      : data(Q6V64_CREATE_B(b7, b6, b5, b4, b3, b2, b1, b0)) {}
+  Q6Vect64C(const Q6Vect64C &v) : data(v.data) {}
+
+  Q6Vect64C &operator=(const Q6Vect64C &v) {
+    data = v.data;
+    return *this;
+  }
+
+  /* Implicit conversion back to the raw 64-bit representation. */
+  operator long long() { return data; }
+
+  /* Doubleword readers */
+  long long D() { return Q6V64_GET_D(data); }
+  unsigned long long UD() { return Q6V64_GET_UD(data); }
+
+  /* Word readers */
+  int W0() { return Q6V64_GET_W0(data); }
+  int W1() { return Q6V64_GET_W1(data); }
+  unsigned int UW0() { return Q6V64_GET_UW0(data); }
+  unsigned int UW1() { return Q6V64_GET_UW1(data); }
+
+  /* Half-word readers */
+  short H0() { return Q6V64_GET_H0(data); }
+  short H1() { return Q6V64_GET_H1(data); }
+  short H2() { return Q6V64_GET_H2(data); }
+  short H3() { return Q6V64_GET_H3(data); }
+  unsigned short UH0() { return Q6V64_GET_UH0(data); }
+  unsigned short UH1() { return Q6V64_GET_UH1(data); }
+  unsigned short UH2() { return Q6V64_GET_UH2(data); }
+  unsigned short UH3() { return Q6V64_GET_UH3(data); }
+
+  /* Byte readers */
+  signed char B0() { return Q6V64_GET_B0(data); }
+  signed char B1() { return Q6V64_GET_B1(data); }
+  signed char B2() { return Q6V64_GET_B2(data); }
+  signed char B3() { return Q6V64_GET_B3(data); }
+  signed char B4() { return Q6V64_GET_B4(data); }
+  signed char B5() { return Q6V64_GET_B5(data); }
+  signed char B6() { return Q6V64_GET_B6(data); }
+  signed char B7() { return Q6V64_GET_B7(data); }
+  unsigned char UB0() { return Q6V64_GET_UB0(data); }
+  unsigned char UB1() { return Q6V64_GET_UB1(data); }
+  unsigned char UB2() { return Q6V64_GET_UB2(data); }
+  unsigned char UB3() { return Q6V64_GET_UB3(data); }
+  unsigned char UB4() { return Q6V64_GET_UB4(data); }
+  unsigned char UB5() { return Q6V64_GET_UB5(data); }
+  unsigned char UB6() { return Q6V64_GET_UB6(data); }
+  unsigned char UB7() { return Q6V64_GET_UB7(data); }
+
+  /* NOTE: every writer below returns a fresh Q6Vect64C; *this is left
+     untouched. */
+
+  /* Doubleword writer */
+  Q6Vect64C D(long long d) { return Q6Vect64C(Q6V64_PUT_D(data, d)); }
+
+  /* Word writers */
+  Q6Vect64C W0(int w) { return Q6Vect64C(Q6V64_PUT_W0(data, w)); }
+  Q6Vect64C W1(int w) { return Q6Vect64C(Q6V64_PUT_W1(data, w)); }
+
+  /* Half-word writers */
+  Q6Vect64C H0(short h) { return Q6Vect64C(Q6V64_PUT_H0(data, h)); }
+  Q6Vect64C H1(short h) { return Q6Vect64C(Q6V64_PUT_H1(data, h)); }
+  Q6Vect64C H2(short h) { return Q6Vect64C(Q6V64_PUT_H2(data, h)); }
+  Q6Vect64C H3(short h) { return Q6Vect64C(Q6V64_PUT_H3(data, h)); }
+
+  /* Byte writers */
+  Q6Vect64C B0(signed char b) { return Q6Vect64C(Q6V64_PUT_B0(data, b)); }
+  Q6Vect64C B1(signed char b) { return Q6Vect64C(Q6V64_PUT_B1(data, b)); }
+  Q6Vect64C B2(signed char b) { return Q6Vect64C(Q6V64_PUT_B2(data, b)); }
+  Q6Vect64C B3(signed char b) { return Q6Vect64C(Q6V64_PUT_B3(data, b)); }
+  Q6Vect64C B4(signed char b) { return Q6Vect64C(Q6V64_PUT_B4(data, b)); }
+  Q6Vect64C B5(signed char b) { return Q6Vect64C(Q6V64_PUT_B5(data, b)); }
+  Q6Vect64C B6(signed char b) { return Q6Vect64C(Q6V64_PUT_B6(data, b)); }
+  Q6Vect64C B7(signed char b) { return Q6Vect64C(Q6V64_PUT_B7(data, b)); }
+
+private:
+  long long data; /* raw 64-bit vector payload */
+};
+
+#endif /* __cplusplus */
+
+/* 32 Bit Vectors */
+
+/* A 32-bit vector is modeled as a plain int; the macros below reinterpret
+   its half-word and byte lanes. */
+typedef int Q6Vect32;
+
+/* Extract word macros */
+
+#define Q6V32_GET_W(v) (v)
+#define Q6V32_GET_UW(v) ((unsigned int)(v))
+
+/* Extract half word macros */
+
+/* Each GET macro copies the vector into a union and reads one lane back,
+   so lane numbering follows the target's in-memory byte order. */
+#define Q6V32_GET_H0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.h[0]; \
+ })
+#define Q6V32_GET_H1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.h[1]; \
+ })
+#define Q6V32_GET_UH0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned short uh[2]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.uh[0]; \
+ })
+#define Q6V32_GET_UH1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned short uh[2]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.uh[1]; \
+ })
+
+/* Extract byte macros */
+
+#define Q6V32_GET_B0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[0]; \
+ })
+#define Q6V32_GET_B1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[1]; \
+ })
+#define Q6V32_GET_B2(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[2]; \
+ })
+#define Q6V32_GET_B3(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ signed char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[3]; \
+ })
+#define Q6V32_GET_UB0(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.ub[0]; \
+ })
+#define Q6V32_GET_UB1(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.ub[1]; \
+ })
+#define Q6V32_GET_UB2(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.ub[2]; \
+ })
+#define Q6V32_GET_UB3(v) \
+ __extension__({ \
+ union { \
+ int w; \
+ unsigned char ub[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.ub[3]; \
+ })
+
+/* NOTE: All set macros return a Q6Vect32 type */
+
+/* Set word macro */
+
+/* Replacing the whole word ignores the old value entirely. */
+#define Q6V32_PUT_W(v, new) (new)
+
+/* Set half word macros */
+
+/* Two implementations: on qdsp6 the lane store goes through a union
+   (native in-memory layout); the host fallback uses mask-and-shift.
+   The topmost lane (H1 / B3) needs no mask because its shifted bits
+   cannot collide with the preserved low bits. */
+#ifdef __qdsp6__
+
+#define Q6V32_PUT_H0(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.h[0] = (new); \
+ _Q6V32_internal_union.w; \
+ })
+#define Q6V32_PUT_H1(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ short h[2]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.h[1] = (new); \
+ _Q6V32_internal_union.w; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V32_PUT_H0(v, new) \
+ (((v) & 0xffff0000) | ((Q6Vect32)((unsigned short)(new))))
+#define Q6V32_PUT_H1(v, new) (((v) & 0x0000ffff) | (((Q6Vect32)(new)) << 16))
+
+#endif /* !__qdsp6__ */
+
+/* Set byte macros */
+
+#ifdef __qdsp6__
+
+#define Q6V32_PUT_B0(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[0] = (new); \
+ _Q6V32_internal_union.w; \
+ })
+#define Q6V32_PUT_B1(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[1] = (new); \
+ _Q6V32_internal_union.w; \
+ })
+#define Q6V32_PUT_B2(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[2] = (new); \
+ _Q6V32_internal_union.w; \
+ })
+#define Q6V32_PUT_B3(v, new) \
+ __extension__({ \
+ union { \
+ int w; \
+ char b[4]; \
+ } _Q6V32_internal_union; \
+ _Q6V32_internal_union.w = (v); \
+ _Q6V32_internal_union.b[3] = (new); \
+ _Q6V32_internal_union.w; \
+ })
+
+#else /* !__qdsp6__ */
+
+#define Q6V32_PUT_B0(v, new) \
+ (((v) & 0xffffff00) | ((Q6Vect32)((unsigned char)(new))))
+#define Q6V32_PUT_B1(v, new) \
+ (((v) & 0xffff00ff) | (((Q6Vect32)((unsigned char)(new))) << 8))
+#define Q6V32_PUT_B2(v, new) \
+ (((v) & 0xff00ffff) | (((Q6Vect32)((unsigned char)(new))) << 16))
+#define Q6V32_PUT_B3(v, new) (((v) & 0x00ffffff) | (((Q6Vect32)(new)) << 24))
+
+#endif /* !__qdsp6__ */
+
+/* NOTE: All create macros return a Q6Vect32 type */
+
+/* Create from a word */
+
+#define Q6V32_CREATE_W(w) (w)
+
+/* Create from half words */
+
+#ifdef __qdsp6__
+
+/* Build a Q6Vect32 from two half words: h1 becomes the high 16 bits, h0 the
+   low 16 bits (Hexagon is little-endian; see the !__qdsp6__ fallback below,
+   which computes (h1 << 16) | (h0 & 0xffff)).  Uses a 32-bit union (int +
+   short[2]) and returns the full word; the previous long long member read
+   4 uninitialized bytes before being truncated to 32 bits. */
+#define Q6V32_CREATE_H(h1, h0)                                                 \
+  __extension__({                                                             \
+    union {                                                                   \
+      int w;                                                                  \
+      short h[2];                                                             \
+    } _Q6V32_internal_union;                                                  \
+    _Q6V32_internal_union.h[0] = (h0);                                        \
+    _Q6V32_internal_union.h[1] = (h1);                                        \
+    _Q6V32_internal_union.w;                                                  \
+  })
+
+#else /* !__qdsp6__ */
+
+#define Q6V32_CREATE_H(h1, h0) \
+ ((((Q6Vect32)(h1)) << 16) | ((Q6Vect32)((h0) & 0xffff)))
+
+#endif /* !__qdsp6__ */
+
+/* Create from bytes */
+#ifdef __qdsp6__
+
+/* Build a Q6Vect32 from four bytes: b3 is the most significant byte, b0 the
+   least (matches the !__qdsp6__ fallback below, which shifts b3 << 24 ...
+   b0 << 0).  Uses a 32-bit union (int + char[4]) and returns the full word;
+   the previous long long member read 4 uninitialized bytes before being
+   truncated to 32 bits. */
+#define Q6V32_CREATE_B(b3, b2, b1, b0)                                         \
+  __extension__({                                                             \
+    union {                                                                   \
+      int w;                                                                  \
+      char b[4];                                                              \
+    } _Q6V32_internal_union;                                                  \
+    _Q6V32_internal_union.b[0] = (b0);                                        \
+    _Q6V32_internal_union.b[1] = (b1);                                        \
+    _Q6V32_internal_union.b[2] = (b2);                                        \
+    _Q6V32_internal_union.b[3] = (b3);                                        \
+    _Q6V32_internal_union.w;                                                  \
+  })
+
+#else /* !__qdsp6__ */
+
+#define Q6V32_CREATE_B(b3, b2, b1, b0) \
+ ((((Q6Vect32)(b3)) << 24) | (((Q6Vect32)((b2) & 0xff)) << 16) | \
+ (((Q6Vect32)((b1) & 0xff)) << 8) | ((Q6Vect32)((b0) & 0xff)))
+
+#endif /* !__qdsp6__ */
+
+#ifdef __cplusplus
+
+// C++ convenience wrapper over a single 32-bit Hexagon vector word.  It holds
+// one int ("data") and exposes the Q6V32_GET_* / Q6V32_PUT_* /
+// Q6V32_CREATE_* macros above as methods.  Extract methods (W/UW, H0..H1,
+// B0..B3 and unsigned variants) read one lane; set methods construct and
+// return a NEW Q6Vect32C with that lane replaced -- they do not modify *this.
+class Q6Vect32C {
+public:
+  // Constructors
+  // Non-explicit on purpose: allows implicit int -> Q6Vect32C conversion,
+  // mirroring the implicit Q6Vect32C -> int conversion operator below.
+  Q6Vect32C(int w = 0) : data(w) {};
+  Q6Vect32C(short h1, short h0) : data(Q6V32_CREATE_H(h1, h0)) {};
+  Q6Vect32C(signed char b3, signed char b2, signed char b1, signed char b0)
+      : data(Q6V32_CREATE_B(b3, b2, b1, b0)) {};
+  Q6Vect32C(const Q6Vect32C &v) : data(v.data) {};
+
+  Q6Vect32C &operator=(const Q6Vect32C &v) {
+    data = v.data;
+    return *this;
+  };
+
+  // Implicit conversion to the underlying 32-bit word.
+  operator int() {
+    return data;
+  };
+
+  // Extract word methods
+  int W(void) {
+    return Q6V32_GET_W(data);
+  };
+  unsigned int UW(void) {
+    return Q6V32_GET_UW(data);
+  };
+
+  // Extract half word methods
+  short H0(void) {
+    return Q6V32_GET_H0(data);
+  };
+  short H1(void) {
+    return Q6V32_GET_H1(data);
+  };
+  unsigned short UH0(void) {
+    return Q6V32_GET_UH0(data);
+  };
+  unsigned short UH1(void) {
+    return Q6V32_GET_UH1(data);
+  };
+
+  // Extract byte methods
+  signed char B0(void) {
+    return Q6V32_GET_B0(data);
+  };
+  signed char B1(void) {
+    return Q6V32_GET_B1(data);
+  };
+  signed char B2(void) {
+    return Q6V32_GET_B2(data);
+  };
+  signed char B3(void) {
+    return Q6V32_GET_B3(data);
+  };
+  unsigned char UB0(void) {
+    return Q6V32_GET_UB0(data);
+  };
+  unsigned char UB1(void) {
+    return Q6V32_GET_UB1(data);
+  };
+  unsigned char UB2(void) {
+    return Q6V32_GET_UB2(data);
+  };
+  unsigned char UB3(void) {
+    return Q6V32_GET_UB3(data);
+  };
+
+  // NOTE: All set methods return a Q6Vect32C type
+
+  // Set word method
+  Q6Vect32C W(int w) {
+    return Q6Vect32C(Q6V32_PUT_W(data, w));
+  };
+
+  // Set half word methods
+  Q6Vect32C H0(short h) {
+    return Q6Vect32C(Q6V32_PUT_H0(data, h));
+  };
+  Q6Vect32C H1(short h) {
+    return Q6Vect32C(Q6V32_PUT_H1(data, h));
+  };
+
+  // Set byte methods
+  Q6Vect32C B0(signed char b) {
+    return Q6Vect32C(Q6V32_PUT_B0(data, b));
+  };
+  Q6Vect32C B1(signed char b) {
+    return Q6Vect32C(Q6V32_PUT_B1(data, b));
+  };
+  Q6Vect32C B2(signed char b) {
+    return Q6Vect32C(Q6V32_PUT_B2(data, b));
+  };
+  Q6Vect32C B3(signed char b) {
+    return Q6Vect32C(Q6V32_PUT_B3(data, b));
+  };
+
+private:
+  // The wrapped 32-bit vector word.
+  int data;
+};
+
+#endif /* __cplusplus */
+
+// V65 Vector types
+#if __HVX_ARCH__ >= 65
+#if defined __HVX__ && (__HVX_LENGTH__ == 128)
+typedef long Q6VecPred128 __attribute__((__vector_size__(128)))
+ __attribute__((aligned(128)));
+
+typedef long Q6Vect1024 __attribute__((__vector_size__(128)))
+ __attribute__((aligned(128)));
+
+typedef long Q6Vect2048 __attribute__((__vector_size__(256)))
+ __attribute__((aligned(256)));
+
+#else /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
+#if defined __HVX__ && (__HVX_LENGTH__ == 64)
+typedef long Q6VecPred64 __attribute__((__vector_size__(64)))
+ __attribute__((aligned(64)));
+
+typedef long Q6Vect512 __attribute__((__vector_size__(64)))
+ __attribute__((aligned(64)));
+
+typedef long Q6Vect1024 __attribute__((__vector_size__(128)))
+ __attribute__((aligned(128)));
+
+#endif /* defined __HVX__ && (__HVX_LENGTH__ == 64) */
+#endif /* defined __HVX__ && (__HVX_LENGTH__ == 128) */
+#endif /* __HVX_ARCH__ >= 65 */
+
+/* Predicates */
+
+typedef int Q6Pred;
+
+
+#ifdef __HVX__
+
+// Extract HVX VectorPair macro.
+#define HEXAGON_HVX_GET_W(v) (v)
+
+// Extract HVX Vector macros.
+#define HEXAGON_HVX_GET_V0(v) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_Vector V[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.W = (v); \
+ _HEXAGON_HVX_internal_union.V[0]; \
+ })
+#define HEXAGON_HVX_GET_V1(v) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_Vector V[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.W = (v); \
+ _HEXAGON_HVX_internal_union.V[1]; \
+ })
+#define HEXAGON_HVX_GET_P(v) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_VectorPred P[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.W = (v); \
+ _HEXAGON_HVX_internal_union.P[0]; \
+ })
+
+// Set HVX VectorPair macro.
+#define HEXAGON_HVX_PUT_W(v, new) (new)
+
+// Set HVX Vector macros.
+#define HEXAGON_HVX_PUT_V0(v, new) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_Vector V[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.W = (v); \
+ _HEXAGON_HVX_internal_union.V[0] = (new); \
+ _HEXAGON_HVX_internal_union.W; \
+ })
+
+#define HEXAGON_HVX_PUT_V1(v, new) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_Vector V[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.W = (v); \
+ _HEXAGON_HVX_internal_union.V[1] = (new); \
+ _HEXAGON_HVX_internal_union.W; \
+ })
+
+#define HEXAGON_HVX_PUT_P(v, new) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_VectorPred P[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.W = (v); \
+ _HEXAGON_HVX_internal_union.P[0] = (new); \
+ _HEXAGON_HVX_internal_union.W; \
+ })
+
+
+#define HEXAGON_HVX_CREATE_W(v1, v0) \
+ __extension__({ \
+ union { \
+ HVX_VectorPair W; \
+ HVX_Vector V[2]; \
+ } _HEXAGON_HVX_internal_union; \
+ _HEXAGON_HVX_internal_union.V[0] = (v0); \
+ _HEXAGON_HVX_internal_union.V[1] = (v1); \
+ _HEXAGON_HVX_internal_union.W; \
+ })
+
+#ifdef __cplusplus
+
+// C++ convenience wrapper over an HVX_VectorPair, mirroring Q6Vect32C above.
+// Holds one pair ("data") and exposes the HEXAGON_HVX_GET_* /
+// HEXAGON_HVX_PUT_* macros as methods.  V0/V1 access the low/high vectors of
+// the pair; set methods return a NEW HVX_Vect -- they do not modify *this.
+class HVX_Vect {
+public:
+  // Constructors.
+  // Default: combines two zero vectors into a zero-valued pair.
+  HVX_Vect() : data(Q6_W_vcombine_VV(Q6_V_vzero(), Q6_V_vzero())){};
+
+  // Custom constructors.
+  HVX_Vect(HVX_VectorPair W) : data(W){};
+  HVX_Vect(HVX_Vector v1, HVX_Vector v0) : data(HEXAGON_HVX_CREATE_W(v1, v0)){};
+
+  // Copy constructor.
+  HVX_Vect(const HVX_Vect &W) = default;
+
+  // Move constructor.
+  HVX_Vect(HVX_Vect &&W) = default;
+
+  // Assignment operator.
+  HVX_Vect &operator=(const HVX_Vect &W) = default;
+
+  // Implicit conversion to the underlying vector pair.
+  operator HVX_VectorPair() { return data; };
+
+  // Extract VectorPair method.
+  HVX_VectorPair W(void) { return HEXAGON_HVX_GET_W(data); };
+
+  // Extract Vector methods.
+  HVX_Vector V0(void) { return HEXAGON_HVX_GET_V0(data); };
+  HVX_Vector V1(void) { return HEXAGON_HVX_GET_V1(data); };
+  // P() reinterprets the pair's first element as a predicate via the union in
+  // HEXAGON_HVX_GET_P (only P[0] is read/written by the GET/PUT_P macros).
+  HVX_VectorPred P(void) { return HEXAGON_HVX_GET_P(data); };
+
+  // NOTE: All set methods return a HVX_Vect type.
+  // Set HVX VectorPair method.
+  HVX_Vect W(HVX_VectorPair w) { return HVX_Vect(HEXAGON_HVX_PUT_W(data, w)); };
+
+  // Set HVX Vector methods.
+  HVX_Vect V0(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V0(data, v)); };
+  HVX_Vect V1(HVX_Vector v) { return HVX_Vect(HEXAGON_HVX_PUT_V1(data, v)); };
+  HVX_Vect P(HVX_VectorPred p) { return HVX_Vect(HEXAGON_HVX_PUT_P(data, p)); };
+
+private:
+  // The wrapped vector pair.
+  HVX_VectorPair data;
+};
+
+#endif /* __cplusplus */
+#endif /* __HVX__ */
+
+#define HEXAGON_UDMA_DM0_STATUS_IDLE 0x00000000
+#define HEXAGON_UDMA_DM0_STATUS_RUN 0x00000001
+#define HEXAGON_UDMA_DM0_STATUS_ERROR 0x00000002
+#define HEXAGON_UDMA_DESC_DSTATE_INCOMPLETE 0
+#define HEXAGON_UDMA_DESC_DSTATE_COMPLETE 1
+#define HEXAGON_UDMA_DESC_ORDER_NOORDER 0
+#define HEXAGON_UDMA_DESC_ORDER_ORDER 1
+#define HEXAGON_UDMA_DESC_BYPASS_OFF 0
+#define HEXAGON_UDMA_DESC_BYPASS_ON 1
+#define HEXAGON_UDMA_DESC_COMP_NONE 0
+#define HEXAGON_UDMA_DESC_COMP_DLBC 1
+#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE0 0
+#define HEXAGON_UDMA_DESC_DESCTYPE_TYPE1 1
+
+/* Type-0 uDMA transfer descriptor (linked via 'next'; NULL presumably ends
+   the chain -- confirm against the uDMA spec).  Field values come from the
+   HEXAGON_UDMA_DESC_* constants above.  NOTE(review): the bit-field layout is
+   assumed to match the hardware descriptor format exactly; do not reorder. */
+typedef struct hexagon_udma_descriptor_type0_s
+{
+  void *next;                    /* next descriptor in the chain */
+  unsigned int length:24;        /* transfer length */
+  unsigned int desctype:2;       /* HEXAGON_UDMA_DESC_DESCTYPE_* */
+  unsigned int dstcomp:1;        /* HEXAGON_UDMA_DESC_COMP_* for dst */
+  unsigned int srccomp:1;        /* HEXAGON_UDMA_DESC_COMP_* for src */
+  unsigned int dstbypass:1;      /* HEXAGON_UDMA_DESC_BYPASS_* for dst */
+  unsigned int srcbypass:1;      /* HEXAGON_UDMA_DESC_BYPASS_* for src */
+  unsigned int order:1;          /* HEXAGON_UDMA_DESC_ORDER_* */
+  unsigned int dstate:1;         /* HEXAGON_UDMA_DESC_DSTATE_* */
+  void *src;                     /* source address */
+  void *dst;                     /* destination address */
+} hexagon_udma_descriptor_type0_t;
+
+/* Type-1 uDMA transfer descriptor: extends the type-0 layout (same leading
+   fields) with 2-D region-of-interest geometry (width/height, strides and
+   per-row offsets).  NOTE(review): the bit-field layout is assumed to match
+   the hardware descriptor format exactly; do not reorder. */
+typedef struct hexagon_udma_descriptor_type1_s
+{
+  void *next;                    /* next descriptor in the chain */
+  unsigned int length:24;        /* transfer length */
+  unsigned int desctype:2;       /* HEXAGON_UDMA_DESC_DESCTYPE_* */
+  unsigned int dstcomp:1;        /* HEXAGON_UDMA_DESC_COMP_* for dst */
+  unsigned int srccomp:1;        /* HEXAGON_UDMA_DESC_COMP_* for src */
+  unsigned int dstbypass:1;      /* HEXAGON_UDMA_DESC_BYPASS_* for dst */
+  unsigned int srcbypass:1;      /* HEXAGON_UDMA_DESC_BYPASS_* for src */
+  unsigned int order:1;          /* HEXAGON_UDMA_DESC_ORDER_* */
+  unsigned int dstate:1;         /* HEXAGON_UDMA_DESC_DSTATE_* */
+  void *src;                     /* source address */
+  void *dst;                     /* destination address */
+  unsigned int allocation:28;
+  unsigned int padding:4;
+  unsigned int roiwidth:16;      /* region-of-interest width */
+  unsigned int roiheight:16;     /* region-of-interest height */
+  unsigned int srcstride:16;     /* source row stride */
+  unsigned int dststride:16;     /* destination row stride */
+  unsigned int srcwidthoffset:16;
+  unsigned int dstwidthoffset:16;
+} hexagon_udma_descriptor_type1_t;
+
+#endif /* !HEXAGON_TYPES_H */
diff --git a/clang/lib/Headers/hvx_hexagon_protos.h b/clang/lib/Headers/hvx_hexagon_protos.h
new file mode 100644
index 000000000000..41ce7a6b93e9
--- /dev/null
+++ b/clang/lib/Headers/hvx_hexagon_protos.h
@@ -0,0 +1,4392 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Automatically generated file, do not edit!
+//===----------------------------------------------------------------------===//
+
+
+
+#ifndef _HVX_HEXAGON_PROTOS_H_
+#define _HVX_HEXAGON_PROTOS_H_ 1
+
+#ifdef __HVX__
+/* Select the builtin variant for the configured HVX vector length: when
+   compiling for 128-byte vectors, paste the "_128B" suffix onto the builtin
+   name (e.g. __builtin_HEXAGON_V6_vaddw -> __builtin_HEXAGON_V6_vaddw_128B);
+   for 64-byte vectors, use the base name unchanged.  Every Q6_* macro below
+   goes through this wrapper. */
+#if __HVX_LENGTH__ == 128
+#define __BUILTIN_VECTOR_WRAP(a) a ## _128B
+#else
+#define __BUILTIN_VECTOR_WRAP(a) a
+#endif
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Rd32=vextract(Vu32,Rs32)
+ C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs)
+ Instruction Type: LD
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_R_vextract_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=hi(Vss32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_hi_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=lo(Vss32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_lo_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vsplat(Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_V_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=and(Qs4,Qt4)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_and_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=and(Qs4,!Qt4)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_and_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=not(Qs4)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_not_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=or(Qs4,Qt4)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_or_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=or(Qs4,!Qt4)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_or_QQn __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vsetq(Rt32)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vsetq_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=xor(Qs4,Qt4)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_xor_QQ __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (!Qv4) vmem(Rt32+#s4)=Vs32
+ C Intrinsic Prototype: void Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
+ Instruction Type: CVI_VM_ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmem_QnRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (!Qv4) vmem(Rt32+#s4):nt=Vs32
+ C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
+ Instruction Type: CVI_VM_ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmem_QnRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (Qv4) vmem(Rt32+#s4):nt=Vs32
+ C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
+ Instruction Type: CVI_VM_ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmem_QRIV_nt __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (Qv4) vmem(Rt32+#s4)=Vs32
+ C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs)
+ Instruction Type: CVI_VM_ST
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vmem_QRIV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vabsdiff(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuh_vabsdiff_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vub_vabsdiff_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuh_vabsdiff_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vabsdiff(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuw_vabsdiff_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vabs(Vu32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vabs_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vabs(Vu32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vabs_Vh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vabs(Vu32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vabs_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vabs(Vu32.w):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vabs_Vw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vadd_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wb_vadd_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (!Qv4) Vx32.b+=Vu32.b
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_condacc_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (Qv4) Vx32.b+=Vu32.b
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_condacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wh_vadd_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (!Qv4) Vx32.h+=Vu32.h
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_condacc_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (Qv4) Vx32.h+=Vu32.h
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_condacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vadd_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wh_vadd_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vadd(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vadd_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vadd(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vadd_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vadd_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wub_vadd_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vadd_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wuh_vadd_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vadd(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vadd_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vadd_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Ww_vadd_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (!Qv4) Vx32.w+=Vu32.w
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_condacc_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: if (Qv4) Vx32.w+=Vu32.w
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_condacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vadd_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Ww_vadd_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=valign(Vu32,Vv32,Rt8)
+ C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_valign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=valign(Vu32,Vv32,#u3)
+ C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_valign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vand(Vu32,Vv32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vand_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vand(Qu4,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_V_vand_QR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32|=vand(Qu4,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_V_vandor_VQR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vand(Vu32,Rt32)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Q_vand_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vand(Vu32,Rt32)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Q_vandor_QVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasl(Vu32.h,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasl_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasl(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasl_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vasl(Vu32.w,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vasl_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vasl(Vu32.w,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vaslacc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vasl(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vasl_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasr(Vu32.h,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasr_VhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_VhVhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasr(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vasr(Vu32.w,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vasr_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vasr(Vu32.w,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vasracc_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasr_VwVwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vasr_VwVwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vasr(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vasr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=Vu32
+ C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_equals_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32=Vuu32
+ C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_W_equals_W __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h):rnd
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vavg_VhVh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vavg_VubVub_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vavg_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vavg_VuhVuh_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w):rnd
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vavg_VwVw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vcl0(Vu32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vcl0_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vcl0(Vu32.uw)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuw_vcl0_Vuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32=vcombine(Vu32,Vv32)
+ C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_W_vcombine_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=#0
+ C Intrinsic Prototype: HVX_Vector Q6_V_vzero()
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vdeal(Vu32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vdeal_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vdeale(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vdeale_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vdeal(Vu32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vdeal_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32=vdeal(Vu32,Vv32,Rt8)
+ C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_W_vdeal_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vdelta(Vu32,Vv32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vdmpy(Vu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vdmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.h+=vdmpy(Vu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vdmpyacc_VhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vdmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vdmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vdmpy(Vuu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vdmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vdmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpy_WhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwWhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpy_WhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwWhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhRuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpy_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vdmpyacc_VwVhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuw_vdsad_WuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuw_vdsadacc_WuwWuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.eq(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eq_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.eq(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.eq(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.eq(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.eq(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eq_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.eq(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.eq(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.eq(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.eq(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eq_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.eq(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.eq(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.eq(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_eqxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.uw,Vv32.uw)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qd4=vcmp.gt(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gt_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4&=vcmp.gt(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtand_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4|=vcmp.gt(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtor_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Qx4^=vcmp.gt(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Q_vcmp_gtxacc_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w=vinsert(Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt)
+ Instruction Type: CVI_VX_LATE
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vinsert_VwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vlalign(Vu32,Vv32,Rt8)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vlalign_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vlalign(Vu32,Vv32,#u3)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vlalign_VVI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vlsr(Vu32.uh,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vlsr_VuhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vlsr(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vlsr_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vlsr(Vu32.uw,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuw_vlsr_VuwR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vlsr(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vlsr_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vlut32_VbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vlut32or_VbVbVbR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wh_vlut16_VbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VP_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wh_vlut16or_WhVbVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmax(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vmax_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vmax(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vmax_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vmax(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vmax_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmax(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vmax_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmin(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vmin_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vmin(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vmin_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vmin(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vmin_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmin(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vmin_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpa_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpaacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpa_WubWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpa_WubWub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vmpa(Vuu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpa_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=vmpa(Vuu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpaacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Rt32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpyacc_WhVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpyacc_WhVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpy(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h+=vmpy(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpyacc_WhVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpye(Vu32.w,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Rt32.h)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpy_VhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpyacc_WwVhRh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpy_VhRh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpy_VhRh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpy_VhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpyacc_WwVhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpy_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpyacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpy_VhVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyieo(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyieo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyieacc_VwVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyie(Vu32.w,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyie_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyieacc_VwVwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpyi_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpyiacc_VhVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpyi_VhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vh_vmpyiacc_VhVhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyio(Vu32.w,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyio_VwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyi_VwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyiacc_VwVwRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyi_VwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyiacc_VwVwRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyo_VwVh_s1_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuh_vmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuh_vmpyacc_WuhVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuh_vmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuh_vmpyacc_WuhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuw_vmpy_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuw_vmpyacc_WuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuw_vmpy_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wuw_vmpyacc_WuwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vmux(Qt4,Vu32,Vv32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vmux_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vnavg(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vnavg_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vnavg(Vu32.ub,Vv32.ub)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vnavg_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vnavg(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vnavg_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vnormamt(Vu32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vnormamt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vnormamt(Vu32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vnormamt_Vw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vnot(Vu32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vnot_V __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vor(Vu32,Vv32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vpacke(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vpacke_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vpacke(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vpacke_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vpack(Vu32.h,Vv32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vpack(Vu32.h,Vv32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vpack_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vpacko(Vu32.h,Vv32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vpacko_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vpacko(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vpacko_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vpack(Vu32.w,Vv32.w):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vpack(Vu32.w,Vv32.w):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vpack_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vpopcount(Vu32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vpopcount_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=vrdelta(Vu32,Vv32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vrdelta_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vrmpy_VubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vrmpyacc_VwVubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vrmpy_WubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vrmpyacc_WwWubRbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vrmpy_VubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vrmpyacc_VwVubVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vrmpy(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vrmpy_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vrmpyacc_VwVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vuw_vrmpy_VubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vuw_vrmpyacc_VuwVubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wuw_vrmpy_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wuw_vrmpyacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vuw_vrmpy_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Vuw_vrmpyacc_VuwVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32=vror(Vu32,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vror_VR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vround(Vu32.h,Vv32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vround(Vu32.h,Vv32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vround_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vround(Vu32.w,Vv32.w):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vround(Vu32.w,Vv32.w):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vround_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wuw_vrsad_WubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wuw_vrsadacc_WuwWubRubI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vsat(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vsat_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vsat(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vsat_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vsxt(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vsxt_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vsxt(Vu32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vsxt_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vshuffe(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vshuffe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vshuff(Vu32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vshuff_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vshuffe(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vshuffe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vshuff(Vu32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu)
+   Instruction Type: CVI_VP
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vshuff_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vshuffo(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vshuffo_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32=vshuff(Vu32,Vv32,Rt8)
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_W_vshuff_VVR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.b=vshuffoe(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wb_vshuffoe_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vshuffoe(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vshuffoe_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vshuffo(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vshuffo_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vsub_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wb_vsub_WbWb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (!Qv4) Vx32.b-=Vu32.b
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_condnac_QnVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (Qv4) Vx32.b-=Vu32.b
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_condnac_QVbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vsub_WhWh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (!Qv4) Vx32.h-=Vu32.h
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_condnac_QnVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (Qv4) Vx32.h-=Vu32.h
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_condnac_QVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vsub_VhVh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vsub_WhWh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vsub(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vsub_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vsub(Vu32.ub,Vv32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vsub_VubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vsub_VubVub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wub_vsub_WubWub_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vsub_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wuh_vsub_WuhWuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vsub(Vu32.uh,Vv32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vsub_VuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vsub_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vsub_WwWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (!Qv4) Vx32.w-=Vu32.w
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_condnac_QnVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: if (Qv4) Vx32.w-=Vu32.w
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_condnac_QVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vsub_VwVw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vsub_WwWw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32=vswap(Qt4,Vu32,Vv32)
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type: CVI_VA_DV
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_W_vswap_QVV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vtmpy(Vuu32.b,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vtmpy_WbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vtmpyacc_WhWbRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vtmpy_WubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vtmpyacc_WhWubRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vtmpy(Vuu32.h,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vtmpy_WhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type: CVI_VX_DV
+   Execution Slots: SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vtmpyacc_WwWhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.h=vunpack(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vunpack_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.w=vunpack(Vu32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vunpack_Vh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.h|=vunpacko(Vu32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vunpackoor_WhVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vxx32.w|=vunpacko(Vu32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Ww_vunpackoor_WwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uh=vunpack(Vu32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wuh_vunpack_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+   Assembly Syntax: Vdd32.uw=vunpack(Vu32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu)
+   Instruction Type: CVI_VP_VS
+   Execution Slots: SLOT0123
+   ========================================================================== */
+
+#define Q6_Wuw_vunpack_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)
+#endif /* __HVX_ARCH__ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vd32=vxor(Vu32,Vv32)
+ C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_V_vxor_VV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uh=vzxt(Vu32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wuh_vzxt_Vub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 60
+/* ==========================================================================
+ Assembly Syntax: Vdd32.uw=vzxt(Vu32.uh)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu)
+ Instruction Type: CVI_VA_DV
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Wuw_vzxt_Vuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)
+#endif /* __HEXAGON_ARCH___ >= 60 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vsplat(Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vb_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.h=vsplat(Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vh_vsplat_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Qd4=vsetq2(Rt32)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt)
+   Instruction Type:      CVI_VP
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Q_vsetq2_R __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Qd4.b=vshuffe(Qs4.h,Qt4.h)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt)
+   Instruction Type:      CVI_VA_DV
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Qb_vshuffe_QhQh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Qd4.h=vshuffe(Qs4.w,Qt4.w)
+   C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt)
+   Instruction Type:      CVI_VA_DV
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Qh_vshuffe_QwQw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vadd(Vu32.b,Vv32.b):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vadd_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type:      CVI_VA_DV
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wb_vadd_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vadd_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.h=vadd(vclb(Vu32.h),Vv32.h)
+   C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vh_vadd_vclb_VhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vadd(vclb(Vu32.w),Vv32.w)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vadd_vclb_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.w+=vadd(Vu32.h,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vaddacc_WwVhVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.h+=vadd(Vu32.ub,Vv32.ub)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Wh_vaddacc_WhVubVub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vadd(Vu32.ub,Vv32.b):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vadd_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.w+=vadd(Vu32.uh,Vv32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vaddacc_WwVuhVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuw_vadd_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type:      CVI_VA_DV
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wuw_vadd_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32=vand(!Qu4,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_V_vand_QnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vx32|=vand(!Qu4,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt)
+   Instruction Type:      CVI_VX_LATE
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_V_vandor_VQnR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32=vand(!Qv4,Vu32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vand_QnV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32=vand(Qv4,Vu32)
+   C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_V_vand_QV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vasr_VhVhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_VuwVuwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vasr_VwVwR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vlsr(Vu32.ub,Rt32)
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vlsr_VubR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type:      CVI_VP
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vlut32_VbVbR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+   Instruction Type:      CVI_VP_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vlut32or_VbVbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+   Instruction Type:      CVI_VP
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vlut32_VbVbI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+   Instruction Type:      CVI_VP_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vlut16_VbVhR_nomatch __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+   Instruction Type:      CVI_VP_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vlut16or_WhVbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3)
+   Instruction Type:      CVI_VP_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wh_vlut16_VbVhI __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vmax(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vmax_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vmin(Vu32.b,Vv32.b)
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vmin_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.w=vmpa(Vuu32.uh,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vmpa_WuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)
+   C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Ww_vmpaacc_WwWuhRb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32=vmpye(Vu32.w,Vv32.uh)
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_W_vmpye_VwVuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vmpyi(Vu32.w,Rt32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vmpyi_VwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vx32.w+=vmpyi(Vu32.w,Rt32.ub)
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+   Instruction Type:      CVI_VX
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_Vw_vmpyiacc_VwVwRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vxx32+=vmpyo(Vu32.w,Vv32.h)
+   C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VX_DV
+   Execution Slots:       SLOT23
+   ========================================================================== */
+
+#define Q6_W_vmpyoacc_WVwVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vround(Vu32.uh,Vv32.uh):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vround_VuhVuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vround(Vu32.uw,Vv32.uw):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VS
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vround_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uh=vsat(Vu32.uw,Vv32.uw)
+   C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuh_vsat_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.b=vsub(Vu32.b,Vv32.b):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vb_vsub_VbVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type:      CVI_VA_DV
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wb_vsub_WbWb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry
+   C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vw_vsub_VwVwQ_carry __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.ub=vsub(Vu32.ub,Vv32.b):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vub_vsub_VubVb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat
+   C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv)
+   Instruction Type:      CVI_VA
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Vuw_vsub_VuwVuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)
+#endif /* __HVX_ARCH__ >= 62 */
+
+#if __HVX_ARCH__ >= 62
+/* ==========================================================================
+   Assembly Syntax:       Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat
+   C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv)
+   Instruction Type:      CVI_VA_DV
+   Execution Slots:       SLOT0123
+   ========================================================================== */
+
+#define Q6_Wuw_vsub_WuwWuw_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)
+#endif /* __HVX_ARCH__ >= 62 */
+
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vabs(Vu32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vabs_Vb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vabs(Vu32.b):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vabs_Vb_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vx32.h+=vasl(Vu32.h,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vaslacc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vx32.h+=vasr(Vu32.h,Rt32)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_vasracc_VhVhR __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_VuhVuhR_rnd_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vub_vasr_VuhVuhR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuh_vasr_VuwVuwR_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b):rnd
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vavg_VbVb_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)
+#endif /* __HEXAGON_ARCH___ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuw_vavg_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuw_vavg_VuwVuw_rnd __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vdd32=#0
+ C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero()
+ Instruction Type: MAPPING
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_W_vzero __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vv32.h).h
+ C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
+ Instruction Type: CVI_GATHER
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_vgather_ARMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h
+ C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
+ Instruction Type: CVI_GATHER
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_vgather_AQRMVh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h
+ C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_GATHER_DV
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_vgather_ARMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h
+ C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv)
+ Instruction Type: CVI_GATHER_DV
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_vgather_AQRMWw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vtmp.w=vgather(Rt32,Mu2,Vv32.w).w
+ C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
+ Instruction Type: CVI_GATHER
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_vgather_ARMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w
+ C Intrinsic Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv)
+ Instruction Type: CVI_GATHER
+ Execution Slots: SLOT01
+ ========================================================================== */
+
+#define Q6_vgather_AQRMVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=vlut4(Vu32.uh,Rtt32.h)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT2
+ ========================================================================== */
+
+#define Q6_Vh_vlut4_VuhPh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpa_WubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Wh_vmpaacc_WhWubRub __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT2
+ ========================================================================== */
+
+#define Q6_Vh_vmpa_VhVhVhPh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT2
+ ========================================================================== */
+
+#define Q6_Vh_vmpa_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT2
+ ========================================================================== */
+
+#define Q6_Vh_vmps_VhVhVuhPuh_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_vmpyacc_WwVhRh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vmpye(Vu32.uh,Rt32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuw_vmpye_VuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt)
+ Instruction Type: CVI_VX
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Vuw_vmpyeacc_VuwVuhRuh __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=vnavg(Vu32.b,Vv32.b)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_vnavg_VbVb __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.b=prefixsum(Qv4)
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vb_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.h=prefixsum(Qv4)
+ C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vh_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=prefixsum(Qv4)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_prefixsum_Q __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h=Vw32
+ C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatter_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h+=Vw32
+ C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatteracc_RMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32
+ C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatter_QRMVhV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h=Vw32
+ C Intrinsic Prototype: void Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER_DV
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatter_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32
+ C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER_DV
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatteracc_RMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32
+ C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER_DV
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatter_QRMWwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w=Vw32
+ C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatter_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w+=Vw32
+ C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatteracc_RMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 65
+/* ==========================================================================
+ Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32
+ C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw)
+ Instruction Type: CVI_SCATTER
+ Execution Slots: SLOT0
+ ========================================================================== */
+
+#define Q6_vscatter_QRMVwV __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)
+#endif /* __HVX_ARCH__ >= 65 */
+
+#if __HVX_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vadd_VwVwQ_carry_sat __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)
+#endif /* __HVX_ARCH__ >= 66 */
+
+#if __HVX_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w=vasrinto(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VP_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Ww_vasrinto_WwVwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)
+#endif /* __HVX_ARCH__ >= 66 */
+
+#if __HVX_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Vd32.uw=vrotr(Vu32.uw,Vv32.uw)
+ C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VS
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vuw_vrotr_VuwVuw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)
+#endif /* __HVX_ARCH__ >= 66 */
+
+#if __HVX_ARCH__ >= 66
+/* ==========================================================================
+ Assembly Syntax: Vd32.w=vsatdw(Vu32.w,Vv32.w)
+ C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv)
+ Instruction Type: CVI_VA
+ Execution Slots: SLOT0123
+ ========================================================================== */
+
+#define Q6_Vw_vsatdw_VwVw __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)
+#endif /* __HVX_ARCH__ >= 66 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_v6mpy_WubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_v6mpyacc_WwWubWbI_h __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_v6mpy_WubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#if __HVX_ARCH__ >= 68
+/* ==========================================================================
+ Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v
+ C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2)
+ Instruction Type: CVI_VX_DV
+ Execution Slots: SLOT23
+ ========================================================================== */
+
+#define Q6_Ww_v6mpyacc_WwWubWbI_v __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)
+#endif /* __HVX_ARCH__ >= 68 */
+
+#endif /* __HVX__ */
+
+#endif
diff --git a/clang/lib/Headers/immintrin.h b/clang/lib/Headers/immintrin.h
index 22f7a520c929..56d3dadf6a33 100644
--- a/clang/lib/Headers/immintrin.h
+++ b/clang/lib/Headers/immintrin.h
@@ -72,11 +72,6 @@
#include <f16cintrin.h>
#endif
-#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
- defined(__VPCLMULQDQ__)
-#include <vpclmulqdqintrin.h>
-#endif
-
/* No feature check desired due to internal checks */
#include <bmiintrin.h>
@@ -231,6 +226,11 @@
#endif
#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
+ defined(__VPCLMULQDQ__)
+#include <vpclmulqdqintrin.h>
+#endif
+
+#if !(defined(_MSC_VER) || defined(__SCE__)) || __has_feature(modules) || \
defined(__VAES__)
#include <vaesintrin.h>
#endif
diff --git a/clang/lib/Headers/intrin.h b/clang/lib/Headers/intrin.h
index a78b96997d18..ff8eb8fca268 100644
--- a/clang/lib/Headers/intrin.h
+++ b/clang/lib/Headers/intrin.h
@@ -451,24 +451,47 @@ unsigned char _InterlockedCompareExchange128_rel(__int64 volatile *_Destination,
static __inline__ void __DEFAULT_FN_ATTRS __movsb(unsigned char *__dst,
unsigned char const *__src,
size_t __n) {
- __asm__ __volatile__("rep movsb" : "+D"(__dst), "+S"(__src), "+c"(__n)
- : : "memory");
+#if defined(__x86_64__)
+ __asm__ __volatile__("rep movsb"
+ : "+D"(__dst), "+S"(__src), "+c"(__n)
+ :
+ : "memory");
+#else
+ __asm__ __volatile__("xchg %%esi, %1\nrep movsb\nxchg %%esi, %1"
+ : "+D"(__dst), "+r"(__src), "+c"(__n)
+ :
+ : "memory");
+#endif
}
static __inline__ void __DEFAULT_FN_ATTRS __movsd(unsigned long *__dst,
unsigned long const *__src,
size_t __n) {
+#if defined(__x86_64__)
__asm__ __volatile__("rep movsl"
: "+D"(__dst), "+S"(__src), "+c"(__n)
:
: "memory");
+#else
+ __asm__ __volatile__("xchg %%esi, %1\nrep movsl\nxchg %%esi, %1"
+ : "+D"(__dst), "+r"(__src), "+c"(__n)
+ :
+ : "memory");
+#endif
}
static __inline__ void __DEFAULT_FN_ATTRS __movsw(unsigned short *__dst,
unsigned short const *__src,
size_t __n) {
+#if defined(__x86_64__)
__asm__ __volatile__("rep movsw"
: "+D"(__dst), "+S"(__src), "+c"(__n)
:
: "memory");
+#else
+ __asm__ __volatile__("xchg %%esi, %1\nrep movsw\nxchg %%esi, %1"
+ : "+D"(__dst), "+r"(__src), "+c"(__n)
+ :
+ : "memory");
+#endif
}
static __inline__ void __DEFAULT_FN_ATTRS __stosd(unsigned long *__dst,
unsigned long __x,
@@ -507,16 +530,26 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,
|* Misc
\*----------------------------------------------------------------------------*/
#if defined(__i386__) || defined(__x86_64__)
+#if defined(__i386__)
+#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
+ __asm("cpuid" \
+ : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__leaf), "2"(__count))
+#else
+/* x86-64 uses %rbx as the base register, so preserve it. */
+#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
+ __asm("xchgq %%rbx,%q1\n" \
+ "cpuid\n" \
+ "xchgq %%rbx,%q1" \
+ : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
+ : "0"(__leaf), "2"(__count))
+#endif
static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
- __asm__("cpuid"
- : "=a"(__info[0]), "=b"(__info[1]), "=c"(__info[2]), "=d"(__info[3])
- : "a"(__level), "c"(0));
+ __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]);
}
static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
int __ecx) {
- __asm__("cpuid"
- : "=a"(__info[0]), "=b"(__info[1]), "=c"(__info[2]), "=d"(__info[3])
- : "a"(__level), "c"(__ecx));
+ __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]);
}
static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
__asm__ volatile("hlt");
diff --git a/clang/lib/Headers/keylockerintrin.h b/clang/lib/Headers/keylockerintrin.h
index c15d39c8e392..68b0a5689618 100644
--- a/clang/lib/Headers/keylockerintrin.h
+++ b/clang/lib/Headers/keylockerintrin.h
@@ -230,10 +230,12 @@ _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256 )
/// IF (IllegalHandle)
/// ZF := 1
+/// MEM[__odata+127:__odata] := 0
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
/// IF (Authentic == 0)
/// ZF := 1
+/// MEM[__odata+127:__odata] := 0
/// ELSE
/// MEM[__odata+127:__odata] := AES256Encrypt (__idata[127:0], UnwrappedKey)
/// ZF := 0
@@ -267,10 +269,12 @@ _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128)
/// IF (IllegalHandle)
/// ZF := 1
+/// MEM[__odata+127:__odata] := 0
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
/// IF (Authentic == 0)
/// ZF := 1
+/// MEM[__odata+127:__odata] := 0
/// ELSE
/// MEM[__odata+127:__odata] := AES128Decrypt (__idata[127:0], UnwrappedKey)
/// ZF := 0
@@ -304,10 +308,12 @@ _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES256)
/// IF (IllegalHandle)
/// ZF := 1
+/// MEM[__odata+127:__odata] := 0
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
/// IF (Authentic == 0)
/// ZF := 1
+/// MEM[__odata+127:__odata] := 0
/// ELSE
/// MEM[__odata+127:__odata] := AES256Decrypt (__idata[127:0], UnwrappedKey)
/// ZF := 0
@@ -354,10 +360,16 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// HandleKeyType (Handle[383:0]) != HANDLE_KEY_TYPE_AES128 )
/// IF (IllegalHandle)
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
/// IF Authentic == 0
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// FOR i := 0 to 7
/// __odata[i] := AES128Encrypt (__idata[i], UnwrappedKey)
@@ -394,10 +406,16 @@ _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// HandleKeyType (Handle[511:0]) != HANDLE_KEY_TYPE_AES512 )
/// IF (IllegalHandle)
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
/// IF Authentic == 0
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// FOR i := 0 to 7
/// __odata[i] := AES256Encrypt (__idata[i], UnwrappedKey)
@@ -434,10 +452,16 @@ _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES128 )
/// IF (IllegalHandle)
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate384 (Handle[383:0], IWKey)
/// IF Authentic == 0
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// FOR i := 0 to 7
/// __odata[i] := AES128Decrypt (__idata[i], UnwrappedKey)
@@ -474,10 +498,16 @@ _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// HandleKeyType (Handle) != HANDLE_KEY_TYPE_AES512 )
/// If (IllegalHandle)
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// (UnwrappedKey, Authentic) := UnwrapKeyAndAuthenticate512 (Handle[511:0], IWKey)
/// IF Authentic == 0
/// ZF := 1
+/// FOR i := 0 to 7
+/// __odata[i] := 0
+/// ENDFOR
/// ELSE
/// FOR i := 0 to 7
/// __odata[i] := AES256Decrypt (__idata[i], UnwrappedKey)
diff --git a/clang/lib/Headers/opencl-c-base.h b/clang/lib/Headers/opencl-c-base.h
index e8dcd70377e5..3c5e2c973936 100644
--- a/clang/lib/Headers/opencl-c-base.h
+++ b/clang/lib/Headers/opencl-c-base.h
@@ -21,9 +21,37 @@
#define cl_khr_subgroup_shuffle 1
#define cl_khr_subgroup_shuffle_relative 1
#define cl_khr_subgroup_clustered_reduce 1
+#define cl_khr_extended_bit_ops 1
+#define cl_khr_integer_dot_product 1
+#define __opencl_c_integer_dot_product_input_4x8bit 1
+#define __opencl_c_integer_dot_product_input_4x8bit_packed 1
+
#endif // defined(__SPIR__)
#endif // (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ >= 200)
+// Define feature macros for OpenCL C 2.0
+#if (defined(__OPENCL_CPP_VERSION__) || __OPENCL_C_VERSION__ == 200)
+#define __opencl_c_pipes 1
+#define __opencl_c_generic_address_space 1
+#define __opencl_c_work_group_collective_functions 1
+#define __opencl_c_atomic_order_acq_rel 1
+#define __opencl_c_atomic_order_seq_cst 1
+#define __opencl_c_atomic_scope_device 1
+#define __opencl_c_atomic_scope_all_devices 1
+#define __opencl_c_device_enqueue 1
+#define __opencl_c_read_write_images 1
+#define __opencl_c_program_scope_global_variables 1
+#define __opencl_c_images 1
+#endif
+
+// Define header-only feature macros for OpenCL C 3.0.
+#if (__OPENCL_C_VERSION__ == 300)
+// For the SPIR target all features are supported.
+#if defined(__SPIR__)
+#define __opencl_c_atomic_scope_all_devices 1
+#endif // defined(__SPIR__)
+#endif // (__OPENCL_C_VERSION__ == 300)
+
// built-in scalar data types:
/**
@@ -141,7 +169,9 @@ typedef double double8 __attribute__((ext_vector_type(8)));
typedef double double16 __attribute__((ext_vector_type(16)));
#endif
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__OPENCL_CPP_VERSION__)
+#define NULL nullptr
+#elif defined(__OPENCL_C_VERSION__)
#define NULL ((void*)0)
#endif
@@ -297,7 +327,12 @@ typedef enum memory_scope {
memory_scope_work_item = __OPENCL_MEMORY_SCOPE_WORK_ITEM,
memory_scope_work_group = __OPENCL_MEMORY_SCOPE_WORK_GROUP,
memory_scope_device = __OPENCL_MEMORY_SCOPE_DEVICE,
+#if defined(__opencl_c_atomic_scope_all_devices)
memory_scope_all_svm_devices = __OPENCL_MEMORY_SCOPE_ALL_SVM_DEVICES,
+#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0)
+ memory_scope_all_devices = memory_scope_all_svm_devices,
+#endif // __OPENCL_C_VERSION__ >= CL_VERSION_3_0
+#endif // defined(__opencl_c_atomic_scope_all_devices)
#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups)
memory_scope_sub_group = __OPENCL_MEMORY_SCOPE_SUB_GROUP
#endif
@@ -322,7 +357,9 @@ typedef enum memory_order
memory_order_acquire = __ATOMIC_ACQUIRE,
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
+#if defined(__opencl_c_atomic_order_seq_cst)
memory_order_seq_cst = __ATOMIC_SEQ_CST
+#endif
} memory_order;
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
@@ -445,8 +482,113 @@ typedef struct {
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+/**
+ * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
+ * Reinterprets a data type as another data type of the same size
+ */
+#define as_char(x) __builtin_astype((x), char)
+#define as_char2(x) __builtin_astype((x), char2)
+#define as_char3(x) __builtin_astype((x), char3)
+#define as_char4(x) __builtin_astype((x), char4)
+#define as_char8(x) __builtin_astype((x), char8)
+#define as_char16(x) __builtin_astype((x), char16)
+
+#define as_uchar(x) __builtin_astype((x), uchar)
+#define as_uchar2(x) __builtin_astype((x), uchar2)
+#define as_uchar3(x) __builtin_astype((x), uchar3)
+#define as_uchar4(x) __builtin_astype((x), uchar4)
+#define as_uchar8(x) __builtin_astype((x), uchar8)
+#define as_uchar16(x) __builtin_astype((x), uchar16)
+
+#define as_short(x) __builtin_astype((x), short)
+#define as_short2(x) __builtin_astype((x), short2)
+#define as_short3(x) __builtin_astype((x), short3)
+#define as_short4(x) __builtin_astype((x), short4)
+#define as_short8(x) __builtin_astype((x), short8)
+#define as_short16(x) __builtin_astype((x), short16)
+
+#define as_ushort(x) __builtin_astype((x), ushort)
+#define as_ushort2(x) __builtin_astype((x), ushort2)
+#define as_ushort3(x) __builtin_astype((x), ushort3)
+#define as_ushort4(x) __builtin_astype((x), ushort4)
+#define as_ushort8(x) __builtin_astype((x), ushort8)
+#define as_ushort16(x) __builtin_astype((x), ushort16)
+
+#define as_int(x) __builtin_astype((x), int)
+#define as_int2(x) __builtin_astype((x), int2)
+#define as_int3(x) __builtin_astype((x), int3)
+#define as_int4(x) __builtin_astype((x), int4)
+#define as_int8(x) __builtin_astype((x), int8)
+#define as_int16(x) __builtin_astype((x), int16)
+
+#define as_uint(x) __builtin_astype((x), uint)
+#define as_uint2(x) __builtin_astype((x), uint2)
+#define as_uint3(x) __builtin_astype((x), uint3)
+#define as_uint4(x) __builtin_astype((x), uint4)
+#define as_uint8(x) __builtin_astype((x), uint8)
+#define as_uint16(x) __builtin_astype((x), uint16)
+
+#define as_long(x) __builtin_astype((x), long)
+#define as_long2(x) __builtin_astype((x), long2)
+#define as_long3(x) __builtin_astype((x), long3)
+#define as_long4(x) __builtin_astype((x), long4)
+#define as_long8(x) __builtin_astype((x), long8)
+#define as_long16(x) __builtin_astype((x), long16)
+
+#define as_ulong(x) __builtin_astype((x), ulong)
+#define as_ulong2(x) __builtin_astype((x), ulong2)
+#define as_ulong3(x) __builtin_astype((x), ulong3)
+#define as_ulong4(x) __builtin_astype((x), ulong4)
+#define as_ulong8(x) __builtin_astype((x), ulong8)
+#define as_ulong16(x) __builtin_astype((x), ulong16)
+
+#define as_float(x) __builtin_astype((x), float)
+#define as_float2(x) __builtin_astype((x), float2)
+#define as_float3(x) __builtin_astype((x), float3)
+#define as_float4(x) __builtin_astype((x), float4)
+#define as_float8(x) __builtin_astype((x), float8)
+#define as_float16(x) __builtin_astype((x), float16)
+
+#ifdef cl_khr_fp64
+#define as_double(x) __builtin_astype((x), double)
+#define as_double2(x) __builtin_astype((x), double2)
+#define as_double3(x) __builtin_astype((x), double3)
+#define as_double4(x) __builtin_astype((x), double4)
+#define as_double8(x) __builtin_astype((x), double8)
+#define as_double16(x) __builtin_astype((x), double16)
+#endif // cl_khr_fp64
+
+#ifdef cl_khr_fp16
+#define as_half(x) __builtin_astype((x), half)
+#define as_half2(x) __builtin_astype((x), half2)
+#define as_half3(x) __builtin_astype((x), half3)
+#define as_half4(x) __builtin_astype((x), half4)
+#define as_half8(x) __builtin_astype((x), half8)
+#define as_half16(x) __builtin_astype((x), half16)
+#endif // cl_khr_fp16
+
+#define as_size_t(x) __builtin_astype((x), size_t)
+#define as_ptrdiff_t(x) __builtin_astype((x), ptrdiff_t)
+#define as_intptr_t(x) __builtin_astype((x), intptr_t)
+#define as_uintptr_t(x) __builtin_astype((x), uintptr_t)
+
+// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
+
+#define __kernel_exec(X, typen) __kernel \
+ __attribute__((work_group_size_hint(X, 1, 1))) \
+ __attribute__((vec_type_hint(typen)))
+
+#define kernel_exec(X, typen) __kernel \
+ __attribute__((work_group_size_hint(X, 1, 1))) \
+ __attribute__((vec_type_hint(typen)))
+
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
+
+int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
+#endif
+
#ifdef cl_intel_device_side_avc_motion_estimation
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : begin
#define CLK_AVC_ME_MAJOR_16x16_INTEL 0x0
#define CLK_AVC_ME_MAJOR_16x8_INTEL 0x1
@@ -580,7 +722,6 @@ typedef struct {
#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMOUT_INITIALIZE_INTEL 0x0
#define CLK_AVC_IME_RESULT_DUAL_REFERENCE_STREAMIN_INITIALIZE_INTEL 0x0
-#pragma OPENCL EXTENSION cl_intel_device_side_avc_motion_estimation : end
#endif // cl_intel_device_side_avc_motion_estimation
// Disable any extensions we may have enabled previously.
diff --git a/clang/lib/Headers/opencl-c.h b/clang/lib/Headers/opencl-c.h
index ab665628c8e1..fc50dd718c4e 100644
--- a/clang/lib/Headers/opencl-c.h
+++ b/clang/lib/Headers/opencl-c.h
@@ -23,10 +23,11 @@
#endif //cl_khr_3d_image_writes
#endif //__OPENCL_C_VERSION__ < CL_VERSION_2_0
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+
+#if (defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && defined(__SPIR__)
#pragma OPENCL EXTENSION cl_intel_planar_yuv : begin
#pragma OPENCL EXTENSION cl_intel_planar_yuv : end
-#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
+#endif // (defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)) && defined(__SPIR__)
#define __ovld __attribute__((overloadable))
#define __conv __attribute__((convergent))
@@ -6339,101 +6340,6 @@ half16 __ovld __cnfn convert_half16_rtz(double16);
#endif // cl_khr_fp16
-/**
- * OpenCL v1.1/1.2/2.0 s6.2.4.2 - as_type operators
- * Reinterprets a data type as another data type of the same size
- */
-#define as_char(x) __builtin_astype((x), char)
-#define as_char2(x) __builtin_astype((x), char2)
-#define as_char3(x) __builtin_astype((x), char3)
-#define as_char4(x) __builtin_astype((x), char4)
-#define as_char8(x) __builtin_astype((x), char8)
-#define as_char16(x) __builtin_astype((x), char16)
-
-#define as_uchar(x) __builtin_astype((x), uchar)
-#define as_uchar2(x) __builtin_astype((x), uchar2)
-#define as_uchar3(x) __builtin_astype((x), uchar3)
-#define as_uchar4(x) __builtin_astype((x), uchar4)
-#define as_uchar8(x) __builtin_astype((x), uchar8)
-#define as_uchar16(x) __builtin_astype((x), uchar16)
-
-#define as_short(x) __builtin_astype((x), short)
-#define as_short2(x) __builtin_astype((x), short2)
-#define as_short3(x) __builtin_astype((x), short3)
-#define as_short4(x) __builtin_astype((x), short4)
-#define as_short8(x) __builtin_astype((x), short8)
-#define as_short16(x) __builtin_astype((x), short16)
-
-#define as_ushort(x) __builtin_astype((x), ushort)
-#define as_ushort2(x) __builtin_astype((x), ushort2)
-#define as_ushort3(x) __builtin_astype((x), ushort3)
-#define as_ushort4(x) __builtin_astype((x), ushort4)
-#define as_ushort8(x) __builtin_astype((x), ushort8)
-#define as_ushort16(x) __builtin_astype((x), ushort16)
-
-#define as_int(x) __builtin_astype((x), int)
-#define as_int2(x) __builtin_astype((x), int2)
-#define as_int3(x) __builtin_astype((x), int3)
-#define as_int4(x) __builtin_astype((x), int4)
-#define as_int8(x) __builtin_astype((x), int8)
-#define as_int16(x) __builtin_astype((x), int16)
-
-#define as_uint(x) __builtin_astype((x), uint)
-#define as_uint2(x) __builtin_astype((x), uint2)
-#define as_uint3(x) __builtin_astype((x), uint3)
-#define as_uint4(x) __builtin_astype((x), uint4)
-#define as_uint8(x) __builtin_astype((x), uint8)
-#define as_uint16(x) __builtin_astype((x), uint16)
-
-#define as_long(x) __builtin_astype((x), long)
-#define as_long2(x) __builtin_astype((x), long2)
-#define as_long3(x) __builtin_astype((x), long3)
-#define as_long4(x) __builtin_astype((x), long4)
-#define as_long8(x) __builtin_astype((x), long8)
-#define as_long16(x) __builtin_astype((x), long16)
-
-#define as_ulong(x) __builtin_astype((x), ulong)
-#define as_ulong2(x) __builtin_astype((x), ulong2)
-#define as_ulong3(x) __builtin_astype((x), ulong3)
-#define as_ulong4(x) __builtin_astype((x), ulong4)
-#define as_ulong8(x) __builtin_astype((x), ulong8)
-#define as_ulong16(x) __builtin_astype((x), ulong16)
-
-#define as_float(x) __builtin_astype((x), float)
-#define as_float2(x) __builtin_astype((x), float2)
-#define as_float3(x) __builtin_astype((x), float3)
-#define as_float4(x) __builtin_astype((x), float4)
-#define as_float8(x) __builtin_astype((x), float8)
-#define as_float16(x) __builtin_astype((x), float16)
-
-#ifdef cl_khr_fp64
-#define as_double(x) __builtin_astype((x), double)
-#define as_double2(x) __builtin_astype((x), double2)
-#define as_double3(x) __builtin_astype((x), double3)
-#define as_double4(x) __builtin_astype((x), double4)
-#define as_double8(x) __builtin_astype((x), double8)
-#define as_double16(x) __builtin_astype((x), double16)
-#endif //cl_khr_fp64
-
-#ifdef cl_khr_fp16
-#define as_half(x) __builtin_astype((x), half)
-#define as_half2(x) __builtin_astype((x), half2)
-#define as_half3(x) __builtin_astype((x), half3)
-#define as_half4(x) __builtin_astype((x), half4)
-#define as_half8(x) __builtin_astype((x), half8)
-#define as_half16(x) __builtin_astype((x), half16)
-#endif //cl_khr_fp16
-
-// OpenCL v1.1 s6.9, v1.2/2.0 s6.10 - Function qualifiers
-
-#define __kernel_exec(X, typen) __kernel \
- __attribute__((work_group_size_hint(X, 1, 1))) \
- __attribute__((vec_type_hint(typen)))
-
-#define kernel_exec(X, typen) __kernel \
- __attribute__((work_group_size_hint(X, 1, 1))) \
- __attribute__((vec_type_hint(typen)))
-
// OpenCL v1.1 s6.11.1, v1.2 s6.12.1, v2.0 s6.13.1 - Work-item Functions
/**
@@ -6494,8 +6400,7 @@ size_t __ovld __cnfn get_local_id(uint dimindx);
* Returns the number of work-groups that will execute a
* kernel for dimension identified by dimindx.
* Valid values of dimindx are 0 to get_work_dim() - 1.
- * For other values of dimindx, get_num_groups () returns
- * 1.
+ * For other values of dimindx, get_num_groups() returns 1.
* For clEnqueueTask, this always returns 1.
*/
size_t __ovld __cnfn get_num_groups(uint dimindx);
@@ -7354,7 +7259,7 @@ half16 __ovld __cnfn fmod(half16 x, half16 y);
* Returns fmin(x - floor (x), 0x1.fffffep-1f ).
* floor(x) is returned in iptr.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld fract(float x, float *iptr);
float2 __ovld fract(float2 x, float2 *iptr);
float3 __ovld fract(float3 x, float3 *iptr);
@@ -7436,7 +7341,7 @@ half4 __ovld fract(half4 x, __private half4 *iptr);
half8 __ovld fract(half8 x, __private half8 *iptr);
half16 __ovld fract(half16 x, __private half16 *iptr);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Extract mantissa and exponent from x. For each
@@ -7444,7 +7349,7 @@ half16 __ovld fract(half16 x, __private half16 *iptr);
* magnitude in the interval [1/2, 1) or 0. Each
* component of x equals mantissa returned * 2^exp.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld frexp(float x, int *exp);
float2 __ovld frexp(float2 x, int2 *exp);
float3 __ovld frexp(float3 x, int3 *exp);
@@ -7526,7 +7431,7 @@ half4 __ovld frexp(half4 x, __private int4 *exp);
half8 __ovld frexp(half8 x, __private int8 *exp);
half16 __ovld frexp(half16 x, __private int16 *exp);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Compute the value of the square root of x^2 + y^2
@@ -7651,7 +7556,7 @@ half8 __ovld __cnfn lgamma(half8 x);
half16 __ovld __cnfn lgamma(half16 x);
#endif //cl_khr_fp16
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld lgamma_r(float x, int *signp);
float2 __ovld lgamma_r(float2 x, int2 *signp);
float3 __ovld lgamma_r(float3 x, int3 *signp);
@@ -7733,7 +7638,7 @@ half4 __ovld lgamma_r(half4 x, __private int4 *signp);
half8 __ovld lgamma_r(half8 x, __private int8 *signp);
half16 __ovld lgamma_r(half16 x, __private int16 *signp);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Compute natural logarithm.
@@ -7957,7 +7862,7 @@ half16 __ovld __cnfn minmag(half16 x, half16 y);
* the argument. It stores the integral part in the object
* pointed to by iptr.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld modf(float x, float *iptr);
float2 __ovld modf(float2 x, float2 *iptr);
float3 __ovld modf(float3 x, float3 *iptr);
@@ -8039,7 +7944,7 @@ half4 __ovld modf(half4 x, __private half4 *iptr);
half8 __ovld modf(half8 x, __private half8 *iptr);
half16 __ovld modf(half16 x, __private half16 *iptr);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Returns a quiet NaN. The nancode may be placed
@@ -8217,7 +8122,7 @@ half16 __ovld __cnfn remainder(half16 x, half16 y);
* sign as x/y. It stores this signed value in the object
* pointed to by quo.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld remquo(float x, float y, int *quo);
float2 __ovld remquo(float2 x, float2 y, int2 *quo);
float3 __ovld remquo(float3 x, float3 y, int3 *quo);
@@ -8300,7 +8205,7 @@ half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Round to integral value (using round to nearest
* even rounding mode) in floating-point format.
@@ -8441,7 +8346,7 @@ half16 __ovld __cnfn sin(half16);
* is the return value and computed cosine is returned
* in cosval.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld sincos(float x, float *cosval);
float2 __ovld sincos(float2 x, float2 *cosval);
float3 __ovld sincos(float3 x, float3 *cosval);
@@ -8523,7 +8428,7 @@ half4 __ovld sincos(half4 x, __private half4 *cosval);
half8 __ovld sincos(half8 x, __private half8 *cosval);
half16 __ovld sincos(half16 x, __private half16 *cosval);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Compute hyperbolic sine.
@@ -9449,54 +9354,54 @@ ulong16 __ovld __cnfn clz(ulong16 x);
* component type of x, if x is a vector.
*/
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld ctz(char x);
-uchar __ovld ctz(uchar x);
-char2 __ovld ctz(char2 x);
-uchar2 __ovld ctz(uchar2 x);
-char3 __ovld ctz(char3 x);
-uchar3 __ovld ctz(uchar3 x);
-char4 __ovld ctz(char4 x);
-uchar4 __ovld ctz(uchar4 x);
-char8 __ovld ctz(char8 x);
-uchar8 __ovld ctz(uchar8 x);
-char16 __ovld ctz(char16 x);
-uchar16 __ovld ctz(uchar16 x);
-short __ovld ctz(short x);
-ushort __ovld ctz(ushort x);
-short2 __ovld ctz(short2 x);
-ushort2 __ovld ctz(ushort2 x);
-short3 __ovld ctz(short3 x);
-ushort3 __ovld ctz(ushort3 x);
-short4 __ovld ctz(short4 x);
-ushort4 __ovld ctz(ushort4 x);
-short8 __ovld ctz(short8 x);
-ushort8 __ovld ctz(ushort8 x);
-short16 __ovld ctz(short16 x);
-ushort16 __ovld ctz(ushort16 x);
-int __ovld ctz(int x);
-uint __ovld ctz(uint x);
-int2 __ovld ctz(int2 x);
-uint2 __ovld ctz(uint2 x);
-int3 __ovld ctz(int3 x);
-uint3 __ovld ctz(uint3 x);
-int4 __ovld ctz(int4 x);
-uint4 __ovld ctz(uint4 x);
-int8 __ovld ctz(int8 x);
-uint8 __ovld ctz(uint8 x);
-int16 __ovld ctz(int16 x);
-uint16 __ovld ctz(uint16 x);
-long __ovld ctz(long x);
-ulong __ovld ctz(ulong x);
-long2 __ovld ctz(long2 x);
-ulong2 __ovld ctz(ulong2 x);
-long3 __ovld ctz(long3 x);
-ulong3 __ovld ctz(ulong3 x);
-long4 __ovld ctz(long4 x);
-ulong4 __ovld ctz(ulong4 x);
-long8 __ovld ctz(long8 x);
-ulong8 __ovld ctz(ulong8 x);
-long16 __ovld ctz(long16 x);
-ulong16 __ovld ctz(ulong16 x);
+char __ovld __cnfn ctz(char x);
+uchar __ovld __cnfn ctz(uchar x);
+char2 __ovld __cnfn ctz(char2 x);
+uchar2 __ovld __cnfn ctz(uchar2 x);
+char3 __ovld __cnfn ctz(char3 x);
+uchar3 __ovld __cnfn ctz(uchar3 x);
+char4 __ovld __cnfn ctz(char4 x);
+uchar4 __ovld __cnfn ctz(uchar4 x);
+char8 __ovld __cnfn ctz(char8 x);
+uchar8 __ovld __cnfn ctz(uchar8 x);
+char16 __ovld __cnfn ctz(char16 x);
+uchar16 __ovld __cnfn ctz(uchar16 x);
+short __ovld __cnfn ctz(short x);
+ushort __ovld __cnfn ctz(ushort x);
+short2 __ovld __cnfn ctz(short2 x);
+ushort2 __ovld __cnfn ctz(ushort2 x);
+short3 __ovld __cnfn ctz(short3 x);
+ushort3 __ovld __cnfn ctz(ushort3 x);
+short4 __ovld __cnfn ctz(short4 x);
+ushort4 __ovld __cnfn ctz(ushort4 x);
+short8 __ovld __cnfn ctz(short8 x);
+ushort8 __ovld __cnfn ctz(ushort8 x);
+short16 __ovld __cnfn ctz(short16 x);
+ushort16 __ovld __cnfn ctz(ushort16 x);
+int __ovld __cnfn ctz(int x);
+uint __ovld __cnfn ctz(uint x);
+int2 __ovld __cnfn ctz(int2 x);
+uint2 __ovld __cnfn ctz(uint2 x);
+int3 __ovld __cnfn ctz(int3 x);
+uint3 __ovld __cnfn ctz(uint3 x);
+int4 __ovld __cnfn ctz(int4 x);
+uint4 __ovld __cnfn ctz(uint4 x);
+int8 __ovld __cnfn ctz(int8 x);
+uint8 __ovld __cnfn ctz(uint8 x);
+int16 __ovld __cnfn ctz(int16 x);
+uint16 __ovld __cnfn ctz(uint16 x);
+long __ovld __cnfn ctz(long x);
+ulong __ovld __cnfn ctz(ulong x);
+long2 __ovld __cnfn ctz(long2 x);
+ulong2 __ovld __cnfn ctz(ulong2 x);
+long3 __ovld __cnfn ctz(long3 x);
+ulong3 __ovld __cnfn ctz(ulong3 x);
+long4 __ovld __cnfn ctz(long4 x);
+ulong4 __ovld __cnfn ctz(ulong4 x);
+long8 __ovld __cnfn ctz(long8 x);
+ulong8 __ovld __cnfn ctz(ulong8 x);
+long16 __ovld __cnfn ctz(long16 x);
+ulong16 __ovld __cnfn ctz(ulong16 x);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
/**
@@ -10002,6 +9907,7 @@ ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
/*
* popcount(x): returns the number of set bit in x
*/
+#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
char __ovld __cnfn popcount(char x);
uchar __ovld __cnfn popcount(uchar x);
char2 __ovld __cnfn popcount(char2 x);
@@ -10050,6 +9956,7 @@ long8 __ovld __cnfn popcount(long8 x);
ulong8 __ovld __cnfn popcount(ulong8 x);
long16 __ovld __cnfn popcount(long16 x);
ulong16 __ovld __cnfn popcount(ulong16 x);
+#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
/**
* Multiply two 24-bit integer values x and y and add
@@ -11342,7 +11249,7 @@ half8 __ovld vload8(size_t offset, const __constant half *p);
half16 __ovld vload16(size_t offset, const __constant half *p);
#endif //cl_khr_fp16
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
char2 __ovld vload2(size_t offset, const char *p);
uchar2 __ovld vload2(size_t offset, const uchar *p);
short2 __ovld vload2(size_t offset, const short *p);
@@ -11580,9 +11487,9 @@ half4 __ovld vload4(size_t offset, const __private half *p);
half8 __ovld vload8(size_t offset, const __private half *p);
half16 __ovld vload16(size_t offset, const __private half *p);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
void __ovld vstore2(char2 data, size_t offset, char *p);
void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
void __ovld vstore2(short2 data, size_t offset, short *p);
@@ -11816,7 +11723,7 @@ void __ovld vstore4(half4 data, size_t offset, __private half *p);
void __ovld vstore8(half8 data, size_t offset, __private half *p);
void __ovld vstore16(half16 data, size_t offset, __private half *p);
#endif //cl_khr_fp16
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Read sizeof (half) bytes of data from address
@@ -11827,13 +11734,13 @@ void __ovld vstore16(half16 data, size_t offset, __private half *p);
* must be 16-bit aligned.
*/
float __ovld vload_half(size_t offset, const __constant half *p);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld vload_half(size_t offset, const half *p);
#else
float __ovld vload_half(size_t offset, const __global half *p);
float __ovld vload_half(size_t offset, const __local half *p);
float __ovld vload_half(size_t offset, const __private half *p);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* Read sizeof (halfn) bytes of data from address
@@ -11848,7 +11755,7 @@ float3 __ovld vload_half3(size_t offset, const __constant half *p);
float4 __ovld vload_half4(size_t offset, const __constant half *p);
float8 __ovld vload_half8(size_t offset, const __constant half *p);
float16 __ovld vload_half16(size_t offset, const __constant half *p);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float2 __ovld vload_half2(size_t offset, const half *p);
float3 __ovld vload_half3(size_t offset, const half *p);
float4 __ovld vload_half4(size_t offset, const half *p);
@@ -11870,7 +11777,7 @@ float3 __ovld vload_half3(size_t offset, const __private half *p);
float4 __ovld vload_half4(size_t offset, const __private half *p);
float8 __ovld vload_half8(size_t offset, const __private half *p);
float16 __ovld vload_half16(size_t offset, const __private half *p);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* The float value given by data is first
@@ -11883,7 +11790,7 @@ float16 __ovld vload_half16(size_t offset, const __private half *p);
* The default current rounding mode is round to
* nearest even.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
void __ovld vstore_half(float data, size_t offset, half *p);
void __ovld vstore_half_rte(float data, size_t offset, half *p);
void __ovld vstore_half_rtz(float data, size_t offset, half *p);
@@ -11929,7 +11836,7 @@ void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* The floatn value given by data is converted to
@@ -11942,7 +11849,7 @@ void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
* The default current rounding mode is round to
* nearest even.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
void __ovld vstore_half2(float2 data, size_t offset, half *p);
void __ovld vstore_half3(float3 data, size_t offset, half *p);
void __ovld vstore_half4(float4 data, size_t offset, half *p);
@@ -12148,7 +12055,7 @@ void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
@@ -12169,7 +12076,7 @@ float3 __ovld vloada_half3(size_t offset, const __constant half *p);
float4 __ovld vloada_half4(size_t offset, const __constant half *p);
float8 __ovld vloada_half8(size_t offset, const __constant half *p);
float16 __ovld vloada_half16(size_t offset, const __constant half *p);
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
float __ovld vloada_half(size_t offset, const half *p);
float2 __ovld vloada_half2(size_t offset, const half *p);
float3 __ovld vloada_half3(size_t offset, const half *p);
@@ -12195,7 +12102,7 @@ float3 __ovld vloada_half3(size_t offset, const __private half *p);
float4 __ovld vloada_half4(size_t offset, const __private half *p);
float8 __ovld vloada_half8(size_t offset, const __private half *p);
float16 __ovld vloada_half16(size_t offset, const __private half *p);
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
/**
* The floatn value given by data is converted to
@@ -12213,7 +12120,7 @@ float16 __ovld vloada_half16(size_t offset, const __private half *p);
* mode. The default current rounding mode is
* round to nearest even.
*/
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
void __ovld vstorea_half(float data, size_t offset, half *p);
void __ovld vstorea_half2(float2 data, size_t offset, half *p);
void __ovld vstorea_half3(float3 data, size_t offset, half *p);
@@ -12498,7 +12405,7 @@ void __ovld vstorea_half4_rtn(double4 data,size_t offset, __private half *p);
void __ovld vstorea_half8_rtn(double8 data,size_t offset, __private half *p);
void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
#endif //cl_khr_fp64
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
@@ -12582,7 +12489,7 @@ void __ovld write_mem_fence(cl_mem_fence_flags flags);
// OpenCL v2.0 s6.13.9 - Address Space Qualifier Functions
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#if defined(__opencl_c_generic_address_space)
cl_mem_fence_flags __ovld get_fence(const void *ptr);
cl_mem_fence_flags __ovld get_fence(void *ptr);
@@ -12593,7 +12500,7 @@ cl_mem_fence_flags __ovld get_fence(void *ptr);
* where gentype is builtin type or user defined type.
*/
-#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
+#endif //defined(__opencl_c_generic_address_space)
// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10 - Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
@@ -13397,291 +13304,324 @@ void __ovld atomic_init(volatile atomic_double *object, double value);
void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
// atomic_fetch()
+// OpenCL v2.0 s6.13.11.7.5:
+// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif
-// OpenCL v2.0 s6.13.11.7.5:
-// add/sub: atomic type argument can be uintptr_t/intptr_t, value type argument can be ptrdiff_t.
-// or/xor/and/min/max: atomic type argument can be intptr_t/uintptr_t, value type argument can be intptr_t/uintptr_t.
-
+#if defined(__opencl_c_atomic_scope_device)
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#endif
-uintptr_t __ovld atomic_fetch_or(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and(volatile atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min(volatile atomic_uintptr_t *object, intptr_t opermax);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder, memory_scope scope);
-uintptr_t __ovld atomic_fetch_max(volatile atomic_uintptr_t *object, intptr_t opermax);
-uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder);
-uintptr_t __ovld atomic_fetch_max_explicit(volatile atomic_uintptr_t *object, intptr_t opermax, memory_order minder, memory_scope scope);
-
-intptr_t __ovld atomic_fetch_or(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and(volatile atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min(volatile atomic_intptr_t *object, uintptr_t opermax);
-intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder);
-intptr_t __ovld atomic_fetch_min_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder, memory_scope scope);
-intptr_t __ovld atomic_fetch_max(volatile atomic_intptr_t *object, uintptr_t opermax);
-intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder);
-intptr_t __ovld atomic_fetch_max_explicit(volatile atomic_intptr_t *object, uintptr_t opermax, memory_order minder, memory_scope scope);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
+#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
#endif
// atomic_store()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
void __ovld atomic_store(volatile atomic_float *object, float desired);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
void __ovld atomic_store(volatile atomic_double *object, double desired);
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
#endif //cl_khr_fp64
void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
+#endif
+#endif
+
+#if defined(__opencl_c_atomic_scope_device)
+void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+#endif
+#endif
+
+void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
#endif
// atomic_load()
-
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
int __ovld atomic_load(volatile atomic_int *object);
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
uint __ovld atomic_load(volatile atomic_uint *object);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
float __ovld atomic_load(volatile atomic_float *object);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
double __ovld atomic_load(volatile atomic_double *object);
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
#endif //cl_khr_fp64
long __ovld atomic_load(volatile atomic_long *object);
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
ulong __ovld atomic_load(volatile atomic_ulong *object);
+#endif
+#endif
+
+#if defined(__opencl_c_atomic_scope_device)
+int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
+float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
+#endif
+#endif
+
+int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
+uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
+float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
#endif
// atomic_exchange()
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
float __ovld atomic_exchange(volatile atomic_float *object, float desired);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
double __ovld atomic_exchange(volatile atomic_double *object, double desired);
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
#endif //cl_khr_fp64
long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
+#endif
+#endif
+
+#if defined(__opencl_c_atomic_scope_device)
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+#endif
+#endif
+
+int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
+float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+#endif //cl_khr_fp64
+long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
#endif
// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
-
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
+
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
+#endif
+#endif
+
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
ulong desired, memory_order success, memory_order failure);
+#endif
+
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
+ int desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
+ uint desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
+ int desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
+ uint desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
+ float desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
+ float desired, memory_order success, memory_order failure, memory_scope scope);
+#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
+#ifdef cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
+ double desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
+ double desired, memory_order success, memory_order failure, memory_scope scope);
+#endif //cl_khr_fp64
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
+ long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
+ long desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
+ ulong desired, memory_order success, memory_order failure, memory_scope scope);
bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
ulong desired, memory_order success, memory_order failure, memory_scope scope);
#endif
// atomic_flag_test_and_set() and atomic_flag_clear()
-
+#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
void __ovld atomic_flag_clear(volatile atomic_flag *object);
+#endif
+
+#if defined(__opencl_c_atomic_scope_device)
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+#endif
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
@@ -14176,12 +14116,6 @@ half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
#endif //cl_khr_fp16
-#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-// OpenCL v1.2 s6.12.13, v2.0 s6.13.13 - printf
-
-int printf(__constant const char* st, ...) __attribute__((format(printf, 1, 2)));
-#endif
-
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
#ifdef cl_khr_gl_msaa_sharing
@@ -16130,6 +16064,230 @@ double __ovld sub_group_clustered_reduce_max( double value, uint clustersize );
#endif // cl_khr_subgroup_clustered_reduce
+#if defined(cl_khr_extended_bit_ops)
+char __ovld __cnfn bitfield_insert(char, char, uint, uint);
+uchar __ovld __cnfn bitfield_insert(uchar, uchar, uint, uint);
+short __ovld __cnfn bitfield_insert(short, short, uint, uint);
+ushort __ovld __cnfn bitfield_insert(ushort, ushort, uint, uint);
+int __ovld __cnfn bitfield_insert(int, int, uint, uint);
+uint __ovld __cnfn bitfield_insert(uint, uint, uint, uint);
+long __ovld __cnfn bitfield_insert(long, long, uint, uint);
+ulong __ovld __cnfn bitfield_insert(ulong, ulong, uint, uint);
+char2 __ovld __cnfn bitfield_insert(char2, char2, uint, uint);
+uchar2 __ovld __cnfn bitfield_insert(uchar2, uchar2, uint, uint);
+short2 __ovld __cnfn bitfield_insert(short2, short2, uint, uint);
+ushort2 __ovld __cnfn bitfield_insert(ushort2, ushort2, uint, uint);
+int2 __ovld __cnfn bitfield_insert(int2, int2, uint, uint);
+uint2 __ovld __cnfn bitfield_insert(uint2, uint2, uint, uint);
+long2 __ovld __cnfn bitfield_insert(long2, long2, uint, uint);
+ulong2 __ovld __cnfn bitfield_insert(ulong2, ulong2, uint, uint);
+char3 __ovld __cnfn bitfield_insert(char3, char3, uint, uint);
+uchar3 __ovld __cnfn bitfield_insert(uchar3, uchar3, uint, uint);
+short3 __ovld __cnfn bitfield_insert(short3, short3, uint, uint);
+ushort3 __ovld __cnfn bitfield_insert(ushort3, ushort3, uint, uint);
+int3 __ovld __cnfn bitfield_insert(int3, int3, uint, uint);
+uint3 __ovld __cnfn bitfield_insert(uint3, uint3, uint, uint);
+long3 __ovld __cnfn bitfield_insert(long3, long3, uint, uint);
+ulong3 __ovld __cnfn bitfield_insert(ulong3, ulong3, uint, uint);
+char4 __ovld __cnfn bitfield_insert(char4, char4, uint, uint);
+uchar4 __ovld __cnfn bitfield_insert(uchar4, uchar4, uint, uint);
+short4 __ovld __cnfn bitfield_insert(short4, short4, uint, uint);
+ushort4 __ovld __cnfn bitfield_insert(ushort4, ushort4, uint, uint);
+int4 __ovld __cnfn bitfield_insert(int4, int4, uint, uint);
+uint4 __ovld __cnfn bitfield_insert(uint4, uint4, uint, uint);
+long4 __ovld __cnfn bitfield_insert(long4, long4, uint, uint);
+ulong4 __ovld __cnfn bitfield_insert(ulong4, ulong4, uint, uint);
+char8 __ovld __cnfn bitfield_insert(char8, char8, uint, uint);
+uchar8 __ovld __cnfn bitfield_insert(uchar8, uchar8, uint, uint);
+short8 __ovld __cnfn bitfield_insert(short8, short8, uint, uint);
+ushort8 __ovld __cnfn bitfield_insert(ushort8, ushort8, uint, uint);
+int8 __ovld __cnfn bitfield_insert(int8, int8, uint, uint);
+uint8 __ovld __cnfn bitfield_insert(uint8, uint8, uint, uint);
+long8 __ovld __cnfn bitfield_insert(long8, long8, uint, uint);
+ulong8 __ovld __cnfn bitfield_insert(ulong8, ulong8, uint, uint);
+char16 __ovld __cnfn bitfield_insert(char16, char16, uint, uint);
+uchar16 __ovld __cnfn bitfield_insert(uchar16, uchar16, uint, uint);
+short16 __ovld __cnfn bitfield_insert(short16, short16, uint, uint);
+ushort16 __ovld __cnfn bitfield_insert(ushort16, ushort16, uint, uint);
+int16 __ovld __cnfn bitfield_insert(int16, int16, uint, uint);
+uint16 __ovld __cnfn bitfield_insert(uint16, uint16, uint, uint);
+long16 __ovld __cnfn bitfield_insert(long16, long16, uint, uint);
+ulong16 __ovld __cnfn bitfield_insert(ulong16, ulong16, uint, uint);
+
+char __ovld __cnfn bitfield_extract_signed(char, uint, uint);
+short __ovld __cnfn bitfield_extract_signed(short, uint, uint);
+int __ovld __cnfn bitfield_extract_signed(int, uint, uint);
+long __ovld __cnfn bitfield_extract_signed(long, uint, uint);
+char2 __ovld __cnfn bitfield_extract_signed(char2, uint, uint);
+short2 __ovld __cnfn bitfield_extract_signed(short2, uint, uint);
+int2 __ovld __cnfn bitfield_extract_signed(int2, uint, uint);
+long2 __ovld __cnfn bitfield_extract_signed(long2, uint, uint);
+char3 __ovld __cnfn bitfield_extract_signed(char3, uint, uint);
+short3 __ovld __cnfn bitfield_extract_signed(short3, uint, uint);
+int3 __ovld __cnfn bitfield_extract_signed(int3, uint, uint);
+long3 __ovld __cnfn bitfield_extract_signed(long3, uint, uint);
+char4 __ovld __cnfn bitfield_extract_signed(char4, uint, uint);
+short4 __ovld __cnfn bitfield_extract_signed(short4, uint, uint);
+int4 __ovld __cnfn bitfield_extract_signed(int4, uint, uint);
+long4 __ovld __cnfn bitfield_extract_signed(long4, uint, uint);
+char8 __ovld __cnfn bitfield_extract_signed(char8, uint, uint);
+short8 __ovld __cnfn bitfield_extract_signed(short8, uint, uint);
+int8 __ovld __cnfn bitfield_extract_signed(int8, uint, uint);
+long8 __ovld __cnfn bitfield_extract_signed(long8, uint, uint);
+char16 __ovld __cnfn bitfield_extract_signed(char16, uint, uint);
+short16 __ovld __cnfn bitfield_extract_signed(short16, uint, uint);
+int16 __ovld __cnfn bitfield_extract_signed(int16, uint, uint);
+long16 __ovld __cnfn bitfield_extract_signed(long16, uint, uint);
+
+char __ovld __cnfn bitfield_extract_signed(uchar, uint, uint);
+short __ovld __cnfn bitfield_extract_signed(ushort, uint, uint);
+int __ovld __cnfn bitfield_extract_signed(uint, uint, uint);
+long __ovld __cnfn bitfield_extract_signed(ulong, uint, uint);
+char2 __ovld __cnfn bitfield_extract_signed(uchar2, uint, uint);
+short2 __ovld __cnfn bitfield_extract_signed(ushort2, uint, uint);
+int2 __ovld __cnfn bitfield_extract_signed(uint2, uint, uint);
+long2 __ovld __cnfn bitfield_extract_signed(ulong2, uint, uint);
+char3 __ovld __cnfn bitfield_extract_signed(uchar3, uint, uint);
+short3 __ovld __cnfn bitfield_extract_signed(ushort3, uint, uint);
+int3 __ovld __cnfn bitfield_extract_signed(uint3, uint, uint);
+long3 __ovld __cnfn bitfield_extract_signed(ulong3, uint, uint);
+char4 __ovld __cnfn bitfield_extract_signed(uchar4, uint, uint);
+short4 __ovld __cnfn bitfield_extract_signed(ushort4, uint, uint);
+int4 __ovld __cnfn bitfield_extract_signed(uint4, uint, uint);
+long4 __ovld __cnfn bitfield_extract_signed(ulong4, uint, uint);
+char8 __ovld __cnfn bitfield_extract_signed(uchar8, uint, uint);
+short8 __ovld __cnfn bitfield_extract_signed(ushort8, uint, uint);
+int8 __ovld __cnfn bitfield_extract_signed(uint8, uint, uint);
+long8 __ovld __cnfn bitfield_extract_signed(ulong8, uint, uint);
+char16 __ovld __cnfn bitfield_extract_signed(uchar16, uint, uint);
+short16 __ovld __cnfn bitfield_extract_signed(ushort16, uint, uint);
+int16 __ovld __cnfn bitfield_extract_signed(uint16, uint, uint);
+long16 __ovld __cnfn bitfield_extract_signed(ulong16, uint, uint);
+
+uchar __ovld __cnfn bitfield_extract_unsigned(char, uint, uint);
+ushort __ovld __cnfn bitfield_extract_unsigned(short, uint, uint);
+uint __ovld __cnfn bitfield_extract_unsigned(int, uint, uint);
+ulong __ovld __cnfn bitfield_extract_unsigned(long, uint, uint);
+uchar2 __ovld __cnfn bitfield_extract_unsigned(char2, uint, uint);
+ushort2 __ovld __cnfn bitfield_extract_unsigned(short2, uint, uint);
+uint2 __ovld __cnfn bitfield_extract_unsigned(int2, uint, uint);
+ulong2 __ovld __cnfn bitfield_extract_unsigned(long2, uint, uint);
+uchar3 __ovld __cnfn bitfield_extract_unsigned(char3, uint, uint);
+ushort3 __ovld __cnfn bitfield_extract_unsigned(short3, uint, uint);
+uint3 __ovld __cnfn bitfield_extract_unsigned(int3, uint, uint);
+ulong3 __ovld __cnfn bitfield_extract_unsigned(long3, uint, uint);
+uchar4 __ovld __cnfn bitfield_extract_unsigned(char4, uint, uint);
+ushort4 __ovld __cnfn bitfield_extract_unsigned(short4, uint, uint);
+uint4 __ovld __cnfn bitfield_extract_unsigned(int4, uint, uint);
+ulong4 __ovld __cnfn bitfield_extract_unsigned(long4, uint, uint);
+uchar8 __ovld __cnfn bitfield_extract_unsigned(char8, uint, uint);
+ushort8 __ovld __cnfn bitfield_extract_unsigned(short8, uint, uint);
+uint8 __ovld __cnfn bitfield_extract_unsigned(int8, uint, uint);
+ulong8 __ovld __cnfn bitfield_extract_unsigned(long8, uint, uint);
+uchar16 __ovld __cnfn bitfield_extract_unsigned(char16, uint, uint);
+ushort16 __ovld __cnfn bitfield_extract_unsigned(short16, uint, uint);
+uint16 __ovld __cnfn bitfield_extract_unsigned(int16, uint, uint);
+ulong16 __ovld __cnfn bitfield_extract_unsigned(long16, uint, uint);
+
+uchar __ovld __cnfn bitfield_extract_unsigned(uchar, uint, uint);
+ushort __ovld __cnfn bitfield_extract_unsigned(ushort, uint, uint);
+uint __ovld __cnfn bitfield_extract_unsigned(uint, uint, uint);
+ulong __ovld __cnfn bitfield_extract_unsigned(ulong, uint, uint);
+uchar2 __ovld __cnfn bitfield_extract_unsigned(uchar2, uint, uint);
+ushort2 __ovld __cnfn bitfield_extract_unsigned(ushort2, uint, uint);
+uint2 __ovld __cnfn bitfield_extract_unsigned(uint2, uint, uint);
+ulong2 __ovld __cnfn bitfield_extract_unsigned(ulong2, uint, uint);
+uchar3 __ovld __cnfn bitfield_extract_unsigned(uchar3, uint, uint);
+ushort3 __ovld __cnfn bitfield_extract_unsigned(ushort3, uint, uint);
+uint3 __ovld __cnfn bitfield_extract_unsigned(uint3, uint, uint);
+ulong3 __ovld __cnfn bitfield_extract_unsigned(ulong3, uint, uint);
+uchar4 __ovld __cnfn bitfield_extract_unsigned(uchar4, uint, uint);
+ushort4 __ovld __cnfn bitfield_extract_unsigned(ushort4, uint, uint);
+uint4 __ovld __cnfn bitfield_extract_unsigned(uint4, uint, uint);
+ulong4 __ovld __cnfn bitfield_extract_unsigned(ulong4, uint, uint);
+uchar8 __ovld __cnfn bitfield_extract_unsigned(uchar8, uint, uint);
+ushort8 __ovld __cnfn bitfield_extract_unsigned(ushort8, uint, uint);
+uint8 __ovld __cnfn bitfield_extract_unsigned(uint8, uint, uint);
+ulong8 __ovld __cnfn bitfield_extract_unsigned(ulong8, uint, uint);
+uchar16 __ovld __cnfn bitfield_extract_unsigned(uchar16, uint, uint);
+ushort16 __ovld __cnfn bitfield_extract_unsigned(ushort16, uint, uint);
+uint16 __ovld __cnfn bitfield_extract_unsigned(uint16, uint, uint);
+ulong16 __ovld __cnfn bitfield_extract_unsigned(ulong16, uint, uint);
+
+char __ovld __cnfn bit_reverse(char);
+uchar __ovld __cnfn bit_reverse(uchar);
+short __ovld __cnfn bit_reverse(short);
+ushort __ovld __cnfn bit_reverse(ushort);
+int __ovld __cnfn bit_reverse(int);
+uint __ovld __cnfn bit_reverse(uint);
+long __ovld __cnfn bit_reverse(long);
+ulong __ovld __cnfn bit_reverse(ulong);
+char2 __ovld __cnfn bit_reverse(char2);
+uchar2 __ovld __cnfn bit_reverse(uchar2);
+short2 __ovld __cnfn bit_reverse(short2);
+ushort2 __ovld __cnfn bit_reverse(ushort2);
+int2 __ovld __cnfn bit_reverse(int2);
+uint2 __ovld __cnfn bit_reverse(uint2);
+long2 __ovld __cnfn bit_reverse(long2);
+ulong2 __ovld __cnfn bit_reverse(ulong2);
+char3 __ovld __cnfn bit_reverse(char3);
+uchar3 __ovld __cnfn bit_reverse(uchar3);
+short3 __ovld __cnfn bit_reverse(short3);
+ushort3 __ovld __cnfn bit_reverse(ushort3);
+int3 __ovld __cnfn bit_reverse(int3);
+uint3 __ovld __cnfn bit_reverse(uint3);
+long3 __ovld __cnfn bit_reverse(long3);
+ulong3 __ovld __cnfn bit_reverse(ulong3);
+char4 __ovld __cnfn bit_reverse(char4);
+uchar4 __ovld __cnfn bit_reverse(uchar4);
+short4 __ovld __cnfn bit_reverse(short4);
+ushort4 __ovld __cnfn bit_reverse(ushort4);
+int4 __ovld __cnfn bit_reverse(int4);
+uint4 __ovld __cnfn bit_reverse(uint4);
+long4 __ovld __cnfn bit_reverse(long4);
+ulong4 __ovld __cnfn bit_reverse(ulong4);
+char8 __ovld __cnfn bit_reverse(char8);
+uchar8 __ovld __cnfn bit_reverse(uchar8);
+short8 __ovld __cnfn bit_reverse(short8);
+ushort8 __ovld __cnfn bit_reverse(ushort8);
+int8 __ovld __cnfn bit_reverse(int8);
+uint8 __ovld __cnfn bit_reverse(uint8);
+long8 __ovld __cnfn bit_reverse(long8);
+ulong8 __ovld __cnfn bit_reverse(ulong8);
+char16 __ovld __cnfn bit_reverse(char16);
+uchar16 __ovld __cnfn bit_reverse(uchar16);
+short16 __ovld __cnfn bit_reverse(short16);
+ushort16 __ovld __cnfn bit_reverse(ushort16);
+int16 __ovld __cnfn bit_reverse(int16);
+uint16 __ovld __cnfn bit_reverse(uint16);
+long16 __ovld __cnfn bit_reverse(long16);
+ulong16 __ovld __cnfn bit_reverse(ulong16);
+#endif // cl_khr_extended_bit_ops
+
+#if defined(__opencl_c_integer_dot_product_input_4x8bit)
+uint __ovld __cnfn dot(uchar4, uchar4);
+int __ovld __cnfn dot(char4, char4);
+int __ovld __cnfn dot(uchar4, char4);
+int __ovld __cnfn dot(char4, uchar4);
+
+uint __ovld __cnfn dot_acc_sat(uchar4, uchar4, uint);
+int __ovld __cnfn dot_acc_sat(char4, char4, int);
+int __ovld __cnfn dot_acc_sat(uchar4, char4, int);
+int __ovld __cnfn dot_acc_sat(char4, uchar4, int);
+#endif // __opencl_c_integer_dot_product_input_4x8bit
+
+#if defined(__opencl_c_integer_dot_product_input_4x8bit_packed)
+uint __ovld __cnfn dot_4x8packed_uu_uint(uint, uint);
+int __ovld __cnfn dot_4x8packed_ss_int(uint, uint);
+int __ovld __cnfn dot_4x8packed_us_int(uint, uint);
+int __ovld __cnfn dot_4x8packed_su_int(uint, uint);
+
+uint __ovld __cnfn dot_acc_sat_4x8packed_uu_uint(uint, uint, uint);
+int __ovld __cnfn dot_acc_sat_4x8packed_ss_int(uint, uint, int);
+int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
+int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
+#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
+
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
float __ovld __conv intel_sub_group_shuffle( float x, uint c );
@@ -17127,31 +17285,23 @@ uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
#endif // cl_amd_media_ops2
#if defined(cl_arm_integer_dot_product_int8)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : begin
uint __ovld arm_dot(uchar4 a, uchar4 b);
int __ovld arm_dot(char4 a, char4 b);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : end
#endif // defined(cl_arm_integer_dot_product_int8)
#if defined(cl_arm_integer_dot_product_accumulate_int8)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : begin
uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
int __ovld arm_dot_acc(char4 a, char4 b, int c);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : end
#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(cl_arm_integer_dot_product_accumulate_int16)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int16 : begin
uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
int __ovld arm_dot_acc(short2 a, short2 b, int c);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int16 : end
#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_saturate_int8 : begin
uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
-#pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_saturate_int8 : end
#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
// Disable any extensions we may have enabled previously.
diff --git a/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h b/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
index 406c9748e286..953857badfc4 100644
--- a/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
+++ b/clang/lib/Headers/openmp_wrappers/__clang_openmp_device_functions.h
@@ -39,4 +39,46 @@ extern "C" {
#pragma omp end declare variant
+// Ensure we make `_ZdlPv`, aka. `operator delete(void*)` available without the
+// need to `include <new>` in C++ mode.
+#ifdef __cplusplus
+
+// We require malloc/free.
+#include <cstdlib>
+
+#pragma push_macro("OPENMP_NOEXCEPT")
+#if __cplusplus >= 201103L
+#define OPENMP_NOEXCEPT noexcept
+#else
+#define OPENMP_NOEXCEPT
+#endif
+
+// Device overrides for non-placement new and delete.
+inline void *operator new(__SIZE_TYPE__ size) {
+ if (size == 0)
+ size = 1;
+ return ::malloc(size);
+}
+
+inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }
+
+inline void operator delete(void *ptr)OPENMP_NOEXCEPT { ::free(ptr); }
+
+inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+
+// Sized delete, C++14 only.
+#if __cplusplus >= 201402L
+inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
+ ::operator delete(ptr);
+}
+#endif
+
+#pragma pop_macro("OPENMP_NOEXCEPT")
+#endif
+
#endif
diff --git a/clang/lib/Headers/openmp_wrappers/complex b/clang/lib/Headers/openmp_wrappers/complex
index 142e526b81b3..dfd6193c97cb 100644
--- a/clang/lib/Headers/openmp_wrappers/complex
+++ b/clang/lib/Headers/openmp_wrappers/complex
@@ -17,7 +17,6 @@
// We require std::math functions in the complex builtins below.
#include <cmath>
-#define __CUDA__
#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
#undef __OPENMP_NVPTX__
@@ -26,9 +25,6 @@
// Grab the host header too.
#include_next <complex>
-
-#ifdef __cplusplus
-
// If we are compiling against libc++, the macro _LIBCPP_STD_VER should be set
// after including <cmath> above. Since the complex header we use is a
// simplified version of the libc++, we don't need it in this case. If we
@@ -48,5 +44,3 @@
#pragma omp end declare variant
#endif
-
-#endif
diff --git a/clang/lib/Headers/openmp_wrappers/complex.h b/clang/lib/Headers/openmp_wrappers/complex.h
index 00d278548f82..15dc415b8126 100644
--- a/clang/lib/Headers/openmp_wrappers/complex.h
+++ b/clang/lib/Headers/openmp_wrappers/complex.h
@@ -17,7 +17,6 @@
// We require math functions in the complex builtins below.
#include <math.h>
-#define __CUDA__
#define __OPENMP_NVPTX__
#include <__clang_cuda_complex_builtins.h>
#undef __OPENMP_NVPTX__
diff --git a/clang/lib/Headers/openmp_wrappers/new b/clang/lib/Headers/openmp_wrappers/new
index 1387d925b126..985ddc567f49 100644
--- a/clang/lib/Headers/openmp_wrappers/new
+++ b/clang/lib/Headers/openmp_wrappers/new
@@ -9,6 +9,8 @@
#ifndef __CLANG_OPENMP_WRAPPERS_NEW
#define __CLANG_OPENMP_WRAPPERS_NEW
+// We need the system <new> for the std::nothrow_t. The new/delete operators
+// which do not use nothrow_t are provided without the <new> header.
#include_next <new>
#if defined(__NVPTX__) && defined(_OPENMP)
@@ -22,48 +24,24 @@
#define OPENMP_NOEXCEPT
#endif
-// Device overrides for non-placement new and delete.
-inline void *operator new(__SIZE_TYPE__ size) {
- if (size == 0)
- size = 1;
- return ::malloc(size);
-}
inline void *operator new(__SIZE_TYPE__ size,
const std::nothrow_t &) OPENMP_NOEXCEPT {
return ::operator new(size);
}
-inline void *operator new[](__SIZE_TYPE__ size) { return ::operator new(size); }
inline void *operator new[](__SIZE_TYPE__ size, const std::nothrow_t &) {
return ::operator new(size);
}
-inline void operator delete(void *ptr)OPENMP_NOEXCEPT {
- if (ptr)
- ::free(ptr);
-}
inline void operator delete(void *ptr, const std::nothrow_t &)OPENMP_NOEXCEPT {
::operator delete(ptr);
}
-inline void operator delete[](void *ptr) OPENMP_NOEXCEPT {
- ::operator delete(ptr);
-}
inline void operator delete[](void *ptr,
const std::nothrow_t &) OPENMP_NOEXCEPT {
::operator delete(ptr);
}
-// Sized delete, C++14 only.
-#if __cplusplus >= 201402L
-inline void operator delete(void *ptr, __SIZE_TYPE__ size)OPENMP_NOEXCEPT {
- ::operator delete(ptr);
-}
-inline void operator delete[](void *ptr, __SIZE_TYPE__ size) OPENMP_NOEXCEPT {
- ::operator delete(ptr);
-}
-#endif
-
#pragma pop_macro("OPENMP_NOEXCEPT")
#endif
diff --git a/clang/lib/Headers/ppc_wrappers/xmmintrin.h b/clang/lib/Headers/ppc_wrappers/xmmintrin.h
index 0f429fa04081..0e45b96769f8 100644
--- a/clang/lib/Headers/ppc_wrappers/xmmintrin.h
+++ b/clang/lib/Headers/ppc_wrappers/xmmintrin.h
@@ -28,7 +28,7 @@
Most SSE scalar float intrinsic operations can be performed more
efficiently as C language float scalar operations or optimized to
use vector SIMD operations. We recommend this for new applications. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif
#ifndef _XMMINTRIN_H_INCLUDED
@@ -62,14 +62,13 @@
/* The Intel API is flexible enough that we must allow aliasing with other
vector types, and their scalar components. */
-typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef vector float __m128 __attribute__((__may_alias__));
/* Unaligned version of the same type. */
-typedef float __m128_u __attribute__ ((__vector_size__ (16), __may_alias__,
- __aligned__ (1)));
+typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
/* Internal data types for implementing the intrinsics. */
-typedef float __v4sf __attribute__ ((__vector_size__ (16)));
+typedef vector float __v4sf;
/* Create an undefined vector. */
extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
diff --git a/clang/lib/Headers/uintrintrin.h b/clang/lib/Headers/uintrintrin.h
index 78aa8779c325..e3839dcebe1e 100644
--- a/clang/lib/Headers/uintrintrin.h
+++ b/clang/lib/Headers/uintrintrin.h
@@ -20,6 +20,13 @@
#ifdef __x86_64__
+struct __uintr_frame
+{
+ unsigned long long rip;
+ unsigned long long rflags;
+ unsigned long long rsp;
+};
+
/// Clears the user interrupt flag (UIF). Its effect takes place immediately: a
/// user interrupt cannot be delivered on the instruction boundary following
/// CLUI. Can be executed only if CR4.UINT = 1, the logical processor is in
diff --git a/clang/lib/Headers/vaesintrin.h b/clang/lib/Headers/vaesintrin.h
index c4d5c3e75140..f3c0807bb94a 100644
--- a/clang/lib/Headers/vaesintrin.h
+++ b/clang/lib/Headers/vaesintrin.h
@@ -28,13 +28,6 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
(__v4di) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesenc_epi128(__m512i __A, __m512i __B)
-{
- return (__m512i) __builtin_ia32_aesenc512((__v8di) __A,
- (__v8di) __B);
-}
-
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_aesdec_epi128(__m256i __A, __m256i __B)
{
@@ -42,32 +35,40 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
(__v4di) __B);
}
-static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesdec_epi128(__m512i __A, __m512i __B)
+static __inline__ __m256i __DEFAULT_FN_ATTRS
+ _mm256_aesenclast_epi128(__m256i __A, __m256i __B)
{
- return (__m512i) __builtin_ia32_aesdec512((__v8di) __A,
- (__v8di) __B);
+ return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A,
+ (__v4di) __B);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesenclast_epi128(__m256i __A, __m256i __B)
+ _mm256_aesdeclast_epi128(__m256i __A, __m256i __B)
{
- return (__m256i) __builtin_ia32_aesenclast256((__v4di) __A,
+ return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A,
(__v4di) __B);
}
+#ifdef __AVX512FINTRIN_H
static __inline__ __m512i __DEFAULT_FN_ATTRS_F
- _mm512_aesenclast_epi128(__m512i __A, __m512i __B)
+ _mm512_aesenc_epi128(__m512i __A, __m512i __B)
{
- return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A,
+ return (__m512i) __builtin_ia32_aesenc512((__v8di) __A,
(__v8di) __B);
}
-static __inline__ __m256i __DEFAULT_FN_ATTRS
- _mm256_aesdeclast_epi128(__m256i __A, __m256i __B)
+static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+ _mm512_aesdec_epi128(__m512i __A, __m512i __B)
{
- return (__m256i) __builtin_ia32_aesdeclast256((__v4di) __A,
- (__v4di) __B);
+ return (__m512i) __builtin_ia32_aesdec512((__v8di) __A,
+ (__v8di) __B);
+}
+
+static __inline__ __m512i __DEFAULT_FN_ATTRS_F
+ _mm512_aesenclast_epi128(__m512i __A, __m512i __B)
+{
+ return (__m512i) __builtin_ia32_aesenclast512((__v8di) __A,
+ (__v8di) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS_F
@@ -76,7 +77,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS_F
return (__m512i) __builtin_ia32_aesdeclast512((__v8di) __A,
(__v8di) __B);
}
-
+#endif // __AVX512FINTRIN_H
#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_F
diff --git a/clang/lib/Headers/vecintrin.h b/clang/lib/Headers/vecintrin.h
index e58c9769e8cb..ec1dbfd015f6 100644
--- a/clang/lib/Headers/vecintrin.h
+++ b/clang/lib/Headers/vecintrin.h
@@ -1016,64 +1016,84 @@ vec_scatter_element(__vector double __vec,
static inline __ATTRS_o_ai __vector signed char
vec_xl(long __offset, const signed char *__ptr) {
- return *(const __vector signed char *)
- ((const char *)__ptr + __offset);
+ __vector signed char V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed char));
+ return V;
}
static inline __ATTRS_o_ai __vector unsigned char
vec_xl(long __offset, const unsigned char *__ptr) {
- return *(const __vector unsigned char *)
- ((const char *)__ptr + __offset);
+ __vector unsigned char V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned char));
+ return V;
}
static inline __ATTRS_o_ai __vector signed short
vec_xl(long __offset, const signed short *__ptr) {
- return *(const __vector signed short *)
- ((const char *)__ptr + __offset);
+ __vector signed short V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed short));
+ return V;
}
static inline __ATTRS_o_ai __vector unsigned short
vec_xl(long __offset, const unsigned short *__ptr) {
- return *(const __vector unsigned short *)
- ((const char *)__ptr + __offset);
+ __vector unsigned short V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned short));
+ return V;
}
static inline __ATTRS_o_ai __vector signed int
vec_xl(long __offset, const signed int *__ptr) {
- return *(const __vector signed int *)
- ((const char *)__ptr + __offset);
+ __vector signed int V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed int));
+ return V;
}
static inline __ATTRS_o_ai __vector unsigned int
vec_xl(long __offset, const unsigned int *__ptr) {
- return *(const __vector unsigned int *)
- ((const char *)__ptr + __offset);
+ __vector unsigned int V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned int));
+ return V;
}
static inline __ATTRS_o_ai __vector signed long long
vec_xl(long __offset, const signed long long *__ptr) {
- return *(const __vector signed long long *)
- ((const char *)__ptr + __offset);
+ __vector signed long long V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed long long));
+ return V;
}
static inline __ATTRS_o_ai __vector unsigned long long
vec_xl(long __offset, const unsigned long long *__ptr) {
- return *(const __vector unsigned long long *)
- ((const char *)__ptr + __offset);
+ __vector unsigned long long V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned long long));
+ return V;
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai __vector float
vec_xl(long __offset, const float *__ptr) {
- return *(const __vector float *)
- ((const char *)__ptr + __offset);
+ __vector float V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector float));
+ return V;
}
#endif
static inline __ATTRS_o_ai __vector double
vec_xl(long __offset, const double *__ptr) {
- return *(const __vector double *)
- ((const char *)__ptr + __offset);
+ __vector double V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector double));
+ return V;
}
/*-- vec_xld2 ---------------------------------------------------------------*/
@@ -1081,64 +1101,82 @@ vec_xl(long __offset, const double *__ptr) {
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed char
vec_xld2(long __offset, const signed char *__ptr) {
- return *(const __vector signed char *)
- ((const char *)__ptr + __offset);
+ __vector signed char V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed char));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned char
vec_xld2(long __offset, const unsigned char *__ptr) {
- return *(const __vector unsigned char *)
- ((const char *)__ptr + __offset);
+ __vector unsigned char V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned char));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed short
vec_xld2(long __offset, const signed short *__ptr) {
- return *(const __vector signed short *)
- ((const char *)__ptr + __offset);
+ __vector signed short V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed short));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned short
vec_xld2(long __offset, const unsigned short *__ptr) {
- return *(const __vector unsigned short *)
- ((const char *)__ptr + __offset);
+ __vector unsigned short V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned short));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed int
vec_xld2(long __offset, const signed int *__ptr) {
- return *(const __vector signed int *)
- ((const char *)__ptr + __offset);
+ __vector signed int V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed int));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned int
vec_xld2(long __offset, const unsigned int *__ptr) {
- return *(const __vector unsigned int *)
- ((const char *)__ptr + __offset);
+ __vector unsigned int V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned int));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed long long
vec_xld2(long __offset, const signed long long *__ptr) {
- return *(const __vector signed long long *)
- ((const char *)__ptr + __offset);
+ __vector signed long long V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed long long));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned long long
vec_xld2(long __offset, const unsigned long long *__ptr) {
- return *(const __vector unsigned long long *)
- ((const char *)__ptr + __offset);
+ __vector unsigned long long V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned long long));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector double
vec_xld2(long __offset, const double *__ptr) {
- return *(const __vector double *)
- ((const char *)__ptr + __offset);
+ __vector double V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector double));
+ return V;
}
/*-- vec_xlw4 ---------------------------------------------------------------*/
@@ -1146,99 +1184,128 @@ vec_xld2(long __offset, const double *__ptr) {
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed char
vec_xlw4(long __offset, const signed char *__ptr) {
- return *(const __vector signed char *)
- ((const char *)__ptr + __offset);
+ __vector signed char V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed char));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned char
vec_xlw4(long __offset, const unsigned char *__ptr) {
- return *(const __vector unsigned char *)
- ((const char *)__ptr + __offset);
+ __vector unsigned char V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned char));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed short
vec_xlw4(long __offset, const signed short *__ptr) {
- return *(const __vector signed short *)
- ((const char *)__ptr + __offset);
+ __vector signed short V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed short));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned short
vec_xlw4(long __offset, const unsigned short *__ptr) {
- return *(const __vector unsigned short *)
- ((const char *)__ptr + __offset);
+ __vector unsigned short V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned short));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector signed int
vec_xlw4(long __offset, const signed int *__ptr) {
- return *(const __vector signed int *)
- ((const char *)__ptr + __offset);
+ __vector signed int V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector signed int));
+ return V;
}
// This prototype is deprecated.
static inline __ATTRS_o_ai __vector unsigned int
vec_xlw4(long __offset, const unsigned int *__ptr) {
- return *(const __vector unsigned int *)
- ((const char *)__ptr + __offset);
+ __vector unsigned int V;
+ __builtin_memcpy(&V, ((const char *)__ptr + __offset),
+ sizeof(__vector unsigned int));
+ return V;
}
/*-- vec_xst ----------------------------------------------------------------*/
static inline __ATTRS_o_ai void
vec_xst(__vector signed char __vec, long __offset, signed char *__ptr) {
- *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+ __vector signed char V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed char));
}
static inline __ATTRS_o_ai void
vec_xst(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned char V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned char));
}
static inline __ATTRS_o_ai void
vec_xst(__vector signed short __vec, long __offset, signed short *__ptr) {
- *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+ __vector signed short V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed short));
}
static inline __ATTRS_o_ai void
vec_xst(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned short V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned short));
}
static inline __ATTRS_o_ai void
vec_xst(__vector signed int __vec, long __offset, signed int *__ptr) {
- *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+ __vector signed int V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector signed int));
}
static inline __ATTRS_o_ai void
vec_xst(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned int V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned int));
}
static inline __ATTRS_o_ai void
vec_xst(__vector signed long long __vec, long __offset,
- signed long long *__ptr) {
- *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
+ signed long long *__ptr) {
+ __vector signed long long V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed long long));
}
static inline __ATTRS_o_ai void
vec_xst(__vector unsigned long long __vec, long __offset,
- unsigned long long *__ptr) {
- *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
+ unsigned long long *__ptr) {
+ __vector unsigned long long V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned long long));
}
#if __ARCH__ >= 12
static inline __ATTRS_o_ai void
vec_xst(__vector float __vec, long __offset, float *__ptr) {
- *(__vector float *)((char *)__ptr + __offset) = __vec;
+ __vector float V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector float));
}
#endif
static inline __ATTRS_o_ai void
vec_xst(__vector double __vec, long __offset, double *__ptr) {
- *(__vector double *)((char *)__ptr + __offset) = __vec;
+ __vector double V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector double));
}
/*-- vec_xstd2 --------------------------------------------------------------*/
@@ -1246,57 +1313,73 @@ vec_xst(__vector double __vec, long __offset, double *__ptr) {
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector signed char __vec, long __offset, signed char *__ptr) {
- *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+ __vector signed char V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed char));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned char V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned char));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector signed short __vec, long __offset, signed short *__ptr) {
- *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+ __vector signed short V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed short));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned short V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned short));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector signed int __vec, long __offset, signed int *__ptr) {
- *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+ __vector signed int V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector signed int));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned int V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned int));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector signed long long __vec, long __offset,
signed long long *__ptr) {
- *(__vector signed long long *)((char *)__ptr + __offset) = __vec;
+ __vector signed long long V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed long long));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector unsigned long long __vec, long __offset,
unsigned long long *__ptr) {
- *(__vector unsigned long long *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned long long V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned long long));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstd2(__vector double __vec, long __offset, double *__ptr) {
- *(__vector double *)((char *)__ptr + __offset) = __vec;
+ __vector double V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector double));
}
/*-- vec_xstw4 --------------------------------------------------------------*/
@@ -1304,37 +1387,48 @@ vec_xstd2(__vector double __vec, long __offset, double *__ptr) {
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstw4(__vector signed char __vec, long __offset, signed char *__ptr) {
- *(__vector signed char *)((char *)__ptr + __offset) = __vec;
+ __vector signed char V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed char));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstw4(__vector unsigned char __vec, long __offset, unsigned char *__ptr) {
- *(__vector unsigned char *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned char V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned char));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstw4(__vector signed short __vec, long __offset, signed short *__ptr) {
- *(__vector signed short *)((char *)__ptr + __offset) = __vec;
+ __vector signed short V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector signed short));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstw4(__vector unsigned short __vec, long __offset, unsigned short *__ptr) {
- *(__vector unsigned short *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned short V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned short));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstw4(__vector signed int __vec, long __offset, signed int *__ptr) {
- *(__vector signed int *)((char *)__ptr + __offset) = __vec;
+ __vector signed int V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V, sizeof(__vector signed int));
}
// This prototype is deprecated.
static inline __ATTRS_o_ai void
vec_xstw4(__vector unsigned int __vec, long __offset, unsigned int *__ptr) {
- *(__vector unsigned int *)((char *)__ptr + __offset) = __vec;
+ __vector unsigned int V = __vec;
+ __builtin_memcpy(((char *)__ptr + __offset), &V,
+ sizeof(__vector unsigned int));
}
/*-- vec_load_bndry ---------------------------------------------------------*/
@@ -9259,6 +9353,41 @@ vec_fp_test_data_class(__vector double __a, int __b, int *__c)
__VEC_CLASS_FP_ZERO | \
__VEC_CLASS_FP_INFINITY)
+/*-- vec_extend_to_fp32_hi --------------------------------------------------*/
+
+#if __ARCH__ >= 14
+#define vec_extend_to_fp32_hi(X, W) \
+ ((__vector float)__builtin_s390_vclfnhs((X), (W)))
+#endif
+
+/*-- vec_extend_to_fp32_lo --------------------------------------------------*/
+
+#if __ARCH__ >= 14
+#define vec_extend_to_fp32_lo(X, W) \
+ ((__vector float)__builtin_s390_vclfnls((X), (W)))
+#endif
+
+/*-- vec_round_from_fp32 ----------------------------------------------------*/
+
+#if __ARCH__ >= 14
+#define vec_round_from_fp32(X, Y, W) \
+ ((__vector unsigned short)__builtin_s390_vcrnfs((X), (Y), (W)))
+#endif
+
+/*-- vec_convert_to_fp16 ----------------------------------------------------*/
+
+#if __ARCH__ >= 14
+#define vec_convert_to_fp16(X, W) \
+ ((__vector unsigned short)__builtin_s390_vcfn((X), (W)))
+#endif
+
+/*-- vec_convert_from_fp16 --------------------------------------------------*/
+
+#if __ARCH__ >= 14
+#define vec_convert_from_fp16(X, W) \
+ ((__vector unsigned short)__builtin_s390_vcnf((X), (W)))
+#endif
+
/*-- vec_cp_until_zero ------------------------------------------------------*/
static inline __ATTRS_o_ai __vector signed char
diff --git a/clang/lib/Headers/vpclmulqdqintrin.h b/clang/lib/Headers/vpclmulqdqintrin.h
index 470d83254905..44daadb07d57 100644
--- a/clang/lib/Headers/vpclmulqdqintrin.h
+++ b/clang/lib/Headers/vpclmulqdqintrin.h
@@ -19,10 +19,12 @@
(__v4di)(__m256i)(B), \
(char)(I))
+#ifdef __AVX512FINTRIN_H
#define _mm512_clmulepi64_epi128(A, B, I) \
(__m512i)__builtin_ia32_pclmulqdq512((__v8di)(__m512i)(A), \
(__v8di)(__m512i)(B), \
(char)(I))
+#endif // __AVX512FINTRIN_H
#endif /* __VPCLMULQDQINTRIN_H */
diff --git a/clang/lib/Headers/wasm_simd128.h b/clang/lib/Headers/wasm_simd128.h
index ac88516ac924..712fa0378098 100644
--- a/clang/lib/Headers/wasm_simd128.h
+++ b/clang/lib/Headers/wasm_simd128.h
@@ -40,13 +40,18 @@ typedef unsigned char __u8x8
typedef short __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
typedef unsigned short __u16x4
__attribute__((__vector_size__(8), __aligned__(8)));
+typedef int __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
+typedef unsigned int __u32x2
+ __attribute__((__vector_size__(8), __aligned__(8)));
+typedef float __f32x2 __attribute__((__vector_size__(8), __aligned__(8)));
#define __DEFAULT_FN_ATTRS \
__attribute__((__always_inline__, __nodebug__, __target__("simd128"), \
__min_vector_width__(128)))
-#define __REQUIRE_CONSTANT(e) \
- _Static_assert(__builtin_constant_p(e), "Expected constant")
+#define __REQUIRE_CONSTANT(c) \
+ __attribute__((__diagnose_if__(!__builtin_constant_p(c), \
+ #c " must be constant", "error")))
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
// UB-free unaligned access copied from xmmintrin.h
@@ -57,102 +62,158 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load(const void *__mem) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v8x16_load_splat(const void *__mem) {
- struct __wasm_v8x16_load_splat_struct {
+wasm_v128_load8_splat(const void *__mem) {
+ struct __wasm_v128_load8_splat_struct {
uint8_t __v;
} __attribute__((__packed__, __may_alias__));
- uint8_t __v = ((const struct __wasm_v8x16_load_splat_struct *)__mem)->__v;
+ uint8_t __v = ((const struct __wasm_v128_load8_splat_struct *)__mem)->__v;
return (v128_t)(__u8x16){__v, __v, __v, __v, __v, __v, __v, __v,
__v, __v, __v, __v, __v, __v, __v, __v};
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v16x8_load_splat(const void *__mem) {
- struct __wasm_v16x8_load_splat_struct {
+wasm_v128_load16_splat(const void *__mem) {
+ struct __wasm_v128_load16_splat_struct {
uint16_t __v;
} __attribute__((__packed__, __may_alias__));
- uint16_t __v = ((const struct __wasm_v16x8_load_splat_struct *)__mem)->__v;
+ uint16_t __v = ((const struct __wasm_v128_load16_splat_struct *)__mem)->__v;
return (v128_t)(__u16x8){__v, __v, __v, __v, __v, __v, __v, __v};
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v32x4_load_splat(const void *__mem) {
- struct __wasm_v32x4_load_splat_struct {
+wasm_v128_load32_splat(const void *__mem) {
+ struct __wasm_v128_load32_splat_struct {
uint32_t __v;
} __attribute__((__packed__, __may_alias__));
- uint32_t __v = ((const struct __wasm_v32x4_load_splat_struct *)__mem)->__v;
+ uint32_t __v = ((const struct __wasm_v128_load32_splat_struct *)__mem)->__v;
return (v128_t)(__u32x4){__v, __v, __v, __v};
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_v64x2_load_splat(const void *__mem) {
- struct __wasm_v64x2_load_splat_struct {
+wasm_v128_load64_splat(const void *__mem) {
+ struct __wasm_v128_load64_splat_struct {
uint64_t __v;
} __attribute__((__packed__, __may_alias__));
- uint64_t __v = ((const struct __wasm_v64x2_load_splat_struct *)__mem)->__v;
+ uint64_t __v = ((const struct __wasm_v128_load64_splat_struct *)__mem)->__v;
return (v128_t)(__u64x2){__v, __v};
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_load_8x8(const void *__mem) {
- typedef int8_t __i8x8 __attribute__((__vector_size__(8), __aligned__(8)));
- struct __wasm_i16x8_load_8x8_struct {
+wasm_i16x8_load8x8(const void *__mem) {
+ struct __wasm_i16x8_load8x8_struct {
__i8x8 __v;
} __attribute__((__packed__, __may_alias__));
- __i8x8 __v = ((const struct __wasm_i16x8_load_8x8_struct *)__mem)->__v;
+ __i8x8 __v = ((const struct __wasm_i16x8_load8x8_struct *)__mem)->__v;
return (v128_t) __builtin_convertvector(__v, __i16x8);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_load_8x8(const void *__mem) {
- typedef uint8_t __u8x8 __attribute__((__vector_size__(8), __aligned__(8)));
- struct __wasm_u16x8_load_8x8_struct {
+wasm_u16x8_load8x8(const void *__mem) {
+ struct __wasm_u16x8_load8x8_struct {
__u8x8 __v;
} __attribute__((__packed__, __may_alias__));
- __u8x8 __v = ((const struct __wasm_u16x8_load_8x8_struct *)__mem)->__v;
+ __u8x8 __v = ((const struct __wasm_u16x8_load8x8_struct *)__mem)->__v;
return (v128_t) __builtin_convertvector(__v, __u16x8);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_load_16x4(const void *__mem) {
- typedef int16_t __i16x4 __attribute__((__vector_size__(8), __aligned__(8)));
- struct __wasm_i32x4_load_16x4_struct {
+wasm_i32x4_load16x4(const void *__mem) {
+ struct __wasm_i32x4_load16x4_struct {
__i16x4 __v;
} __attribute__((__packed__, __may_alias__));
- __i16x4 __v = ((const struct __wasm_i32x4_load_16x4_struct *)__mem)->__v;
+ __i16x4 __v = ((const struct __wasm_i32x4_load16x4_struct *)__mem)->__v;
return (v128_t) __builtin_convertvector(__v, __i32x4);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_load_16x4(const void *__mem) {
- typedef uint16_t __u16x4 __attribute__((__vector_size__(8), __aligned__(8)));
- struct __wasm_u32x4_load_16x4_struct {
+wasm_u32x4_load16x4(const void *__mem) {
+ struct __wasm_u32x4_load16x4_struct {
__u16x4 __v;
} __attribute__((__packed__, __may_alias__));
- __u16x4 __v = ((const struct __wasm_u32x4_load_16x4_struct *)__mem)->__v;
+ __u16x4 __v = ((const struct __wasm_u32x4_load16x4_struct *)__mem)->__v;
return (v128_t) __builtin_convertvector(__v, __u32x4);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i64x2_load_32x2(const void *__mem) {
- typedef int32_t __i32x2 __attribute__((__vector_size__(8), __aligned__(8)));
- struct __wasm_i64x2_load_32x2_struct {
+wasm_i64x2_load32x2(const void *__mem) {
+ struct __wasm_i64x2_load32x2_struct {
__i32x2 __v;
} __attribute__((__packed__, __may_alias__));
- __i32x2 __v = ((const struct __wasm_i64x2_load_32x2_struct *)__mem)->__v;
+ __i32x2 __v = ((const struct __wasm_i64x2_load32x2_struct *)__mem)->__v;
return (v128_t) __builtin_convertvector(__v, __i64x2);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u64x2_load_32x2(const void *__mem) {
- typedef uint32_t __u32x2 __attribute__((__vector_size__(8), __aligned__(8)));
- struct __wasm_u64x2_load_32x2_struct {
+wasm_u64x2_load32x2(const void *__mem) {
+ struct __wasm_u64x2_load32x2_struct {
__u32x2 __v;
} __attribute__((__packed__, __may_alias__));
- __u32x2 __v = ((const struct __wasm_u64x2_load_32x2_struct *)__mem)->__v;
+ __u32x2 __v = ((const struct __wasm_u64x2_load32x2_struct *)__mem)->__v;
return (v128_t) __builtin_convertvector(__v, __u64x2);
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v128_load32_zero(const void *__mem) {
+ struct __wasm_v128_load32_zero_struct {
+ int32_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ int32_t __v = ((const struct __wasm_v128_load32_zero_struct *)__mem)->__v;
+ return (v128_t)(__i32x4){__v, 0, 0, 0};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_v128_load64_zero(const void *__mem) {
+ struct __wasm_v128_load64_zero_struct {
+ int64_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ int64_t __v = ((const struct __wasm_v128_load64_zero_struct *)__mem)->__v;
+ return (v128_t)(__i64x2){__v, 0};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load8_lane(
+ const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_load8_lane_struct {
+ int8_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ int8_t __v = ((const struct __wasm_v128_load8_lane_struct *)__mem)->__v;
+ __i8x16 __ret = (__i8x16)__vec;
+ __ret[__i] = __v;
+ return (v128_t)__ret;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load16_lane(
+ const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_load16_lane_struct {
+ int16_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ int16_t __v = ((const struct __wasm_v128_load16_lane_struct *)__mem)->__v;
+ __i16x8 __ret = (__i16x8)__vec;
+ __ret[__i] = __v;
+ return (v128_t)__ret;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load32_lane(
+ const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_load32_lane_struct {
+ int32_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ int32_t __v = ((const struct __wasm_v128_load32_lane_struct *)__mem)->__v;
+ __i32x4 __ret = (__i32x4)__vec;
+ __ret[__i] = __v;
+ return (v128_t)__ret;
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_load64_lane(
+ const void *__mem, v128_t __vec, int __i) __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_load64_lane_struct {
+ int64_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ int64_t __v = ((const struct __wasm_v128_load64_lane_struct *)__mem)->__v;
+ __i64x2 __ret = (__i64x2)__vec;
+ __ret[__i] = __v;
+ return (v128_t)__ret;
+}
+
static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
v128_t __a) {
// UB-free unaligned access copied from xmmintrin.h
@@ -162,6 +223,49 @@ static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store(void *__mem,
((struct __wasm_v128_store_struct *)__mem)->__v = __a;
}
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store8_lane(void *__mem,
+ v128_t __vec,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_store8_lane_struct {
+ int8_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __wasm_v128_store8_lane_struct *)__mem)->__v = ((__i8x16)__vec)[__i];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store16_lane(void *__mem,
+ v128_t __vec,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_store16_lane_struct {
+ int16_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __wasm_v128_store16_lane_struct *)__mem)->__v =
+ ((__i16x8)__vec)[__i];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store32_lane(void *__mem,
+ v128_t __vec,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_store32_lane_struct {
+ int32_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __wasm_v128_store32_lane_struct *)__mem)->__v =
+ ((__i32x4)__vec)[__i];
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS wasm_v128_store64_lane(void *__mem,
+ v128_t __vec,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ struct __wasm_v128_store64_lane_struct {
+ int64_t __v;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __wasm_v128_store64_lane_struct *)__mem)->__v =
+ ((__i64x2)__vec)[__i];
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i8x16_make(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3, int8_t __c4,
int8_t __c5, int8_t __c6, int8_t __c7, int8_t __c8, int8_t __c9,
@@ -185,6 +289,11 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_make(int32_t __c0,
return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
+ int64_t __c1) {
+ return (v128_t)(__i64x2){__c0, __c1};
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
float __c1,
float __c2,
@@ -192,150 +301,221 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_make(float __c0,
return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_make(int64_t __c0,
- int64_t __c1) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
+ double __c1) {
+ return (v128_t)(__f64x2){__c0, __c1};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i8x16_const(int8_t __c0, int8_t __c1, int8_t __c2, int8_t __c3,
+ int8_t __c4, int8_t __c5, int8_t __c6, int8_t __c7,
+ int8_t __c8, int8_t __c9, int8_t __c10, int8_t __c11,
+ int8_t __c12, int8_t __c13, int8_t __c14, int8_t __c15)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
+ __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
+ __REQUIRE_CONSTANT(__c7) __REQUIRE_CONSTANT(__c8)
+ __REQUIRE_CONSTANT(__c9) __REQUIRE_CONSTANT(__c10)
+ __REQUIRE_CONSTANT(__c11) __REQUIRE_CONSTANT(__c12)
+ __REQUIRE_CONSTANT(__c13) __REQUIRE_CONSTANT(__c14)
+ __REQUIRE_CONSTANT(__c15) {
+ return (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5,
+ __c6, __c7, __c8, __c9, __c10, __c11,
+ __c12, __c13, __c14, __c15};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_const(int16_t __c0, int16_t __c1, int16_t __c2, int16_t __c3,
+ int16_t __c4, int16_t __c5, int16_t __c6, int16_t __c7)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) __REQUIRE_CONSTANT(__c4)
+ __REQUIRE_CONSTANT(__c5) __REQUIRE_CONSTANT(__c6)
+ __REQUIRE_CONSTANT(__c7) {
+ return (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_const(int32_t __c0, int32_t __c1, int32_t __c2, int32_t __c3)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) {
+ return (v128_t)(__i32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const(int64_t __c0,
+ int64_t __c1)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
return (v128_t)(__i64x2){__c0, __c1};
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_make(double __c0,
- double __c1) {
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_const(float __c0, float __c1, float __c2, float __c3)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) __REQUIRE_CONSTANT(__c2)
+ __REQUIRE_CONSTANT(__c3) {
+ return (v128_t)(__f32x4){__c0, __c1, __c2, __c3};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const(double __c0,
+ double __c1)
+ __REQUIRE_CONSTANT(__c0) __REQUIRE_CONSTANT(__c1) {
return (v128_t)(__f64x2){__c0, __c1};
}
-#define wasm_i8x16_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, __c8, \
- __c9, __c10, __c11, __c12, __c13, __c14, __c15) \
- __extension__({ \
- __REQUIRE_CONSTANT(__c0); \
- __REQUIRE_CONSTANT(__c1); \
- __REQUIRE_CONSTANT(__c2); \
- __REQUIRE_CONSTANT(__c3); \
- __REQUIRE_CONSTANT(__c4); \
- __REQUIRE_CONSTANT(__c5); \
- __REQUIRE_CONSTANT(__c6); \
- __REQUIRE_CONSTANT(__c7); \
- __REQUIRE_CONSTANT(__c8); \
- __REQUIRE_CONSTANT(__c9); \
- __REQUIRE_CONSTANT(__c10); \
- __REQUIRE_CONSTANT(__c11); \
- __REQUIRE_CONSTANT(__c12); \
- __REQUIRE_CONSTANT(__c13); \
- __REQUIRE_CONSTANT(__c14); \
- __REQUIRE_CONSTANT(__c15); \
- (v128_t)(__i8x16){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
- __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15}; \
- })
-
-#define wasm_i16x8_const(__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7) \
- __extension__({ \
- __REQUIRE_CONSTANT(__c0); \
- __REQUIRE_CONSTANT(__c1); \
- __REQUIRE_CONSTANT(__c2); \
- __REQUIRE_CONSTANT(__c3); \
- __REQUIRE_CONSTANT(__c4); \
- __REQUIRE_CONSTANT(__c5); \
- __REQUIRE_CONSTANT(__c6); \
- __REQUIRE_CONSTANT(__c7); \
- (v128_t)(__i16x8){__c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7}; \
- })
-
-#define wasm_i32x4_const(__c0, __c1, __c2, __c3) \
- __extension__({ \
- __REQUIRE_CONSTANT(__c0); \
- __REQUIRE_CONSTANT(__c1); \
- __REQUIRE_CONSTANT(__c2); \
- __REQUIRE_CONSTANT(__c3); \
- (v128_t)(__i32x4){__c0, __c1, __c2, __c3}; \
- })
-
-#define wasm_f32x4_const(__c0, __c1, __c2, __c3) \
- __extension__({ \
- __REQUIRE_CONSTANT(__c0); \
- __REQUIRE_CONSTANT(__c1); \
- __REQUIRE_CONSTANT(__c2); \
- __REQUIRE_CONSTANT(__c3); \
- (v128_t)(__f32x4){__c0, __c1, __c2, __c3}; \
- })
-
-#define wasm_i64x2_const(__c0, __c1) \
- __extension__({ \
- __REQUIRE_CONSTANT(__c0); \
- __REQUIRE_CONSTANT(__c1); \
- (v128_t)(__i64x2){__c0, __c1}; \
- })
-
-#define wasm_f64x2_const(__c0, __c1) \
- __extension__({ \
- __REQUIRE_CONSTANT(__c0); \
- __REQUIRE_CONSTANT(__c1); \
- (v128_t)(__f64x2){__c0, __c1}; \
- })
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_const_splat(int8_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__i8x16){__c, __c, __c, __c, __c, __c, __c, __c,
+ __c, __c, __c, __c, __c, __c, __c, __c};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_const_splat(int16_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__i16x8){__c, __c, __c, __c, __c, __c, __c, __c};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_const_splat(int32_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__i32x4){__c, __c, __c, __c};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_const_splat(int64_t __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__i64x2){__c, __c};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_const_splat(float __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__f32x4){__c, __c, __c, __c};
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_const_splat(double __c)
+ __REQUIRE_CONSTANT(__c) {
+ return (v128_t)(__f64x2){__c, __c};
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_splat(int8_t __a) {
return (v128_t)(__i8x16){__a, __a, __a, __a, __a, __a, __a, __a,
__a, __a, __a, __a, __a, __a, __a, __a};
}
-#define wasm_i8x16_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_s_i8x16((__i8x16)(__a), __i))
+static __inline__ int8_t __DEFAULT_FN_ATTRS wasm_i8x16_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__i8x16)__a)[__i];
+}
-#define wasm_u8x16_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_u_i8x16((__u8x16)(__a), __i))
+static __inline__ uint8_t __DEFAULT_FN_ATTRS wasm_u8x16_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__u8x16)__a)[__i];
+}
-#define wasm_i8x16_replace_lane(__a, __i, __b) \
- ((v128_t)__builtin_wasm_replace_lane_i8x16((__i8x16)(__a), __i, __b))
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_replace_lane(v128_t __a,
+ int __i,
+ int8_t __b)
+ __REQUIRE_CONSTANT(__i) {
+ __i8x16 __v = (__i8x16)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_splat(int16_t __a) {
return (v128_t)(__i16x8){__a, __a, __a, __a, __a, __a, __a, __a};
}
-#define wasm_i16x8_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_s_i16x8((__i16x8)(__a), __i))
+static __inline__ int16_t __DEFAULT_FN_ATTRS wasm_i16x8_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__i16x8)__a)[__i];
+}
-#define wasm_u16x8_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_u_i16x8((__u16x8)(__a), __i))
+static __inline__ uint16_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extract_lane(v128_t __a, int __i) __REQUIRE_CONSTANT(__i) {
+ return ((__u16x8)__a)[__i];
+}
-#define wasm_i16x8_replace_lane(__a, __i, __b) \
- ((v128_t)__builtin_wasm_replace_lane_i16x8((__i16x8)(__a), __i, __b))
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_replace_lane(v128_t __a,
+ int __i,
+ int16_t __b)
+ __REQUIRE_CONSTANT(__i) {
+ __i16x8 __v = (__i16x8)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_splat(int32_t __a) {
return (v128_t)(__i32x4){__a, __a, __a, __a};
}
-#define wasm_i32x4_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_i32x4((__i32x4)(__a), __i))
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__i32x4)__a)[__i];
+}
-#define wasm_i32x4_replace_lane(__a, __i, __b) \
- ((v128_t)__builtin_wasm_replace_lane_i32x4((__i32x4)(__a), __i, __b))
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_replace_lane(v128_t __a,
+ int __i,
+ int32_t __b)
+ __REQUIRE_CONSTANT(__i) {
+ __i32x4 __v = (__i32x4)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_splat(int64_t __a) {
return (v128_t)(__i64x2){__a, __a};
}
-#define wasm_i64x2_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_i64x2((__i64x2)(__a), __i))
+static __inline__ int64_t __DEFAULT_FN_ATTRS wasm_i64x2_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__i64x2)__a)[__i];
+}
-#define wasm_i64x2_replace_lane(__a, __i, __b) \
- ((v128_t)__builtin_wasm_replace_lane_i64x2((__i64x2)(__a), __i, __b))
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_replace_lane(v128_t __a,
+ int __i,
+ int64_t __b)
+ __REQUIRE_CONSTANT(__i) {
+ __i64x2 __v = (__i64x2)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_splat(float __a) {
return (v128_t)(__f32x4){__a, __a, __a, __a};
}
-#define wasm_f32x4_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_f32x4((__f32x4)(__a), __i))
+static __inline__ float __DEFAULT_FN_ATTRS wasm_f32x4_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__f32x4)__a)[__i];
+}
-#define wasm_f32x4_replace_lane(__a, __i, __b) \
- ((v128_t)__builtin_wasm_replace_lane_f32x4((__f32x4)(__a), __i, __b))
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_replace_lane(v128_t __a,
+ int __i,
+ float __b)
+ __REQUIRE_CONSTANT(__i) {
+ __f32x4 __v = (__f32x4)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_splat(double __a) {
return (v128_t)(__f64x2){__a, __a};
}
-#define wasm_f64x2_extract_lane(__a, __i) \
- (__builtin_wasm_extract_lane_f64x2((__f64x2)(__a), __i))
+static __inline__ double __DEFAULT_FN_ATTRS wasm_f64x2_extract_lane(v128_t __a,
+ int __i)
+ __REQUIRE_CONSTANT(__i) {
+ return ((__f64x2)__a)[__i];
+}
-#define wasm_f64x2_replace_lane(__a, __i, __b) \
- ((v128_t)__builtin_wasm_replace_lane_f64x2((__f64x2)(__a), __i, __b))
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_replace_lane(v128_t __a,
+ int __i,
+ double __b)
+ __REQUIRE_CONSTANT(__i) {
+ __f64x2 __v = (__f64x2)__a;
+ __v[__i] = __b;
+ return (v128_t)__v;
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_eq(v128_t __a,
v128_t __b) {
@@ -487,6 +667,36 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_ge(v128_t __a,
return (v128_t)((__u32x4)__a >= (__u32x4)__b);
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_eq(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i64x2)__a == (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ne(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i64x2)__a != (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_lt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i64x2)__a < (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_gt(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i64x2)__a > (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_le(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i64x2)__a <= (__i64x2)__b);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_ge(v128_t __a,
+ v128_t __b) {
+ return (v128_t)((__i64x2)__a >= (__i64x2)__b);
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_eq(v128_t __a,
v128_t __b) {
return (v128_t)((__f32x4)__a == (__f32x4)__b);
@@ -571,6 +781,10 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_andnot(v128_t __a,
return __a & ~__b;
}
+static __inline__ bool __DEFAULT_FN_ATTRS wasm_v128_any_true(v128_t __a) {
+ return __builtin_wasm_any_true_v128((__i8x16)__a);
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v128_bitselect(v128_t __a,
v128_t __b,
v128_t __mask) {
@@ -586,14 +800,18 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_neg(v128_t __a) {
return (v128_t)(-(__u8x16)__a);
}
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_any_true(v128_t __a) {
- return __builtin_wasm_any_true_i8x16((__i8x16)__a);
-}
-
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i8x16_all_true(v128_t __a) {
return __builtin_wasm_all_true_i8x16((__i8x16)__a);
}
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i8x16_bitmask(v128_t __a) {
+ return __builtin_wasm_bitmask_i8x16((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_popcnt(v128_t __a) {
+ return (v128_t)__builtin_wasm_popcnt_i8x16((__i8x16)__a);
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_shl(v128_t __a,
int32_t __b) {
return (v128_t)((__i8x16)__a << __b);
@@ -614,16 +832,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add(v128_t __a,
return (v128_t)((__u8x16)__a + (__u8x16)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_add_saturate_s_i8x16((__i8x16)__a,
- (__i8x16)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_add_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_add_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_add_saturate_u_i8x16((__u8x16)__a,
- (__u8x16)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_add_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_add_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
@@ -631,16 +847,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub(v128_t __a,
return (v128_t)((__u8x16)__a - (__u8x16)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_sub_saturate_s_i8x16((__i8x16)__a,
- (__i8x16)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_sub_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_sat_s_i8x16((__i8x16)__a, (__i8x16)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_sub_saturate_u_i8x16((__u8x16)__a,
- (__u8x16)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u8x16_sub_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_sat_u_i8x16((__u8x16)__a, (__u8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_min(v128_t __a,
@@ -676,14 +890,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_neg(v128_t __a) {
return (v128_t)(-(__u16x8)__a);
}
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_any_true(v128_t __a) {
- return __builtin_wasm_any_true_i16x8((__i16x8)__a);
-}
-
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i16x8_all_true(v128_t __a) {
return __builtin_wasm_all_true_i16x8((__i16x8)__a);
}
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i16x8_bitmask(v128_t __a) {
+ return __builtin_wasm_bitmask_i16x8((__i16x8)__a);
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_shl(v128_t __a,
int32_t __b) {
return (v128_t)((__i16x8)__a << __b);
@@ -704,16 +918,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add(v128_t __a,
return (v128_t)((__u16x8)__a + (__u16x8)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_add_saturate_s_i16x8((__i16x8)__a,
- (__i16x8)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_add_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_add_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_add_saturate_u_i16x8((__u16x8)__a,
- (__u16x8)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_add_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_add_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
@@ -721,16 +933,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub(v128_t __a,
return (v128_t)((__i16x8)__a - (__i16x8)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_sub_saturate_s_i16x8((__i16x8)__a,
- (__i16x8)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_sub_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_sub_saturate_u_i16x8((__u16x8)__a,
- (__u16x8)__b);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u16x8_sub_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_sub_sat_u_i16x8((__u16x8)__a, (__u16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_mul(v128_t __a,
@@ -771,14 +981,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_neg(v128_t __a) {
return (v128_t)(-(__u32x4)__a);
}
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_any_true(v128_t __a) {
- return __builtin_wasm_any_true_i32x4((__i32x4)__a);
-}
-
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i32x4_all_true(v128_t __a) {
return __builtin_wasm_all_true_i32x4((__i32x4)__a);
}
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i32x4_bitmask(v128_t __a) {
+ return __builtin_wasm_bitmask_i32x4((__i32x4)__a);
+}
+
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_shl(v128_t __a,
int32_t __b) {
return (v128_t)((__i32x4)__a << __b);
@@ -829,21 +1039,26 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_u32x4_max(v128_t __a,
return (v128_t)__builtin_wasm_max_u_i32x4((__u32x4)__a, (__u32x4)__b);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
- return (v128_t)(-(__u64x2)__a);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i32x4_dot_i16x8(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_dot_s_i32x4_i16x8((__i16x8)__a, (__i16x8)__b);
}
-#ifdef __wasm_unimplemented_simd128__
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_abs(v128_t __a) {
+ return (v128_t)__builtin_wasm_abs_i64x2((__i64x2)__a);
+}
-static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_any_true(v128_t __a) {
- return __builtin_wasm_any_true_i64x2((__i64x2)__a);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_neg(v128_t __a) {
+ return (v128_t)(-(__u64x2)__a);
}
static __inline__ bool __DEFAULT_FN_ATTRS wasm_i64x2_all_true(v128_t __a) {
return __builtin_wasm_all_true_i64x2((__i64x2)__a);
}
-#endif // __wasm_unimplemented_simd128__
+static __inline__ int32_t __DEFAULT_FN_ATTRS wasm_i64x2_bitmask(v128_t __a) {
+ return __builtin_wasm_bitmask_i64x2((__i64x2)__a);
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i64x2_shl(v128_t __a,
int32_t __b) {
@@ -887,23 +1102,21 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_sqrt(v128_t __a) {
return (v128_t)__builtin_wasm_sqrt_f32x4((__f32x4)__a);
}
-#ifdef __wasm_unimplemented_simd128__
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_ceil(v128_t __a) {
+ return (v128_t)__builtin_wasm_ceil_f32x4((__f32x4)__a);
+}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfma(v128_t __a,
- v128_t __b,
- v128_t __c) {
- return (v128_t)__builtin_wasm_qfma_f32x4((__f32x4)__a, (__f32x4)__b,
- (__f32x4)__c);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_floor(v128_t __a) {
+ return (v128_t)__builtin_wasm_floor_f32x4((__f32x4)__a);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_qfms(v128_t __a,
- v128_t __b,
- v128_t __c) {
- return (v128_t)__builtin_wasm_qfms_f32x4((__f32x4)__a, (__f32x4)__b,
- (__f32x4)__c);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_trunc(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_f32x4((__f32x4)__a);
}
-#endif // __wasm_unimplemented_simd128__
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_nearest(v128_t __a) {
+ return (v128_t)__builtin_wasm_nearest_f32x4((__f32x4)__a);
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_add(v128_t __a,
v128_t __b) {
@@ -937,12 +1150,14 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmin(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_pmin_f32x4((__f32x4)__a, (__f32x4)__b);
+ __i32x4 __mask = (__i32x4)((__f32x4)__b < (__f32x4)__a);
+ return (v128_t)((((__i32x4)__b) & __mask) | (((__i32x4)__a) & ~__mask));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f32x4_pmax(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_pmax_f32x4((__f32x4)__a, (__f32x4)__b);
+ __i32x4 __mask = (__i32x4)((__f32x4)__a < (__f32x4)__b);
+ return (v128_t)((((__i32x4)__b) & __mask) | (((__i32x4)__a) & ~__mask));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_abs(v128_t __a) {
@@ -957,23 +1172,21 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_sqrt(v128_t __a) {
return (v128_t)__builtin_wasm_sqrt_f64x2((__f64x2)__a);
}
-#ifdef __wasm_unimplemented_simd128__
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_ceil(v128_t __a) {
+ return (v128_t)__builtin_wasm_ceil_f64x2((__f64x2)__a);
+}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfma(v128_t __a,
- v128_t __b,
- v128_t __c) {
- return (v128_t)__builtin_wasm_qfma_f64x2((__f64x2)__a, (__f64x2)__b,
- (__f64x2)__c);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_floor(v128_t __a) {
+ return (v128_t)__builtin_wasm_floor_f64x2((__f64x2)__a);
}
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_qfms(v128_t __a,
- v128_t __b,
- v128_t __c) {
- return (v128_t)__builtin_wasm_qfms_f64x2((__f64x2)__a, (__f64x2)__b,
- (__f64x2)__c);
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_trunc(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_f64x2((__f64x2)__a);
}
-#endif // __wasm_unimplemented_simd128__
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_nearest(v128_t __a) {
+ return (v128_t)__builtin_wasm_nearest_f64x2((__f64x2)__a);
+}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_add(v128_t __a,
v128_t __b) {
@@ -1007,21 +1220,23 @@ static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_max(v128_t __a,
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmin(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_pmin_f64x2((__f64x2)__a, (__f64x2)__b);
+ __i64x2 __mask = (__i64x2)((__f64x2)__b < (__f64x2)__a);
+ return (v128_t)((((__i64x2)__b) & __mask) | (((__i64x2)__a) & ~__mask));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_f64x2_pmax(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_pmax_f64x2((__f64x2)__a, (__f64x2)__b);
+ __i64x2 __mask = (__i64x2)((__f64x2)__a < (__f64x2)__b);
+ return (v128_t)((((__i64x2)__b) & __mask) | (((__i64x2)__a) & ~__mask));
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+wasm_i32x4_trunc_sat_f32x4(v128_t __a) {
return (v128_t)__builtin_wasm_trunc_saturate_s_i32x4_f32x4((__f32x4)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+wasm_u32x4_trunc_sat_f32x4(v128_t __a) {
return (v128_t)__builtin_wasm_trunc_saturate_u_i32x4_f32x4((__f32x4)__a);
}
@@ -1035,38 +1250,71 @@ wasm_f32x4_convert_u32x4(v128_t __a) {
return (v128_t) __builtin_convertvector((__u32x4)__a, __f32x4);
}
-#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f64x2_convert_low_i32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector((__i32x2){__a[0], __a[1]}, __f64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f64x2_convert_low_u32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector((__u32x2){__a[0], __a[1]}, __f64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
+ return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f32x4_demote_f64x2_zero(v128_t __a) {
+ return (v128_t) __builtin_convertvector(
+ __builtin_shufflevector((__f64x2)__a, (__f64x2){0, 0}, 0, 1, 2, 3),
+ __f32x4);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_f64x2_promote_low_f32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector(
+ (__f32x2){((__f32x4)__a)[0], ((__f32x4)__a)[1]}, __f64x2);
+}
+
+#define wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
__c7, __c8, __c9, __c10, __c11, __c12, __c13, \
__c14, __c15) \
- ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ ((v128_t)__builtin_wasm_shuffle_i8x16( \
(__i8x16)(__a), (__i8x16)(__b), __c0, __c1, __c2, __c3, __c4, __c5, \
__c6, __c7, __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15))
-#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+#define wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
__c7) \
- ((v128_t)__builtin_wasm_shuffle_v8x16( \
+ ((v128_t)__builtin_wasm_shuffle_i8x16( \
(__i8x16)(__a), (__i8x16)(__b), (__c0)*2, (__c0)*2 + 1, (__c1)*2, \
(__c1)*2 + 1, (__c2)*2, (__c2)*2 + 1, (__c3)*2, (__c3)*2 + 1, (__c4)*2, \
(__c4)*2 + 1, (__c5)*2, (__c5)*2 + 1, (__c6)*2, (__c6)*2 + 1, (__c7)*2, \
(__c7)*2 + 1))
-#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
- ((v128_t)__builtin_wasm_shuffle_v8x16( \
+#define wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
+ ((v128_t)__builtin_wasm_shuffle_i8x16( \
(__i8x16)(__a), (__i8x16)(__b), (__c0)*4, (__c0)*4 + 1, (__c0)*4 + 2, \
(__c0)*4 + 3, (__c1)*4, (__c1)*4 + 1, (__c1)*4 + 2, (__c1)*4 + 3, \
(__c2)*4, (__c2)*4 + 1, (__c2)*4 + 2, (__c2)*4 + 3, (__c3)*4, \
(__c3)*4 + 1, (__c3)*4 + 2, (__c3)*4 + 3))
-#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \
- ((v128_t)__builtin_wasm_shuffle_v8x16( \
+#define wasm_i64x2_shuffle(__a, __b, __c0, __c1) \
+ ((v128_t)__builtin_wasm_shuffle_i8x16( \
(__i8x16)(__a), (__i8x16)(__b), (__c0)*8, (__c0)*8 + 1, (__c0)*8 + 2, \
(__c0)*8 + 3, (__c0)*8 + 4, (__c0)*8 + 5, (__c0)*8 + 6, (__c0)*8 + 7, \
(__c1)*8, (__c1)*8 + 1, (__c1)*8 + 2, (__c1)*8 + 3, (__c1)*8 + 4, \
(__c1)*8 + 5, (__c1)*8 + 6, (__c1)*8 + 7))
-static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_v8x16_swizzle(v128_t __a,
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i8x16_swizzle(v128_t __a,
v128_t __b) {
- return (v128_t)__builtin_wasm_swizzle_v8x16((__i8x16)__a, (__i8x16)__b);
+ return (v128_t)__builtin_wasm_swizzle_i8x16((__i8x16)__a, (__i8x16)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1077,8 +1325,8 @@ wasm_i8x16_narrow_i16x8(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u8x16_narrow_i16x8(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__u16x8)__a,
- (__u16x8)__b);
+ return (v128_t)__builtin_wasm_narrow_u_i8x16_i16x8((__i16x8)__a,
+ (__i16x8)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
@@ -1089,12 +1337,12 @@ wasm_i16x8_narrow_i32x4(v128_t __a, v128_t __b) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u16x8_narrow_i32x4(v128_t __a, v128_t __b) {
- return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__u32x4)__a,
- (__u32x4)__b);
+ return (v128_t)__builtin_wasm_narrow_u_i16x8_i32x4((__i32x4)__a,
+ (__i32x4)__b);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_low_i8x16(v128_t __a) {
+wasm_i16x8_extend_low_i8x16(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__i8x8){((__i8x16)__a)[0], ((__i8x16)__a)[1], ((__i8x16)__a)[2],
((__i8x16)__a)[3], ((__i8x16)__a)[4], ((__i8x16)__a)[5],
@@ -1103,7 +1351,7 @@ wasm_i16x8_widen_low_i8x16(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_high_i8x16(v128_t __a) {
+wasm_i16x8_extend_high_i8x16(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__i8x8){((__i8x16)__a)[8], ((__i8x16)__a)[9], ((__i8x16)__a)[10],
((__i8x16)__a)[11], ((__i8x16)__a)[12], ((__i8x16)__a)[13],
@@ -1112,7 +1360,7 @@ wasm_i16x8_widen_high_i8x16(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_low_u8x16(v128_t __a) {
+wasm_u16x8_extend_low_u8x16(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__u8x8){((__u8x16)__a)[0], ((__u8x16)__a)[1], ((__u8x16)__a)[2],
((__u8x16)__a)[3], ((__u8x16)__a)[4], ((__u8x16)__a)[5],
@@ -1121,7 +1369,7 @@ wasm_i16x8_widen_low_u8x16(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i16x8_widen_high_u8x16(v128_t __a) {
+wasm_u16x8_extend_high_u8x16(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__u8x8){((__u8x16)__a)[8], ((__u8x16)__a)[9], ((__u8x16)__a)[10],
((__u8x16)__a)[11], ((__u8x16)__a)[12], ((__u8x16)__a)[13],
@@ -1130,7 +1378,7 @@ wasm_i16x8_widen_high_u8x16(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_low_i16x8(v128_t __a) {
+wasm_i32x4_extend_low_i16x8(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__i16x4){((__i16x8)__a)[0], ((__i16x8)__a)[1], ((__i16x8)__a)[2],
((__i16x8)__a)[3]},
@@ -1138,7 +1386,7 @@ wasm_i32x4_widen_low_i16x8(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_high_i16x8(v128_t __a) {
+wasm_i32x4_extend_high_i16x8(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__i16x4){((__i16x8)__a)[4], ((__i16x8)__a)[5], ((__i16x8)__a)[6],
((__i16x8)__a)[7]},
@@ -1146,7 +1394,7 @@ wasm_i32x4_widen_high_i16x8(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_low_u16x8(v128_t __a) {
+wasm_u32x4_extend_low_u16x8(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__u16x4){((__u16x8)__a)[0], ((__u16x8)__a)[1], ((__u16x8)__a)[2],
((__u16x8)__a)[3]},
@@ -1154,14 +1402,333 @@ wasm_i32x4_widen_low_u16x8(v128_t __a) {
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
-wasm_i32x4_widen_high_u16x8(v128_t __a) {
+wasm_u32x4_extend_high_u16x8(v128_t __a) {
return (v128_t) __builtin_convertvector(
(__u16x4){((__u16x8)__a)[4], ((__u16x8)__a)[5], ((__u16x8)__a)[6],
((__u16x8)__a)[7]},
__u32x4);
}
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extend_low_i32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector(
+ (__i32x2){((__i32x4)__a)[0], ((__i32x4)__a)[1]}, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extend_high_i32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector(
+ (__i32x2){((__i32x4)__a)[2], ((__i32x4)__a)[3]}, __i64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extend_low_u32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector(
+ (__u32x2){((__u32x4)__a)[0], ((__u32x4)__a)[1]}, __u64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extend_high_u32x4(v128_t __a) {
+ return (v128_t) __builtin_convertvector(
+ (__u32x2){((__u32x4)__a)[2], ((__u32x4)__a)[3]}, __u64x2);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_extadd_pairwise_i8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_s_i16x8((__i8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extadd_pairwise_u8x16(v128_t __a) {
+ return (v128_t)__builtin_wasm_extadd_pairwise_i8x16_u_i16x8((__u8x16)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_extadd_pairwise_i16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_s_i32x4((__i16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extadd_pairwise_u16x8(v128_t __a) {
+ return (v128_t)__builtin_wasm_extadd_pairwise_i16x8_u_i32x4((__u16x8)__a);
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_extmul_low_i8x16(v128_t __a, v128_t __b) {
+ return (v128_t)((__i16x8)wasm_i16x8_extend_low_i8x16(__a) *
+ (__i16x8)wasm_i16x8_extend_low_i8x16(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i16x8_extmul_high_i8x16(v128_t __a, v128_t __b) {
+ return (v128_t)((__i16x8)wasm_i16x8_extend_high_i8x16(__a) *
+ (__i16x8)wasm_i16x8_extend_high_i8x16(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extmul_low_u8x16(v128_t __a, v128_t __b) {
+ return (v128_t)((__u16x8)wasm_u16x8_extend_low_u8x16(__a) *
+ (__u16x8)wasm_u16x8_extend_low_u8x16(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u16x8_extmul_high_u8x16(v128_t __a, v128_t __b) {
+ return (v128_t)((__u16x8)wasm_u16x8_extend_high_u8x16(__a) *
+ (__u16x8)wasm_u16x8_extend_high_u8x16(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_extmul_low_i16x8(v128_t __a, v128_t __b) {
+ return (v128_t)((__i32x4)wasm_i32x4_extend_low_i16x8(__a) *
+ (__i32x4)wasm_i32x4_extend_low_i16x8(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i32x4_extmul_high_i16x8(v128_t __a, v128_t __b) {
+ return (v128_t)((__i32x4)wasm_i32x4_extend_high_i16x8(__a) *
+ (__i32x4)wasm_i32x4_extend_high_i16x8(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extmul_low_u16x8(v128_t __a, v128_t __b) {
+ return (v128_t)((__u32x4)wasm_u32x4_extend_low_u16x8(__a) *
+ (__u32x4)wasm_u32x4_extend_low_u16x8(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u32x4_extmul_high_u16x8(v128_t __a, v128_t __b) {
+ return (v128_t)((__u32x4)wasm_u32x4_extend_high_u16x8(__a) *
+ (__u32x4)wasm_u32x4_extend_high_u16x8(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extmul_low_i32x4(v128_t __a, v128_t __b) {
+ return (v128_t)((__i64x2)wasm_i64x2_extend_low_i32x4(__a) *
+ (__i64x2)wasm_i64x2_extend_low_i32x4(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_i64x2_extmul_high_i32x4(v128_t __a, v128_t __b) {
+ return (v128_t)((__i64x2)wasm_i64x2_extend_high_i32x4(__a) *
+ (__i64x2)wasm_i64x2_extend_high_i32x4(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extmul_low_u32x4(v128_t __a, v128_t __b) {
+ return (v128_t)((__u64x2)wasm_u64x2_extend_low_u32x4(__a) *
+ (__u64x2)wasm_u64x2_extend_low_u32x4(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS
+wasm_u64x2_extmul_high_u32x4(v128_t __a, v128_t __b) {
+ return (v128_t)((__u64x2)wasm_u64x2_extend_high_u32x4(__a) *
+ (__u64x2)wasm_u64x2_extend_high_u32x4(__b));
+}
+
+static __inline__ v128_t __DEFAULT_FN_ATTRS wasm_i16x8_q15mulr_sat(v128_t __a,
+ v128_t __b) {
+ return (v128_t)__builtin_wasm_q15mulr_sat_s_i16x8((__i16x8)__a, (__i16x8)__b);
+}
+
+// Old intrinsic names supported to ease transitioning to the standard names. Do
+// not use these; they will be removed in the near future.
+
+#define __DEPRECATED_FN_ATTRS(__replacement) \
+ __DEFAULT_FN_ATTRS __attribute__( \
+ (deprecated("use " __replacement " instead", __replacement)))
+
+#define __WASM_STR(X) #X
+
+#ifdef __DEPRECATED
+#define __DEPRECATED_WASM_MACRO(__name, __replacement) \
+ _Pragma(__WASM_STR(GCC warning( \
+ "'" __name "' is deprecated: use '" __replacement "' instead")))
+#else
+#define __DEPRECATED_WASM_MACRO(__name, __replacement)
+#endif
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load8_splat")
+wasm_v8x16_load_splat(const void *__mem) {
+ return wasm_v128_load8_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load16_splat")
+wasm_v16x8_load_splat(const void *__mem) {
+ return wasm_v128_load16_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load32_splat")
+wasm_v32x4_load_splat(const void *__mem) {
+ return wasm_v128_load32_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_v128_load64_splat")
+wasm_v64x2_load_splat(const void *__mem) {
+ return wasm_v128_load64_splat(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_load8x8")
+wasm_i16x8_load_8x8(const void *__mem) {
+ return wasm_i16x8_load8x8(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_load8x8")
+wasm_u16x8_load_8x8(const void *__mem) {
+ return wasm_u16x8_load8x8(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_load16x4")
+wasm_i32x4_load_16x4(const void *__mem) {
+ return wasm_i32x4_load16x4(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_load16x4")
+wasm_u32x4_load_16x4(const void *__mem) {
+ return wasm_u32x4_load16x4(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i64x2_load32x2")
+wasm_i64x2_load_32x2(const void *__mem) {
+ return wasm_i64x2_load32x2(__mem);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u64x2_load32x2")
+wasm_u64x2_load_32x2(const void *__mem) {
+ return wasm_u64x2_load32x2(__mem);
+}
+
+#define wasm_v8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+ __c7, __c8, __c9, __c10, __c11, __c12, __c13, \
+ __c14, __c15) \
+ __DEPRECATED_WASM_MACRO("wasm_v8x16_shuffle", "wasm_i8x16_shuffle") \
+ wasm_i8x16_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7, \
+ __c8, __c9, __c10, __c11, __c12, __c13, __c14, __c15)
+
+#define wasm_v16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, \
+ __c7) \
+ __DEPRECATED_WASM_MACRO("wasm_v16x8_shuffle", "wasm_i16x8_shuffle") \
+ wasm_i16x8_shuffle(__a, __b, __c0, __c1, __c2, __c3, __c4, __c5, __c6, __c7)
+
+#define wasm_v32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3) \
+ __DEPRECATED_WASM_MACRO("wasm_v32x4_shuffle", "wasm_i32x4_shuffle") \
+ wasm_i32x4_shuffle(__a, __b, __c0, __c1, __c2, __c3)
+
+#define wasm_v64x2_shuffle(__a, __b, __c0, __c1) \
+ __DEPRECATED_WASM_MACRO("wasm_v64x2_shuffle", "wasm_i64x2_shuffle") \
+ wasm_i64x2_shuffle(__a, __b, __c0, __c1)
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_swizzle")
+wasm_v8x16_swizzle(v128_t __a, v128_t __b) {
+ return wasm_i8x16_swizzle(__a, __b);
+}
+
+static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
+wasm_i8x16_any_true(v128_t __a) {
+ return wasm_v128_any_true(__a);
+}
+
+static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
+wasm_i16x8_any_true(v128_t __a) {
+ return wasm_v128_any_true(__a);
+}
+
+static __inline__ bool __DEPRECATED_FN_ATTRS("wasm_v128_any_true")
+wasm_i32x4_any_true(v128_t __a) {
+ return wasm_v128_any_true(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_add_sat")
+wasm_i8x16_add_saturate(v128_t __a, v128_t __b) {
+ return wasm_i8x16_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_add_sat")
+wasm_u8x16_add_saturate(v128_t __a, v128_t __b) {
+ return wasm_u8x16_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i8x16_sub_sat")
+wasm_i8x16_sub_saturate(v128_t __a, v128_t __b) {
+ return wasm_i8x16_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u8x16_sub_sat")
+wasm_u8x16_sub_saturate(v128_t __a, v128_t __b) {
+ return wasm_u8x16_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_add_sat")
+wasm_i16x8_add_saturate(v128_t __a, v128_t __b) {
+ return wasm_i16x8_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_add_sat")
+wasm_u16x8_add_saturate(v128_t __a, v128_t __b) {
+ return wasm_u16x8_add_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_sub_sat")
+wasm_i16x8_sub_saturate(v128_t __a, v128_t __b) {
+ return wasm_i16x8_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_sub_sat")
+wasm_u16x8_sub_saturate(v128_t __a, v128_t __b) {
+ return wasm_u16x8_sub_sat(__a, __b);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_low_i8x16")
+wasm_i16x8_widen_low_i8x16(v128_t __a) {
+ return wasm_i16x8_extend_low_i8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i16x8_extend_high_i8x16")
+wasm_i16x8_widen_high_i8x16(v128_t __a) {
+ return wasm_i16x8_extend_high_i8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_low_u8x16")
+wasm_i16x8_widen_low_u8x16(v128_t __a) {
+ return wasm_u16x8_extend_low_u8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u16x8_extend_high_u8x16")
+wasm_i16x8_widen_high_u8x16(v128_t __a) {
+ return wasm_u16x8_extend_high_u8x16(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_low_i16x8")
+wasm_i32x4_widen_low_i16x8(v128_t __a) {
+ return wasm_i32x4_extend_low_i16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_extend_high_i16x8")
+wasm_i32x4_widen_high_i16x8(v128_t __a) {
+ return wasm_i32x4_extend_high_i16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_low_u16x8")
+wasm_i32x4_widen_low_u16x8(v128_t __a) {
+ return wasm_u32x4_extend_low_u16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_extend_high_u16x8")
+wasm_i32x4_widen_high_u16x8(v128_t __a) {
+ return wasm_u32x4_extend_high_u16x8(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_i32x4_trunc_sat_f32x4")
+wasm_i32x4_trunc_saturate_f32x4(v128_t __a) {
+ return wasm_i32x4_trunc_sat_f32x4(__a);
+}
+
+static __inline__ v128_t __DEPRECATED_FN_ATTRS("wasm_u32x4_trunc_sat_f32x4")
+wasm_u32x4_trunc_saturate_f32x4(v128_t __a) {
+ return wasm_u32x4_trunc_sat_f32x4(__a);
+}
+
// Undefine helper macros
#undef __DEFAULT_FN_ATTRS
+#undef __DEPRECATED_FN_ATTRS
#endif // __WASM_SIMD128_H
diff --git a/clang/lib/Index/FileIndexRecord.cpp b/clang/lib/Index/FileIndexRecord.cpp
index df18a9aed8b7..d392a2bedeba 100644
--- a/clang/lib/Index/FileIndexRecord.cpp
+++ b/clang/lib/Index/FileIndexRecord.cpp
@@ -17,42 +17,63 @@
using namespace clang;
using namespace clang::index;
+ArrayRef<DeclOccurrence>
+FileIndexRecord::getDeclOccurrencesSortedByOffset() const {
+ if (!IsSorted) {
+ llvm::stable_sort(Decls,
+ [](const DeclOccurrence &A, const DeclOccurrence &B) {
+ return A.Offset < B.Offset;
+ });
+ IsSorted = true;
+ }
+ return Decls;
+}
+
void FileIndexRecord::addDeclOccurence(SymbolRoleSet Roles, unsigned Offset,
const Decl *D,
ArrayRef<SymbolRelation> Relations) {
assert(D->isCanonicalDecl() &&
"Occurrences should be associated with their canonical decl");
+ IsSorted = false;
+ Decls.emplace_back(Roles, Offset, D, Relations);
+}
- auto IsNextOccurence = [&]() -> bool {
- if (Decls.empty())
- return true;
- auto &Last = Decls.back();
- return Last.Offset < Offset;
- };
-
- if (IsNextOccurence()) {
- Decls.emplace_back(Roles, Offset, D, Relations);
- return;
- }
+void FileIndexRecord::addMacroOccurence(SymbolRoleSet Roles, unsigned Offset,
+ const IdentifierInfo *Name,
+ const MacroInfo *MI) {
+ IsSorted = false;
+ Decls.emplace_back(Roles, Offset, Name, MI);
+}
- DeclOccurrence NewInfo(Roles, Offset, D, Relations);
- // We keep Decls in order as we need to access them in this order in all cases.
- auto It = llvm::upper_bound(Decls, NewInfo);
- Decls.insert(It, std::move(NewInfo));
+void FileIndexRecord::removeHeaderGuardMacros() {
+ auto It =
+ std::remove_if(Decls.begin(), Decls.end(), [](const DeclOccurrence &D) {
+ if (const auto *MI = D.DeclOrMacro.dyn_cast<const MacroInfo *>())
+ return MI->isUsedForHeaderGuard();
+ return false;
+ });
+ Decls.erase(It, Decls.end());
}
-void FileIndexRecord::print(llvm::raw_ostream &OS) const {
+void FileIndexRecord::print(llvm::raw_ostream &OS, SourceManager &SM) const {
OS << "DECLS BEGIN ---\n";
for (auto &DclInfo : Decls) {
- const Decl *D = DclInfo.Dcl;
- SourceManager &SM = D->getASTContext().getSourceManager();
- SourceLocation Loc = SM.getFileLoc(D->getLocation());
- PresumedLoc PLoc = SM.getPresumedLoc(Loc);
- OS << llvm::sys::path::filename(PLoc.getFilename()) << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
-
- if (auto ND = dyn_cast<NamedDecl>(D)) {
- OS << ' ' << ND->getDeclName();
+ if (const auto *D = DclInfo.DeclOrMacro.dyn_cast<const Decl *>()) {
+ SourceLocation Loc = SM.getFileLoc(D->getLocation());
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ OS << llvm::sys::path::filename(PLoc.getFilename()) << ':'
+ << PLoc.getLine() << ':' << PLoc.getColumn();
+
+ if (const auto *ND = dyn_cast<NamedDecl>(D)) {
+ OS << ' ' << ND->getDeclName();
+ }
+ } else {
+ const auto *MI = DclInfo.DeclOrMacro.get<const MacroInfo *>();
+ SourceLocation Loc = SM.getFileLoc(MI->getDefinitionLoc());
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ OS << llvm::sys::path::filename(PLoc.getFilename()) << ':'
+ << PLoc.getLine() << ':' << PLoc.getColumn();
+ OS << ' ' << DclInfo.MacroName->getName();
}
OS << '\n';
diff --git a/clang/lib/Index/FileIndexRecord.h b/clang/lib/Index/FileIndexRecord.h
index 37bf96a71964..621d5b78977d 100644
--- a/clang/lib/Index/FileIndexRecord.h
+++ b/clang/lib/Index/FileIndexRecord.h
@@ -27,14 +27,13 @@ class FileIndexRecord {
private:
FileID FID;
bool IsSystem;
- std::vector<DeclOccurrence> Decls;
+ mutable bool IsSorted = false;
+ mutable std::vector<DeclOccurrence> Decls;
public:
FileIndexRecord(FileID FID, bool IsSystem) : FID(FID), IsSystem(IsSystem) {}
- ArrayRef<DeclOccurrence> getDeclOccurrencesSortedByOffset() const {
- return Decls;
- }
+ ArrayRef<DeclOccurrence> getDeclOccurrencesSortedByOffset() const;
FileID getFileID() const { return FID; }
bool isSystem() const { return IsSystem; }
@@ -48,7 +47,21 @@ public:
/// \param Relations the set of symbols related to this occurrence.
void addDeclOccurence(SymbolRoleSet Roles, unsigned Offset, const Decl *D,
ArrayRef<SymbolRelation> Relations);
- void print(llvm::raw_ostream &OS) const;
+
+ /// Adds an occurrence of the given macro at the supplied \c Offset.
+ ///
+ /// \param Roles the roles the occurrence fulfills in this position.
+ /// \param Offset the offset in the file of this occurrence.
+ /// \param Name the name of the macro.
+ /// \param MI the canonical declaration this is an occurrence of.
+ void addMacroOccurence(SymbolRoleSet Roles, unsigned Offset,
+ const IdentifierInfo *Name, const MacroInfo *MI);
+
+ /// Remove any macro occurrences for header guards. When preprocessing, this
+ /// will only be accurate after HandleEndOfFile.
+ void removeHeaderGuardMacros();
+
+ void print(llvm::raw_ostream &OS, SourceManager &SM) const;
};
} // end namespace index
diff --git a/clang/lib/Index/IndexBody.cpp b/clang/lib/Index/IndexBody.cpp
index e4944fd0fc3b..fa35f749d028 100644
--- a/clang/lib/Index/IndexBody.cpp
+++ b/clang/lib/Index/IndexBody.cpp
@@ -286,9 +286,6 @@ public:
}
bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- if (E->isClassReceiver())
- IndexCtx.handleReference(E->getClassReceiver(), E->getReceiverLocation(),
- Parent, ParentDC);
if (E->isExplicitProperty()) {
SmallVector<SymbolRelation, 2> Relations;
SymbolRoleSet Roles = getRolesForRef(E, Relations);
@@ -466,6 +463,15 @@ public:
}
return true;
}
+
+ bool VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ SmallVector<SymbolRelation, 4> Relations;
+ SymbolRoleSet Roles = getRolesForRef(E, Relations);
+ for (auto *D : E->decls())
+ IndexCtx.handleReference(D, E->getNameLoc(), Parent, ParentDC, Roles,
+ Relations, E);
+ return true;
+ }
};
} // anonymous namespace
diff --git a/clang/lib/Index/IndexDecl.cpp b/clang/lib/Index/IndexDecl.cpp
index 2ba323e63575..00adb3644ff2 100644
--- a/clang/lib/Index/IndexDecl.cpp
+++ b/clang/lib/Index/IndexDecl.cpp
@@ -759,7 +759,7 @@ bool IndexingContext::indexDeclContext(const DeclContext *DC) {
}
bool IndexingContext::indexTopLevelDecl(const Decl *D) {
- if (D->getLocation().isInvalid())
+ if (!D || D->getLocation().isInvalid())
return true;
if (isa<ObjCMethodDecl>(D))
diff --git a/clang/lib/Index/IndexSymbol.cpp b/clang/lib/Index/IndexSymbol.cpp
index 0d2e557cdd36..68e457de5265 100644
--- a/clang/lib/Index/IndexSymbol.cpp
+++ b/clang/lib/Index/IndexSymbol.cpp
@@ -329,6 +329,11 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
Info.Kind = SymbolKind::Using;
Info.Lang = SymbolLanguage::CXX;
break;
+ case Decl::UsingEnum:
+ Info.Kind = SymbolKind::Using;
+ Info.Lang = SymbolLanguage::CXX;
+ Info.SubKind = SymbolSubKind::UsingEnum;
+ break;
case Decl::Binding:
Info.Kind = SymbolKind::Variable;
Info.Lang = SymbolLanguage::CXX;
@@ -542,6 +547,8 @@ StringRef index::getSymbolSubKindString(SymbolSubKind K) {
case SymbolSubKind::AccessorSetter: return "acc-set";
case SymbolSubKind::UsingTypename: return "using-typename";
case SymbolSubKind::UsingValue: return "using-value";
+ case SymbolSubKind::UsingEnum:
+ return "using-enum";
}
llvm_unreachable("invalid symbol subkind");
}
diff --git a/clang/lib/Index/IndexingAction.cpp b/clang/lib/Index/IndexingAction.cpp
index 4986303cac47..c9fcaad31128 100644
--- a/clang/lib/Index/IndexingAction.cpp
+++ b/clang/lib/Index/IndexingAction.cpp
@@ -51,6 +51,51 @@ public:
MacroNameTok.getLocation(),
*MD.getMacroInfo());
}
+
+ void Defined(const Token &MacroNameTok, const MacroDefinition &MD,
+ SourceRange Range) override {
+    if (!MD.getMacroInfo()) // Ignore non-existent macro.
+ return;
+ // Note: this is defined(M), not #define M
+ IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD.getMacroInfo());
+ }
+ void Ifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ if (!MD.getMacroInfo()) // Ignore non-existent macro.
+ return;
+ IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD.getMacroInfo());
+ }
+ void Ifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+    if (!MD.getMacroInfo()) // Ignore non-existent macro.
+ return;
+ IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD.getMacroInfo());
+ }
+
+ using PPCallbacks::Elifdef;
+ using PPCallbacks::Elifndef;
+ void Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ if (!MD.getMacroInfo()) // Ignore non-existent macro.
+ return;
+ IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD.getMacroInfo());
+ }
+ void Elifndef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) override {
+ if (!MD.getMacroInfo()) // Ignore non-existent macro.
+ return;
+ IndexCtx->handleMacroReference(*MacroNameTok.getIdentifierInfo(),
+ MacroNameTok.getLocation(),
+ *MD.getMacroInfo());
+ }
};
class IndexASTConsumer final : public ASTConsumer {
@@ -162,23 +207,54 @@ static void indexTranslationUnit(ASTUnit &Unit, IndexingContext &IndexCtx) {
Unit.visitLocalTopLevelDecls(&IndexCtx, topLevelDeclVisitor);
}
-static void indexPreprocessorMacros(const Preprocessor &PP,
+static void indexPreprocessorMacro(const IdentifierInfo *II,
+ const MacroInfo *MI,
+ MacroDirective::Kind DirectiveKind,
+ SourceLocation Loc,
+ IndexDataConsumer &DataConsumer) {
+ // When using modules, it may happen that we find #undef of a macro that
+ // was defined in another module. In such case, MI may be nullptr, since
+ // we only look for macro definitions in the current TU. In that case,
+ // there is nothing to index.
+ if (!MI)
+ return;
+
+ // Skip implicit visibility change.
+ if (DirectiveKind == MacroDirective::MD_Visibility)
+ return;
+
+ auto Role = DirectiveKind == MacroDirective::MD_Define
+ ? SymbolRole::Definition
+ : SymbolRole::Undefinition;
+ DataConsumer.handleMacroOccurrence(II, MI, static_cast<unsigned>(Role), Loc);
+}
+
+static void indexPreprocessorMacros(Preprocessor &PP,
IndexDataConsumer &DataConsumer) {
- for (const auto &M : PP.macros())
- if (MacroDirective *MD = M.second.getLatest()) {
- auto *MI = MD->getMacroInfo();
- // When using modules, it may happen that we find #undef of a macro that
- // was defined in another module. In such case, MI may be nullptr, since
- // we only look for macro definitions in the current TU. In that case,
- // there is nothing to index.
- if (!MI)
- continue;
-
- DataConsumer.handleMacroOccurrence(
- M.first, MD->getMacroInfo(),
- static_cast<unsigned>(index::SymbolRole::Definition),
- MD->getLocation());
+ for (const auto &M : PP.macros()) {
+ for (auto *MD = M.second.getLatest(); MD; MD = MD->getPrevious()) {
+ indexPreprocessorMacro(M.first, MD->getMacroInfo(), MD->getKind(),
+ MD->getLocation(), DataConsumer);
}
+ }
+}
+
+static void indexPreprocessorModuleMacros(Preprocessor &PP,
+ serialization::ModuleFile &Mod,
+ IndexDataConsumer &DataConsumer) {
+ for (const auto &M : PP.macros()) {
+ if (M.second.getLatest() == nullptr) {
+ for (auto *MM : PP.getLeafModuleMacros(M.first)) {
+ auto *OwningMod = MM->getOwningModule();
+ if (OwningMod && OwningMod->getASTFile() == Mod.File) {
+ if (auto *MI = MM->getMacroInfo()) {
+ indexPreprocessorMacro(M.first, MI, MacroDirective::MD_Define,
+ MI->getDefinitionLoc(), DataConsumer);
+ }
+ }
+ }
+ }
+ }
}
void index::indexASTUnit(ASTUnit &Unit, IndexDataConsumer &DataConsumer,
@@ -225,8 +301,9 @@ void index::indexModuleFile(serialization::ModuleFile &Mod, ASTReader &Reader,
IndexCtx.setASTContext(Ctx);
DataConsumer.initialize(Ctx);
- if (Opts.IndexMacrosInPreprocessor)
- indexPreprocessorMacros(Reader.getPreprocessor(), DataConsumer);
+ if (Opts.IndexMacrosInPreprocessor) {
+ indexPreprocessorModuleMacros(Reader.getPreprocessor(), Mod, DataConsumer);
+ }
for (const Decl *D : Reader.getModuleFileLevelDecls(Mod)) {
IndexCtx.indexTopLevelDecl(D);
diff --git a/clang/lib/Index/IndexingContext.cpp b/clang/lib/Index/IndexingContext.cpp
index 784a6008575b..8a962a055bac 100644
--- a/clang/lib/Index/IndexingContext.cpp
+++ b/clang/lib/Index/IndexingContext.cpp
@@ -457,6 +457,8 @@ bool IndexingContext::handleDeclOccurrence(const Decl *D, SourceLocation Loc,
void IndexingContext::handleMacroDefined(const IdentifierInfo &Name,
SourceLocation Loc,
const MacroInfo &MI) {
+ if (!shouldIndexMacroOccurrence(/*IsRef=*/false, Loc))
+ return;
SymbolRoleSet Roles = (unsigned)SymbolRole::Definition;
DataConsumer.handleMacroOccurrence(&Name, &MI, Roles, Loc);
}
@@ -464,6 +466,8 @@ void IndexingContext::handleMacroDefined(const IdentifierInfo &Name,
void IndexingContext::handleMacroUndefined(const IdentifierInfo &Name,
SourceLocation Loc,
const MacroInfo &MI) {
+ if (!shouldIndexMacroOccurrence(/*IsRef=*/false, Loc))
+ return;
SymbolRoleSet Roles = (unsigned)SymbolRole::Undefinition;
DataConsumer.handleMacroOccurrence(&Name, &MI, Roles, Loc);
}
@@ -471,6 +475,37 @@ void IndexingContext::handleMacroUndefined(const IdentifierInfo &Name,
void IndexingContext::handleMacroReference(const IdentifierInfo &Name,
SourceLocation Loc,
const MacroInfo &MI) {
+ if (!shouldIndexMacroOccurrence(/*IsRef=*/true, Loc))
+ return;
SymbolRoleSet Roles = (unsigned)SymbolRole::Reference;
DataConsumer.handleMacroOccurrence(&Name, &MI, Roles, Loc);
}
+
+bool IndexingContext::shouldIndexMacroOccurrence(bool IsRef,
+ SourceLocation Loc) {
+ if (!IndexOpts.IndexMacros)
+ return false;
+
+ switch (IndexOpts.SystemSymbolFilter) {
+ case IndexingOptions::SystemSymbolFilterKind::None:
+ break;
+ case IndexingOptions::SystemSymbolFilterKind::DeclarationsOnly:
+ if (!IsRef)
+ return true;
+ break;
+ case IndexingOptions::SystemSymbolFilterKind::All:
+ return true;
+ }
+
+ SourceManager &SM = Ctx->getSourceManager();
+ FileID FID = SM.getFileID(SM.getFileLoc(Loc));
+ if (FID.isInvalid())
+ return false;
+
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &SEntry = SM.getSLocEntry(FID, &Invalid);
+ if (Invalid || !SEntry.isFile())
+ return false;
+
+ return SEntry.getFile().getFileCharacteristic() == SrcMgr::C_User;
+}
diff --git a/clang/lib/Index/IndexingContext.h b/clang/lib/Index/IndexingContext.h
index 3136878c080c..626d81f003e9 100644
--- a/clang/lib/Index/IndexingContext.h
+++ b/clang/lib/Index/IndexingContext.h
@@ -124,6 +124,8 @@ public:
private:
bool shouldIgnoreIfImplicit(const Decl *D);
+ bool shouldIndexMacroOccurrence(bool IsRef, SourceLocation Loc);
+
bool handleDeclOccurrence(const Decl *D, SourceLocation Loc,
bool IsRef, const Decl *Parent,
SymbolRoleSet Roles,
diff --git a/clang/lib/Index/USRGeneration.cpp b/clang/lib/Index/USRGeneration.cpp
index abaeb1a4232f..6db763ca6f2b 100644
--- a/clang/lib/Index/USRGeneration.cpp
+++ b/clang/lib/Index/USRGeneration.cpp
@@ -732,6 +732,8 @@ void USRGenerator::VisitType(QualType T) {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::ShortAccum:
case BuiltinType::Accum:
case BuiltinType::LongAccum:
@@ -1101,15 +1103,14 @@ bool clang::index::generateUSRForMacro(const MacroDefinitionRecord *MD,
bool clang::index::generateUSRForMacro(StringRef MacroName, SourceLocation Loc,
const SourceManager &SM,
SmallVectorImpl<char> &Buf) {
- // Don't generate USRs for things with invalid locations.
- if (MacroName.empty() || Loc.isInvalid())
+ if (MacroName.empty())
return true;
llvm::raw_svector_ostream Out(Buf);
// Assume that system headers are sane. Don't put source location
// information into the USR if the macro comes from a system header.
- bool ShouldGenerateLocation = !SM.isInSystemHeader(Loc);
+ bool ShouldGenerateLocation = Loc.isValid() && !SM.isInSystemHeader(Loc);
Out << getUSRSpacePrefix();
if (ShouldGenerateLocation)
diff --git a/clang/lib/Interpreter/IncrementalExecutor.cpp b/clang/lib/Interpreter/IncrementalExecutor.cpp
new file mode 100644
index 000000000000..9a368d9122bc
--- /dev/null
+++ b/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -0,0 +1,63 @@
+//===--- IncrementalExecutor.cpp - Incremental Execution --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the class which performs incremental code execution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IncrementalExecutor.h"
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/TargetSelect.h"
+
+namespace clang {
+
+IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
+ llvm::Error &Err,
+ const llvm::Triple &Triple)
+ : TSCtx(TSC) {
+ using namespace llvm::orc;
+ llvm::ErrorAsOutParameter EAO(&Err);
+
+ auto JTMB = JITTargetMachineBuilder(Triple);
+ if (auto JitOrErr = LLJITBuilder().setJITTargetMachineBuilder(JTMB).create())
+ Jit = std::move(*JitOrErr);
+ else {
+ Err = JitOrErr.takeError();
+ return;
+ }
+
+ const char Pref = Jit->getDataLayout().getGlobalPrefix();
+ // Discover symbols from the process as a fallback.
+ if (auto PSGOrErr = DynamicLibrarySearchGenerator::GetForCurrentProcess(Pref))
+ Jit->getMainJITDylib().addGenerator(std::move(*PSGOrErr));
+ else {
+ Err = PSGOrErr.takeError();
+ return;
+ }
+}
+
+IncrementalExecutor::~IncrementalExecutor() {}
+
+llvm::Error IncrementalExecutor::addModule(std::unique_ptr<llvm::Module> M) {
+ return Jit->addIRModule(llvm::orc::ThreadSafeModule(std::move(M), TSCtx));
+}
+
+llvm::Error IncrementalExecutor::runCtors() const {
+ return Jit->initialize(Jit->getMainJITDylib());
+}
+
+} // end namespace clang
diff --git a/clang/lib/Interpreter/IncrementalExecutor.h b/clang/lib/Interpreter/IncrementalExecutor.h
new file mode 100644
index 000000000000..b4c6ddec1047
--- /dev/null
+++ b/clang/lib/Interpreter/IncrementalExecutor.h
@@ -0,0 +1,48 @@
+//===--- IncrementalExecutor.h - Incremental Execution ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the class which performs incremental code execution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
+#define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+
+#include <memory>
+
+namespace llvm {
+class Error;
+class Module;
+namespace orc {
+class LLJIT;
+class ThreadSafeContext;
+} // namespace orc
+} // namespace llvm
+
+namespace clang {
+class IncrementalExecutor {
+ using CtorDtorIterator = llvm::orc::CtorDtorIterator;
+ std::unique_ptr<llvm::orc::LLJIT> Jit;
+ llvm::orc::ThreadSafeContext &TSCtx;
+
+public:
+ IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC, llvm::Error &Err,
+ const llvm::Triple &Triple);
+ ~IncrementalExecutor();
+
+ llvm::Error addModule(std::unique_ptr<llvm::Module> M);
+ llvm::Error runCtors() const;
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
diff --git a/clang/lib/Interpreter/IncrementalParser.cpp b/clang/lib/Interpreter/IncrementalParser.cpp
new file mode 100644
index 000000000000..897e2cd1aaed
--- /dev/null
+++ b/clang/lib/Interpreter/IncrementalParser.cpp
@@ -0,0 +1,292 @@
+//===--------- IncrementalParser.cpp - Incremental Compilation -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the class which performs incremental code compilation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "IncrementalParser.h"
+
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/FrontendTool/Utils.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/Sema.h"
+
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Timer.h"
+
+#include <sstream>
+
+namespace clang {
+
+/// A custom action enabling the incremental processing functionality.
+///
+/// The usual \p FrontendAction expects one call to ExecuteAction and once it
+/// sees a call to \p EndSourceFile it deletes some of the important objects
+/// such as \p Preprocessor and \p Sema assuming no further input will come.
+///
+/// \p IncrementalAction ensures it keeps its underlying action's objects alive
+/// as long as the \p IncrementalParser needs them.
+///
+class IncrementalAction : public WrapperFrontendAction {
+private:
+ bool IsTerminating = false;
+
+public:
+ IncrementalAction(CompilerInstance &CI, llvm::LLVMContext &LLVMCtx,
+ llvm::Error &Err)
+ : WrapperFrontendAction([&]() {
+ llvm::ErrorAsOutParameter EAO(&Err);
+ std::unique_ptr<FrontendAction> Act;
+ switch (CI.getFrontendOpts().ProgramAction) {
+ default:
+ Err = llvm::createStringError(
+ std::errc::state_not_recoverable,
+ "Driver initialization failed. "
+ "Incremental mode for action %d is not supported",
+ CI.getFrontendOpts().ProgramAction);
+ return Act;
+ case frontend::ASTDump:
+ LLVM_FALLTHROUGH;
+ case frontend::ASTPrint:
+ LLVM_FALLTHROUGH;
+ case frontend::ParseSyntaxOnly:
+ Act = CreateFrontendAction(CI);
+ break;
+ case frontend::EmitAssembly:
+ LLVM_FALLTHROUGH;
+ case frontend::EmitObj:
+ LLVM_FALLTHROUGH;
+ case frontend::EmitLLVMOnly:
+ Act.reset(new EmitLLVMOnlyAction(&LLVMCtx));
+ break;
+ }
+ return Act;
+ }()) {}
+ FrontendAction *getWrapped() const { return WrappedAction.get(); }
+ TranslationUnitKind getTranslationUnitKind() override {
+ return TU_Incremental;
+ }
+ void ExecuteAction() override {
+ CompilerInstance &CI = getCompilerInstance();
+ assert(CI.hasPreprocessor() && "No PP!");
+
+ // FIXME: Move the truncation aspect of this into Sema, we delayed this till
+ // here so the source manager would be initialized.
+ if (hasCodeCompletionSupport() &&
+ !CI.getFrontendOpts().CodeCompletionAt.FileName.empty())
+ CI.createCodeCompletionConsumer();
+
+ // Use a code completion consumer?
+ CodeCompleteConsumer *CompletionConsumer = nullptr;
+ if (CI.hasCodeCompletionConsumer())
+ CompletionConsumer = &CI.getCodeCompletionConsumer();
+
+ Preprocessor &PP = CI.getPreprocessor();
+ PP.enableIncrementalProcessing();
+ PP.EnterMainSourceFile();
+
+ if (!CI.hasSema())
+ CI.createSema(getTranslationUnitKind(), CompletionConsumer);
+ }
+
+ // Do not terminate after processing the input. This allows us to keep various
+ // clang objects alive and to incrementally grow the current TU.
+ void EndSourceFile() override {
+ // The WrappedAction can be nullptr if we issued an error in the ctor.
+ if (IsTerminating && getWrapped())
+ WrapperFrontendAction::EndSourceFile();
+ }
+
+ void FinalizeAction() {
+ assert(!IsTerminating && "Already finalized!");
+ IsTerminating = true;
+ EndSourceFile();
+ }
+};
+
+IncrementalParser::IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
+ llvm::LLVMContext &LLVMCtx,
+ llvm::Error &Err)
+ : CI(std::move(Instance)) {
+ llvm::ErrorAsOutParameter EAO(&Err);
+ Act = std::make_unique<IncrementalAction>(*CI, LLVMCtx, Err);
+ if (Err)
+ return;
+ CI->ExecuteAction(*Act);
+ Consumer = &CI->getASTConsumer();
+ P.reset(
+ new Parser(CI->getPreprocessor(), CI->getSema(), /*SkipBodies=*/false));
+ P->Initialize();
+}
+
+IncrementalParser::~IncrementalParser() { Act->FinalizeAction(); }
+
+llvm::Expected<PartialTranslationUnit &>
+IncrementalParser::ParseOrWrapTopLevelDecl() {
+ // Recover resources if we crash before exiting this method.
+ Sema &S = CI->getSema();
+ llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleanupSema(&S);
+ Sema::GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
+ Sema::LocalEagerInstantiationScope LocalInstantiations(S);
+
+ PTUs.emplace_back(PartialTranslationUnit());
+ PartialTranslationUnit &LastPTU = PTUs.back();
+ // Add a new PTU.
+ ASTContext &C = S.getASTContext();
+ C.addTranslationUnitDecl();
+ LastPTU.TUPart = C.getTranslationUnitDecl();
+
+ // Skip previous eof due to last incremental input.
+ if (P->getCurToken().is(tok::eof)) {
+ P->ConsumeToken();
+ // FIXME: Clang does not call ExitScope on finalizing the regular TU, we
+ // might want to do that around HandleEndOfTranslationUnit.
+ P->ExitScope();
+ S.CurContext = nullptr;
+ // Start a new PTU.
+ P->EnterScope(Scope::DeclScope);
+ S.ActOnTranslationUnitScope(P->getCurScope());
+ }
+
+ Parser::DeclGroupPtrTy ADecl;
+ for (bool AtEOF = P->ParseFirstTopLevelDecl(ADecl); !AtEOF;
+ AtEOF = P->ParseTopLevelDecl(ADecl)) {
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl && !Consumer->HandleTopLevelDecl(ADecl.get()))
+ return llvm::make_error<llvm::StringError>("Parsing failed. "
+ "The consumer rejected a decl",
+ std::error_code());
+ }
+
+ DiagnosticsEngine &Diags = getCI()->getDiagnostics();
+ if (Diags.hasErrorOccurred()) {
+ TranslationUnitDecl *MostRecentTU = C.getTranslationUnitDecl();
+ TranslationUnitDecl *PreviousTU = MostRecentTU->getPreviousDecl();
+ assert(PreviousTU && "Must have a TU from the ASTContext initialization!");
+ TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
+ assert(FirstTU);
+ FirstTU->RedeclLink.setLatest(PreviousTU);
+ C.TUDecl = PreviousTU;
+ S.TUScope->setEntity(PreviousTU);
+
+ // Clean up the lookup table
+ if (StoredDeclsMap *Map = PreviousTU->getLookupPtr()) {
+ for (auto I = Map->begin(); I != Map->end(); ++I) {
+ StoredDeclsList &List = I->second;
+ DeclContextLookupResult R = List.getLookupResult();
+ for (NamedDecl *D : R)
+ if (D->getTranslationUnitDecl() == MostRecentTU)
+ List.remove(D);
+ if (List.isNull())
+ Map->erase(I);
+ }
+ }
+
+ // FIXME: Do not reset the pragma handlers.
+ Diags.Reset();
+ return llvm::make_error<llvm::StringError>("Parsing failed.",
+ std::error_code());
+ }
+
+ // Process any TopLevelDecls generated by #pragma weak.
+ for (Decl *D : S.WeakTopLevelDecls()) {
+ DeclGroupRef DGR(D);
+ Consumer->HandleTopLevelDecl(DGR);
+ }
+
+ LocalInstantiations.perform();
+ GlobalInstantiations.perform();
+
+ Consumer->HandleTranslationUnit(C);
+
+ return LastPTU;
+}
+
+static CodeGenerator *getCodeGen(FrontendAction *Act) {
+ IncrementalAction *IncrAct = static_cast<IncrementalAction *>(Act);
+ FrontendAction *WrappedAct = IncrAct->getWrapped();
+ if (!WrappedAct->hasIRSupport())
+ return nullptr;
+ return static_cast<CodeGenAction *>(WrappedAct)->getCodeGenerator();
+}
+
+llvm::Expected<PartialTranslationUnit &>
+IncrementalParser::Parse(llvm::StringRef input) {
+ Preprocessor &PP = CI->getPreprocessor();
+ assert(PP.isIncrementalProcessingEnabled() && "Not in incremental mode!?");
+
+ std::ostringstream SourceName;
+ SourceName << "input_line_" << InputCount++;
+
+ // Create an uninitialized memory buffer, copy code in and append "\n"
+ size_t InputSize = input.size(); // don't include trailing 0
+ // MemBuffer size should *not* include terminating zero
+ std::unique_ptr<llvm::MemoryBuffer> MB(
+ llvm::WritableMemoryBuffer::getNewUninitMemBuffer(InputSize + 1,
+ SourceName.str()));
+ char *MBStart = const_cast<char *>(MB->getBufferStart());
+ memcpy(MBStart, input.data(), InputSize);
+ MBStart[InputSize] = '\n';
+
+ SourceManager &SM = CI->getSourceManager();
+
+ // FIXME: Create SourceLocation, which will allow clang to order the overload
+ // candidates for example
+ SourceLocation NewLoc = SM.getLocForStartOfFile(SM.getMainFileID());
+
+ // Create FileID for the current buffer.
+ FileID FID = SM.createFileID(std::move(MB), SrcMgr::C_User, /*LoadedID=*/0,
+ /*LoadedOffset=*/0, NewLoc);
+
+ // NewLoc only used for diags.
+ if (PP.EnterSourceFile(FID, /*DirLookup=*/0, NewLoc))
+ return llvm::make_error<llvm::StringError>("Parsing failed. "
+ "Cannot enter source file.",
+ std::error_code());
+
+ auto PTU = ParseOrWrapTopLevelDecl();
+ if (!PTU)
+ return PTU.takeError();
+
+ if (PP.getLangOpts().DelayedTemplateParsing) {
+ // Microsoft-specific:
+ // Late parsed templates can leave unswallowed "macro"-like tokens.
+ // They will seriously confuse the Parser when entering the next
+ // source file. So lex until we are EOF.
+ Token Tok;
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+ }
+
+ Token AssertTok;
+ PP.Lex(AssertTok);
+ assert(AssertTok.is(tok::eof) &&
+ "Lexer must be EOF when starting incremental parse!");
+
+ if (CodeGenerator *CG = getCodeGen(Act.get())) {
+ std::unique_ptr<llvm::Module> M(CG->ReleaseModule());
+ CG->StartModule("incr_module_" + std::to_string(PTUs.size()),
+ M->getContext());
+
+ PTU->TheModule = std::move(M);
+ }
+
+ return PTU;
+}
+} // end namespace clang
diff --git a/clang/lib/Interpreter/IncrementalParser.h b/clang/lib/Interpreter/IncrementalParser.h
new file mode 100644
index 000000000000..aa8142cbe493
--- /dev/null
+++ b/clang/lib/Interpreter/IncrementalParser.h
@@ -0,0 +1,77 @@
+//===--- IncrementalParser.h - Incremental Compilation ----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the class which performs incremental code compilation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
+#define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
+
+#include "clang/Interpreter/PartialTranslationUnit.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+
+#include <list>
+#include <memory>
+namespace llvm {
+class LLVMContext;
+}
+
+namespace clang {
+class ASTConsumer;
+class CompilerInstance;
+class CodeGenerator;
+class DeclGroupRef;
+class FrontendAction;
+class IncrementalAction;
+class Parser;
+
+/// Provides support for incremental compilation. Keeps track of the state
+/// changes between subsequent incremental inputs.
+///
+class IncrementalParser {
+ /// Long-lived, incremental parsing action.
+ std::unique_ptr<IncrementalAction> Act;
+
+ /// Compiler instance performing the incremental compilation.
+ std::unique_ptr<CompilerInstance> CI;
+
+ /// Parser.
+ std::unique_ptr<Parser> P;
+
+ /// Consumer to process the produced top level decls. Owned by Act.
+ ASTConsumer *Consumer = nullptr;
+
+ /// Counts the number of direct user input lines that have been parsed.
+ unsigned InputCount = 0;
+
+  /// List containing all information about every incrementally parsed piece
+ /// of code.
+ std::list<PartialTranslationUnit> PTUs;
+
+public:
+ IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
+ llvm::LLVMContext &LLVMCtx, llvm::Error &Err);
+ ~IncrementalParser();
+
+ const CompilerInstance *getCI() const { return CI.get(); }
+
+ /// Parses incremental input by creating an in-memory file.
+ ///\returns a \c PartialTranslationUnit which holds information about the
+ /// \c TranslationUnitDecl and \c llvm::Module corresponding to the input.
+ llvm::Expected<PartialTranslationUnit &> Parse(llvm::StringRef Input);
+
+private:
+ llvm::Expected<PartialTranslationUnit &> ParseOrWrapTopLevelDecl();
+};
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_INTERPRETER_INCREMENTALPARSER_H
diff --git a/clang/lib/Interpreter/Interpreter.cpp b/clang/lib/Interpreter/Interpreter.cpp
new file mode 100644
index 000000000000..937504f34739
--- /dev/null
+++ b/clang/lib/Interpreter/Interpreter.cpp
@@ -0,0 +1,225 @@
+//===------ Interpreter.cpp - Incremental Compilation and Execution -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the component which performs incremental code
+// compilation and execution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Interpreter/Interpreter.h"
+
+#include "IncrementalExecutor.h"
+#include "IncrementalParser.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Lex/PreprocessorOptions.h"
+
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Host.h"
+
+using namespace clang;
+
+// FIXME: Figure out how to unify with namespace init_convenience from
+// tools/clang-import-test/clang-import-test.cpp and
+// examples/clang-interpreter/main.cpp
+namespace {
+/// Retrieves the clang CC1 specific flags out of the compilation's jobs.
+/// \returns the CC1 argument list, or an llvm::Error on failure.
+static llvm::Expected<const llvm::opt::ArgStringList *>
+GetCC1Arguments(DiagnosticsEngine *Diagnostics,
+ driver::Compilation *Compilation) {
+ // We expect to get back exactly one Command job; if we didn't, something
+ // failed. Extract that job from the Compilation.
+ const driver::JobList &Jobs = Compilation->getJobs();
+ if (!Jobs.size() || !isa<driver::Command>(*Jobs.begin()))
+ return llvm::createStringError(std::errc::state_not_recoverable,
+ "Driver initialization failed. "
+ "Unable to create a driver job");
+
+ // The one job we find should be to invoke clang again.
+ const driver::Command *Cmd = cast<driver::Command>(&(*Jobs.begin()));
+ if (llvm::StringRef(Cmd->getCreator().getName()) != "clang")
+ return llvm::createStringError(std::errc::state_not_recoverable,
+ "Driver initialization failed");
+
+ return &Cmd->getArguments();
+}
+
+static llvm::Expected<std::unique_ptr<CompilerInstance>>
+CreateCI(const llvm::opt::ArgStringList &Argv) {
+ std::unique_ptr<CompilerInstance> Clang(new CompilerInstance());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+
+ // Register the support for object-file-wrapped Clang modules.
+ // FIXME: Clang should register these container operations automatically.
+ auto PCHOps = Clang->getPCHContainerOperations();
+ PCHOps->registerWriter(std::make_unique<ObjectFilePCHContainerWriter>());
+ PCHOps->registerReader(std::make_unique<ObjectFilePCHContainerReader>());
+
+ // Buffer diagnostics from argument parsing so that we can output them using
+ // a well formed diagnostic object.
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
+ DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
+ bool Success = CompilerInvocation::CreateFromArgs(
+ Clang->getInvocation(), llvm::makeArrayRef(Argv.begin(), Argv.size()),
+ Diags);
+
+ // Infer the builtin include path if unspecified.
+ if (Clang->getHeaderSearchOpts().UseBuiltinIncludes &&
+ Clang->getHeaderSearchOpts().ResourceDir.empty())
+ Clang->getHeaderSearchOpts().ResourceDir =
+ CompilerInvocation::GetResourcesPath(Argv[0], nullptr);
+
+ // Create the actual diagnostics engine.
+ Clang->createDiagnostics();
+ if (!Clang->hasDiagnostics())
+ return llvm::createStringError(std::errc::state_not_recoverable,
+ "Initialization failed. "
+ "Unable to create diagnostics engine");
+
+ DiagsBuffer->FlushDiagnostics(Clang->getDiagnostics());
+ if (!Success)
+ return llvm::createStringError(std::errc::state_not_recoverable,
+ "Initialization failed. "
+ "Unable to flush diagnostics");
+
+ // FIXME: Merge with CompilerInstance::ExecuteAction.
+ llvm::MemoryBuffer *MB = llvm::MemoryBuffer::getMemBuffer("").release();
+ Clang->getPreprocessorOpts().addRemappedFile("<<< inputs >>>", MB);
+
+ Clang->setTarget(TargetInfo::CreateTargetInfo(
+ Clang->getDiagnostics(), Clang->getInvocation().TargetOpts));
+ if (!Clang->hasTarget())
+ return llvm::createStringError(std::errc::state_not_recoverable,
+ "Initialization failed. "
+ "Target is missing");
+
+ Clang->getTarget().adjust(Clang->getDiagnostics(), Clang->getLangOpts());
+
+ return std::move(Clang);
+}
+
+} // anonymous namespace
+
+llvm::Expected<std::unique_ptr<CompilerInstance>>
+IncrementalCompilerBuilder::create(std::vector<const char *> &ClangArgv) {
+
+ // If we don't know ClangArgv0 or the address of main() at this point, try
+ // to guess it anyway (it's possible on some platforms).
+ std::string MainExecutableName =
+ llvm::sys::fs::getMainExecutable(nullptr, nullptr);
+
+ ClangArgv.insert(ClangArgv.begin(), MainExecutableName.c_str());
+
+ // Prepending -c to force the driver to do something if no action was
+ // specified. By prepending we allow users to override the default
+ // action and use other actions in incremental mode.
+ // FIXME: Print proper driver diagnostics if the driver flags are wrong.
+ ClangArgv.insert(ClangArgv.begin() + 1, "-c");
+
+ if (!llvm::is_contained(ClangArgv, " -x")) {
+ // We do C++ by default; add "-x c++" at the end if not already given.
+ ClangArgv.push_back("-x");
+ ClangArgv.push_back("c++");
+ }
+
+ // Put a dummy C++ file on to ensure there's at least one compile job for the
+ // driver to construct.
+ ClangArgv.push_back("<<< inputs >>>");
+
+ CompilerInvocation Invocation;
+ // Buffer diagnostics from argument parsing so that we can output them using a
+ // well formed diagnostic object.
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
+ DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
+ unsigned MissingArgIndex, MissingArgCount;
+ const llvm::opt::OptTable &Opts = driver::getDriverOptTable();
+ llvm::opt::InputArgList ParsedArgs =
+ Opts.ParseArgs(ArrayRef<const char *>(ClangArgv).slice(1),
+ MissingArgIndex, MissingArgCount);
+ ParseDiagnosticArgs(*DiagOpts, ParsedArgs, &Diags);
+
+ driver::Driver Driver(/*MainBinaryName=*/ClangArgv[0],
+ llvm::sys::getProcessTriple(), Diags);
+ Driver.setCheckInputsExist(false); // the input comes from mem buffers
+ llvm::ArrayRef<const char *> RF = llvm::makeArrayRef(ClangArgv);
+ std::unique_ptr<driver::Compilation> Compilation(Driver.BuildCompilation(RF));
+
+ if (Compilation->getArgs().hasArg(driver::options::OPT_v))
+ Compilation->getJobs().Print(llvm::errs(), "\n", /*Quote=*/false);
+
+ auto ErrOrCC1Args = GetCC1Arguments(&Diags, Compilation.get());
+ if (auto Err = ErrOrCC1Args.takeError())
+ return std::move(Err);
+
+ return CreateCI(**ErrOrCC1Args);
+}
+
+Interpreter::Interpreter(std::unique_ptr<CompilerInstance> CI,
+ llvm::Error &Err) {
+ llvm::ErrorAsOutParameter EAO(&Err);
+ auto LLVMCtx = std::make_unique<llvm::LLVMContext>();
+ TSCtx = std::make_unique<llvm::orc::ThreadSafeContext>(std::move(LLVMCtx));
+ IncrParser = std::make_unique<IncrementalParser>(std::move(CI),
+ *TSCtx->getContext(), Err);
+}
+
+Interpreter::~Interpreter() {}
+
+llvm::Expected<std::unique_ptr<Interpreter>>
+Interpreter::create(std::unique_ptr<CompilerInstance> CI) {
+ llvm::Error Err = llvm::Error::success();
+ auto Interp =
+ std::unique_ptr<Interpreter>(new Interpreter(std::move(CI), Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(Interp);
+}
+
+const CompilerInstance *Interpreter::getCompilerInstance() const {
+ return IncrParser->getCI();
+}
+
+llvm::Expected<PartialTranslationUnit &>
+Interpreter::Parse(llvm::StringRef Code) {
+ return IncrParser->Parse(Code);
+}
+
+llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
+ assert(T.TheModule);
+ if (!IncrExecutor) {
+ const llvm::Triple &Triple =
+ getCompilerInstance()->getASTContext().getTargetInfo().getTriple();
+ llvm::Error Err = llvm::Error::success();
+ IncrExecutor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, Triple);
+
+ if (Err)
+ return Err;
+ }
+ // FIXME: Add a callback to retain the llvm::Module once the JIT is done.
+ if (auto Err = IncrExecutor->addModule(std::move(T.TheModule)))
+ return Err;
+
+ if (auto Err = IncrExecutor->runCtors())
+ return Err;
+
+ return llvm::Error::success();
+}
diff --git a/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp b/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
index cdb4a79fa11a..cfca167f8bf1 100644
--- a/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
+++ b/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
@@ -846,6 +846,8 @@ bool Minimizer::lexPPLine(const char *&First, const char *const End) {
.Case("ifdef", pp_ifdef)
.Case("ifndef", pp_ifndef)
.Case("elif", pp_elif)
+ .Case("elifdef", pp_elifdef)
+ .Case("elifndef", pp_elifndef)
.Case("else", pp_else)
.Case("endif", pp_endif)
.Case("pragma", pp_pragma_import)
@@ -904,7 +906,7 @@ bool clang::minimize_source_to_dependency_directives::computeSkippedRanges(
struct Directive {
enum DirectiveKind {
If, // if/ifdef/ifndef
- Else // elif,else
+ Else // elif/elifdef/elifndef, else
};
int Offset;
DirectiveKind Kind;
@@ -919,6 +921,8 @@ bool clang::minimize_source_to_dependency_directives::computeSkippedRanges(
break;
case pp_elif:
+ case pp_elifdef:
+ case pp_elifndef:
case pp_else: {
if (Offsets.empty())
return true;
diff --git a/clang/lib/Lex/HeaderMap.cpp b/clang/lib/Lex/HeaderMap.cpp
index d44ef29c05d1..ae5e6b221953 100644
--- a/clang/lib/Lex/HeaderMap.cpp
+++ b/clang/lib/Lex/HeaderMap.cpp
@@ -224,7 +224,7 @@ StringRef HeaderMapImpl::lookupFilename(StringRef Filename,
Optional<StringRef> Key = getString(B.Key);
if (LLVM_UNLIKELY(!Key))
continue;
- if (!Filename.equals_lower(*Key))
+ if (!Filename.equals_insensitive(*Key))
continue;
// If so, we have a match in the hash table. Construct the destination
@@ -240,3 +240,32 @@ StringRef HeaderMapImpl::lookupFilename(StringRef Filename,
return StringRef(DestPath.begin(), DestPath.size());
}
}
+
+StringRef HeaderMapImpl::reverseLookupFilename(StringRef DestPath) const {
+ if (!ReverseMap.empty())
+ return ReverseMap.lookup(DestPath);
+
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+ StringRef RetKey;
+ for (unsigned i = 0; i != NumBuckets; ++i) {
+ HMapBucket B = getBucket(i);
+ if (B.Key == HMAP_EmptyBucketKey)
+ continue;
+
+ Optional<StringRef> Key = getString(B.Key);
+ Optional<StringRef> Prefix = getString(B.Prefix);
+ Optional<StringRef> Suffix = getString(B.Suffix);
+ if (LLVM_LIKELY(Key && Prefix && Suffix)) {
+ SmallVector<char, 1024> Buf;
+ Buf.append(Prefix->begin(), Prefix->end());
+ Buf.append(Suffix->begin(), Suffix->end());
+ StringRef Value(Buf.begin(), Buf.size());
+ ReverseMap[Value] = *Key;
+
+ if (DestPath == Value)
+ RetKey = *Key;
+ }
+ }
+ return RetKey;
+}
diff --git a/clang/lib/Lex/HeaderSearch.cpp b/clang/lib/Lex/HeaderSearch.cpp
index 99c92e91aad5..d5adbcf62cbc 100644
--- a/clang/lib/Lex/HeaderSearch.cpp
+++ b/clang/lib/Lex/HeaderSearch.cpp
@@ -727,7 +727,7 @@ diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
if (!isAngled && !FoundByHeaderMap) {
SmallString<128> NewInclude("<");
if (IsIncludeeInFramework) {
- NewInclude += StringRef(ToFramework).drop_back(10); // drop .framework
+ NewInclude += ToFramework.str().drop_back(10); // drop .framework
NewInclude += "/";
}
NewInclude += IncludeFilename;
@@ -1834,7 +1834,7 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
};
for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- // FIXME: Support this search within frameworks and header maps.
+ // FIXME: Support this search within frameworks.
if (!SearchDirs[I].isNormalDir())
continue;
@@ -1848,6 +1848,19 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
if (!BestPrefixLength && CheckDir(path::parent_path(MainFile)) && IsSystem)
*IsSystem = false;
+ // Try resolving resulting filename via reverse search in header maps,
+ // key from header name is user preferred name for the include file.
+ StringRef Filename = File.drop_front(BestPrefixLength);
+ for (unsigned I = 0; I != SearchDirs.size(); ++I) {
+ if (!SearchDirs[I].isHeaderMap())
+ continue;
- return path::convert_to_slash(File.drop_front(BestPrefixLength));
+ StringRef SpelledFilename =
+ SearchDirs[I].getHeaderMap()->reverseLookupFilename(Filename);
+ if (!SpelledFilename.empty()) {
+ Filename = SpelledFilename;
+ break;
+ }
+ }
+ return path::convert_to_slash(Filename);
}
diff --git a/clang/lib/Lex/Lexer.cpp b/clang/lib/Lex/Lexer.cpp
index 34732b659771..3034af231e0e 100644
--- a/clang/lib/Lex/Lexer.cpp
+++ b/clang/lib/Lex/Lexer.cpp
@@ -588,7 +588,7 @@ PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
// Create a lexer starting at the beginning of the file. Note that we use a
// "fake" file source location at offset 1 so that the lexer will track our
// position within the file.
- const unsigned StartOffset = 1;
+ const SourceLocation::UIntTy StartOffset = 1;
SourceLocation FileLoc = SourceLocation::getFromRawEncoding(StartOffset);
Lexer TheLexer(FileLoc, LangOpts, Buffer.begin(), Buffer.begin(),
Buffer.end());
@@ -682,6 +682,8 @@ PreambleBounds Lexer::ComputePreamble(StringRef Buffer,
.Case("ifdef", PDK_Skipped)
.Case("ifndef", PDK_Skipped)
.Case("elif", PDK_Skipped)
+ .Case("elifdef", PDK_Skipped)
+ .Case("elifndef", PDK_Skipped)
.Case("else", PDK_Skipped)
.Case("endif", PDK_Skipped)
.Default(PDK_Unknown);
@@ -875,6 +877,14 @@ static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
return CharSourceRange::getCharRange(Begin, End);
}
+// Assumes that `Loc` is in an expansion.
+static bool isInExpansionTokenRange(const SourceLocation Loc,
+ const SourceManager &SM) {
+ return SM.getSLocEntry(SM.getFileID(Loc))
+ .getExpansion()
+ .isExpansionTokenRange();
+}
+
CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
const SourceManager &SM,
const LangOptions &LangOpts) {
@@ -894,10 +904,12 @@ CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
}
if (Begin.isFileID() && End.isMacroID()) {
- if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
- &End)) ||
- (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
- &End)))
+ if (Range.isTokenRange()) {
+ if (!isAtEndOfMacroExpansion(End, SM, LangOpts, &End))
+ return {};
+ // Use the *original* end, not the expanded one in `End`.
+ Range.setTokenRange(isInExpansionTokenRange(Range.getEnd(), SM));
+ } else if (!isAtStartOfMacroExpansion(End, SM, LangOpts, &End))
return {};
Range.setEnd(End);
return makeRangeFromFileLocs(Range, SM, LangOpts);
@@ -912,6 +924,9 @@ CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
&MacroEnd)))) {
Range.setBegin(MacroBegin);
Range.setEnd(MacroEnd);
+ // Use the *original* `End`, not the expanded one in `MacroEnd`.
+ if (Range.isTokenRange())
+ Range.setTokenRange(isInExpansionTokenRange(End, SM));
return makeRangeFromFileLocs(Range, SM, LangOpts);
}
@@ -1788,12 +1803,14 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
}
// If we have a digit separator, continue.
- if (C == '\'' && getLangOpts().CPlusPlus14) {
+ if (C == '\'' && (getLangOpts().CPlusPlus14 || getLangOpts().C2x)) {
unsigned NextSize;
char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
if (isIdentifierBody(Next)) {
if (!isLexingRawMode())
- Diag(CurPtr, diag::warn_cxx11_compat_digit_separator);
+ Diag(CurPtr, getLangOpts().CPlusPlus
+ ? diag::warn_cxx11_compat_digit_separator
+ : diag::warn_c2x_compat_digit_separator);
CurPtr = ConsumeChar(CurPtr, Size, Result);
CurPtr = ConsumeChar(CurPtr, NextSize, Result);
return LexNumericConstant(Result, CurPtr);
@@ -2059,7 +2076,7 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
if (C == '\\')
C = getAndAdvanceChar(CurPtr, Result);
- if (C == '\n' || C == '\r' || // Newline.
+ if (isVerticalWhitespace(C) || // Newline.
(C == 0 && (CurPtr - 1 == BufferEnd))) { // End of file.
// If the filename is unterminated, then it must just be a lone <
// character. Return this as such.
@@ -2441,56 +2458,70 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
Lexer *L) {
assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
- // Back up off the newline.
- --CurPtr;
+ // Position of the first trigraph in the ending sequence.
+ const char *TrigraphPos = 0;
+ // Position of the first whitespace after a '\' in the ending sequence.
+ const char *SpacePos = 0;
- // If this is a two-character newline sequence, skip the other character.
- if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
- // \n\n or \r\r -> not escaped newline.
- if (CurPtr[0] == CurPtr[1])
- return false;
- // \n\r or \r\n -> skip the newline.
+ while (true) {
+ // Back up off the newline.
--CurPtr;
- }
- // If we have horizontal whitespace, skip over it. We allow whitespace
- // between the slash and newline.
- bool HasSpace = false;
- while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
- --CurPtr;
- HasSpace = true;
- }
+ // If this is a two-character newline sequence, skip the other character.
+ if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
+ // \n\n or \r\r -> not escaped newline.
+ if (CurPtr[0] == CurPtr[1])
+ return false;
+ // \n\r or \r\n -> skip the newline.
+ --CurPtr;
+ }
- // If we have a slash, we know this is an escaped newline.
- if (*CurPtr == '\\') {
- if (CurPtr[-1] != '*') return false;
- } else {
- // It isn't a slash, is it the ?? / trigraph?
- if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
- CurPtr[-3] != '*')
+ // If we have horizontal whitespace, skip over it. We allow whitespace
+ // between the slash and newline.
+ while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
+ SpacePos = CurPtr;
+ --CurPtr;
+ }
+
+ // If we have a slash, this is an escaped newline.
+ if (*CurPtr == '\\') {
+ --CurPtr;
+ } else if (CurPtr[0] == '/' && CurPtr[-1] == '?' && CurPtr[-2] == '?') {
+ // This is a trigraph encoding of a slash.
+ TrigraphPos = CurPtr - 2;
+ CurPtr -= 3;
+ } else {
return false;
+ }
- // This is the trigraph ending the comment. Emit a stern warning!
- CurPtr -= 2;
+ // If the character preceding the escaped newline is a '*', then after line
+ // splicing we have a '*/' ending the comment.
+ if (*CurPtr == '*')
+ break;
+ if (*CurPtr != '\n' && *CurPtr != '\r')
+ return false;
+ }
+
+ if (TrigraphPos) {
// If no trigraphs are enabled, warn that we ignored this trigraph and
// ignore this * character.
if (!L->getLangOpts().Trigraphs) {
if (!L->isLexingRawMode())
- L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
+ L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
return false;
}
if (!L->isLexingRawMode())
- L->Diag(CurPtr, diag::trigraph_ends_block_comment);
+ L->Diag(TrigraphPos, diag::trigraph_ends_block_comment);
}
// Warn about having an escaped newline between the */ characters.
if (!L->isLexingRawMode())
- L->Diag(CurPtr, diag::escaped_newline_block_comment_end);
+ L->Diag(CurPtr + 1, diag::escaped_newline_block_comment_end);
// If there was space between the backslash and newline, warn about it.
- if (HasSpace && !L->isLexingRawMode())
- L->Diag(CurPtr, diag::backslash_newline_space);
+ if (SpacePos && !L->isLexingRawMode())
+ L->Diag(SpacePos, diag::backslash_newline_space);
return true;
}
@@ -2762,6 +2793,11 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
if (PP->isRecordingPreamble() && PP->isInPrimaryFile()) {
PP->setRecordedPreambleConditionalStack(ConditionalStack);
+ // If the preamble cuts off the end of a header guard, consider it guarded.
+ // The guard is valid for the preamble content itself, and for tools the
+ // most useful answer is "yes, this file has a header guard".
+ if (!ConditionalStack.empty())
+ MIOpt.ExitTopLevelConditional();
ConditionalStack.clear();
}
@@ -3206,10 +3242,10 @@ LexNextToken:
const char *CurPtr = BufferPtr;
// Small amounts of horizontal whitespace is very common between tokens.
- if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
- ++CurPtr;
- while ((*CurPtr == ' ') || (*CurPtr == '\t'))
+ if (isHorizontalWhitespace(*CurPtr)) {
+ do {
++CurPtr;
+ } while (isHorizontalWhitespace(*CurPtr));
// If we are keeping whitespace and other tokens, just return what we just
// skipped. The next lexer invocation will return the token after the
diff --git a/clang/lib/Lex/LiteralSupport.cpp b/clang/lib/Lex/LiteralSupport.cpp
index 6c3cdbdf6492..85d826ce9c6f 100644
--- a/clang/lib/Lex/LiteralSupport.cpp
+++ b/clang/lib/Lex/LiteralSupport.cpp
@@ -546,6 +546,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isLong = false;
isUnsigned = false;
isLongLong = false;
+ isSizeT = false;
isHalf = false;
isFloat = false;
isImaginary = false;
@@ -589,6 +590,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
// integer constant.
bool isFixedPointConstant = isFixedPointLiteral();
bool isFPConstant = isFloatingLiteral();
+ bool HasSize = false;
// Loop over all of the characters of the suffix. If we see something bad,
// we break out of the loop.
@@ -616,14 +618,17 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (!(LangOpts.Half || LangOpts.FixedPoint))
break;
if (isIntegerLiteral()) break; // Error for integer constant.
- if (isHalf || isFloat || isLong) break; // HH, FH, LH invalid.
+ if (HasSize)
+ break;
+ HasSize = true;
isHalf = true;
continue; // Success.
case 'f': // FP Suffix for "float"
case 'F':
if (!isFPConstant) break; // Error for integer constant.
- if (isHalf || isFloat || isLong || isFloat128)
- break; // HF, FF, LF, QF invalid.
+ if (HasSize)
+ break;
+ HasSize = true;
// CUDA host and device may have different _Float16 support, therefore
// allows f16 literals to avoid false alarm.
@@ -640,8 +645,9 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
case 'q': // FP Suffix for "__float128"
case 'Q':
if (!isFPConstant) break; // Error for integer constant.
- if (isHalf || isFloat || isLong || isFloat128)
- break; // HQ, FQ, LQ, QQ invalid.
+ if (HasSize)
+ break;
+ HasSize = true;
isFloat128 = true;
continue; // Success.
case 'u':
@@ -652,8 +658,9 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
continue; // Success.
case 'l':
case 'L':
- if (isLong || isLongLong) break; // Cannot be repeated.
- if (isHalf || isFloat || isFloat128) break; // LH, LF, LQ invalid.
+ if (HasSize)
+ break;
+ HasSize = true;
// Check for long long. The L's need to be adjacent and the same case.
if (s[1] == s[0]) {
@@ -665,42 +672,54 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isLong = true;
}
continue; // Success.
+ case 'z':
+ case 'Z':
+ if (isFPConstant)
+ break; // Invalid for floats.
+ if (HasSize)
+ break;
+ HasSize = true;
+ isSizeT = true;
+ continue;
case 'i':
case 'I':
- if (LangOpts.MicrosoftExt) {
- if (isLong || isLongLong || MicrosoftInteger)
+ if (LangOpts.MicrosoftExt && !isFPConstant) {
+ // Allow i8, i16, i32, and i64. First, look ahead and check if
+ // suffixes are Microsoft integers and not the imaginary unit.
+ uint8_t Bits = 0;
+ size_t ToSkip = 0;
+ switch (s[1]) {
+ case '8': // i8 suffix
+ Bits = 8;
+ ToSkip = 2;
break;
-
- if (!isFPConstant) {
- // Allow i8, i16, i32, and i64.
- switch (s[1]) {
- case '8':
- s += 2; // i8 suffix
- MicrosoftInteger = 8;
- break;
- case '1':
- if (s[2] == '6') {
- s += 3; // i16 suffix
- MicrosoftInteger = 16;
- }
- break;
- case '3':
- if (s[2] == '2') {
- s += 3; // i32 suffix
- MicrosoftInteger = 32;
- }
- break;
- case '6':
- if (s[2] == '4') {
- s += 3; // i64 suffix
- MicrosoftInteger = 64;
- }
- break;
- default:
- break;
+ case '1':
+ if (s[2] == '6') { // i16 suffix
+ Bits = 16;
+ ToSkip = 3;
+ }
+ break;
+ case '3':
+ if (s[2] == '2') { // i32 suffix
+ Bits = 32;
+ ToSkip = 3;
+ }
+ break;
+ case '6':
+ if (s[2] == '4') { // i64 suffix
+ Bits = 64;
+ ToSkip = 3;
}
+ break;
+ default:
+ break;
}
- if (MicrosoftInteger) {
+ if (Bits) {
+ if (HasSize)
+ break;
+ HasSize = true;
+ MicrosoftInteger = Bits;
+ s += ToSkip;
assert(s <= ThisTokEnd && "didn't maximally munch?");
break;
}
@@ -727,6 +746,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isLong = false;
isUnsigned = false;
isLongLong = false;
+ isSizeT = false;
isFloat = false;
isFloat16 = false;
isHalf = false;
@@ -1628,16 +1648,29 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
// Check for raw string
if (ThisTokBuf[0] == 'R') {
+ if (ThisTokBuf[1] != '"') {
+ // The file may have come from PCH and then changed after loading the
+ // PCH; Fail gracefully.
+ return DiagnoseLexingError(StringToks[i].getLocation());
+ }
ThisTokBuf += 2; // skip R"
+ // C++11 [lex.string]p2: A `d-char-sequence` shall consist of at most 16
+ // characters.
+ constexpr unsigned MaxRawStrDelimLen = 16;
+
const char *Prefix = ThisTokBuf;
- while (ThisTokBuf[0] != '(')
+ while (static_cast<unsigned>(ThisTokBuf - Prefix) < MaxRawStrDelimLen &&
+ ThisTokBuf[0] != '(')
++ThisTokBuf;
+ if (ThisTokBuf[0] != '(')
+ return DiagnoseLexingError(StringToks[i].getLocation());
++ThisTokBuf; // skip '('
// Remove same number of characters from the end
ThisTokEnd -= ThisTokBuf - Prefix;
- assert(ThisTokEnd >= ThisTokBuf && "malformed raw string literal");
+ if (ThisTokEnd < ThisTokBuf)
+ return DiagnoseLexingError(StringToks[i].getLocation());
// C++14 [lex.string]p4: A source-file new-line in a raw string literal
// results in a new-line in the resulting execution string-literal.
diff --git a/clang/lib/Lex/ModuleMap.cpp b/clang/lib/Lex/ModuleMap.cpp
index bbda1f15a702..f9af7c2a24fb 100644
--- a/clang/lib/Lex/ModuleMap.cpp
+++ b/clang/lib/Lex/ModuleMap.cpp
@@ -260,9 +260,10 @@ void ModuleMap::resolveHeader(Module *Mod,
<< UmbrellaMod->getFullModuleName();
else
// Record this umbrella header.
- setUmbrellaHeader(Mod, *File, RelativePathName.str());
+ setUmbrellaHeader(Mod, *File, Header.FileName, RelativePathName.str());
} else {
- Module::Header H = {std::string(RelativePathName.str()), *File};
+ Module::Header H = {Header.FileName, std::string(RelativePathName.str()),
+ *File};
if (Header.Kind == Module::HK_Excluded)
excludeHeader(Mod, H);
else
@@ -300,12 +301,12 @@ bool ModuleMap::resolveAsBuiltinHeader(
// supplied by Clang. Find that builtin header.
SmallString<128> Path;
llvm::sys::path::append(Path, BuiltinIncludeDir->getName(), Header.FileName);
- auto File = SourceMgr.getFileManager().getOptionalFileRef(Path);
+ auto File = SourceMgr.getFileManager().getFile(Path);
if (!File)
return false;
auto Role = headerKindToRole(Header.Kind);
- Module::Header H = {std::string(Path.str()), *File};
+ Module::Header H = {Header.FileName, std::string(Path.str()), *File};
addHeader(Mod, H, Role);
return true;
}
@@ -1012,7 +1013,7 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
// Look for an umbrella header.
SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
llvm::sys::path::append(UmbrellaName, "Headers", ModuleName + ".h");
- auto UmbrellaHeader = FileMgr.getOptionalFileRef(UmbrellaName);
+ auto UmbrellaHeader = FileMgr.getFile(UmbrellaName);
// FIXME: If there's no umbrella header, we could probably scan the
// framework to load *everything*. But, it's not clear that this is a good
@@ -1038,11 +1039,13 @@ Module *ModuleMap::inferFrameworkModule(const DirectoryEntry *FrameworkDir,
Result->NoUndeclaredIncludes |= Attrs.NoUndeclaredIncludes;
Result->Directory = FrameworkDir;
+ // Chop off the first framework bit, as that is implied.
+ StringRef RelativePath = UmbrellaName.str().substr(
+ Result->getTopLevelModule()->Directory->getName().size());
+ RelativePath = llvm::sys::path::relative_path(RelativePath);
+
// umbrella header "umbrella-header-name"
- //
- // The "Headers/" component of the name is implied because this is
- // a framework module.
- setUmbrellaHeader(Result, *UmbrellaHeader, ModuleName + ".h");
+ setUmbrellaHeader(Result, *UmbrellaHeader, ModuleName + ".h", RelativePath);
// export *
Result->Exports.push_back(Module::ExportDecl(nullptr, true));
@@ -1121,22 +1124,28 @@ Module *ModuleMap::createShadowedModule(StringRef Name, bool IsFramework,
return Result;
}
-void ModuleMap::setUmbrellaHeader(Module *Mod, FileEntryRef UmbrellaHeader,
- Twine NameAsWritten) {
+void ModuleMap::setUmbrellaHeader(
+ Module *Mod, const FileEntry *UmbrellaHeader, const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory) {
Headers[UmbrellaHeader].push_back(KnownHeader(Mod, NormalHeader));
- Mod->Umbrella = &UmbrellaHeader.getMapEntry();
+ Mod->Umbrella = UmbrellaHeader;
Mod->UmbrellaAsWritten = NameAsWritten.str();
- UmbrellaDirs[UmbrellaHeader.getDir()] = Mod;
+ Mod->UmbrellaRelativeToRootModuleDirectory =
+ PathRelativeToRootModuleDirectory.str();
+ UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
// Notify callbacks that we just added a new header.
for (const auto &Cb : Callbacks)
Cb->moduleMapAddUmbrellaHeader(&SourceMgr.getFileManager(), UmbrellaHeader);
}
-void ModuleMap::setUmbrellaDir(Module *Mod, DirectoryEntryRef UmbrellaDir,
- Twine NameAsWritten) {
- Mod->Umbrella = &UmbrellaDir.getMapEntry();
+void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir,
+ const Twine &NameAsWritten,
+ const Twine &PathRelativeToRootModuleDirectory) {
+ Mod->Umbrella = UmbrellaDir;
Mod->UmbrellaAsWritten = NameAsWritten.str();
+ Mod->UmbrellaRelativeToRootModuleDirectory =
+ PathRelativeToRootModuleDirectory.str();
UmbrellaDirs[UmbrellaDir] = Mod;
}
@@ -1369,7 +1378,7 @@ namespace clang {
RSquare
} Kind;
- unsigned Location;
+ SourceLocation::UIntTy Location;
unsigned StringLength;
union {
// If Kind != IntegerLiteral.
@@ -2405,6 +2414,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
std::string DirName = std::string(Tok.getString());
+ std::string DirNameAsWritten = DirName;
SourceLocation DirNameLoc = consumeToken();
// Check whether we already have an umbrella.
@@ -2416,15 +2426,15 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Look for this file.
- Optional<DirectoryEntryRef> Dir;
+ const DirectoryEntry *Dir = nullptr;
if (llvm::sys::path::is_absolute(DirName)) {
- if (auto D = SourceMgr.getFileManager().getOptionalDirectoryRef(DirName))
+ if (auto D = SourceMgr.getFileManager().getDirectory(DirName))
Dir = *D;
} else {
SmallString<128> PathName;
PathName = Directory->getName();
llvm::sys::path::append(PathName, DirName);
- if (auto D = SourceMgr.getFileManager().getOptionalDirectoryRef(PathName))
+ if (auto D = SourceMgr.getFileManager().getDirectory(PathName))
Dir = *D;
}
@@ -2445,8 +2455,8 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
SourceMgr.getFileManager().getVirtualFileSystem();
for (llvm::vfs::recursive_directory_iterator I(FS, Dir->getName(), EC), E;
I != E && !EC; I.increment(EC)) {
- if (auto FE = SourceMgr.getFileManager().getOptionalFileRef(I->path())) {
- Module::Header Header = {std::string(I->path()), *FE};
+ if (auto FE = SourceMgr.getFileManager().getFile(I->path())) {
+ Module::Header Header = {"", std::string(I->path()), *FE};
Headers.push_back(std::move(Header));
}
}
@@ -2459,7 +2469,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
return;
}
- if (Module *OwningModule = Map.UmbrellaDirs[*Dir]) {
+ if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
<< OwningModule->getFullModuleName();
HadError = true;
@@ -2467,7 +2477,7 @@ void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
}
// Record this umbrella directory.
- Map.setUmbrellaDir(ActiveModule, *Dir, DirName);
+ Map.setUmbrellaDir(ActiveModule, Dir, DirNameAsWritten, DirName);
}
/// Parse a module export declaration.
diff --git a/clang/lib/Lex/PPCaching.cpp b/clang/lib/Lex/PPCaching.cpp
index 31548d246d5a..e05e52ba9bb5 100644
--- a/clang/lib/Lex/PPCaching.cpp
+++ b/clang/lib/Lex/PPCaching.cpp
@@ -145,7 +145,7 @@ bool Preprocessor::IsPreviousCachedToken(const Token &Tok) const {
if (LastCachedTok.getKind() != Tok.getKind())
return false;
- int RelOffset = 0;
+ SourceLocation::IntTy RelOffset = 0;
if ((!getSourceManager().isInSameSLocAddrSpace(
Tok.getLocation(), getLastCachedTokenLocation(), &RelOffset)) ||
RelOffset)
diff --git a/clang/lib/Lex/PPConditionalDirectiveRecord.cpp b/clang/lib/Lex/PPConditionalDirectiveRecord.cpp
index facee28007c7..ddc88f8e8f95 100644
--- a/clang/lib/Lex/PPConditionalDirectiveRecord.cpp
+++ b/clang/lib/Lex/PPConditionalDirectiveRecord.cpp
@@ -101,6 +101,28 @@ void PPConditionalDirectiveRecord::Elif(SourceLocation Loc,
CondDirectiveStack.back() = Loc;
}
+void PPConditionalDirectiveRecord::Elifdef(SourceLocation Loc, const Token &,
+ const MacroDefinition &) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.back() = Loc;
+}
+void PPConditionalDirectiveRecord::Elifdef(SourceLocation Loc, SourceRange,
+ SourceLocation) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.back() = Loc;
+}
+
+void PPConditionalDirectiveRecord::Elifndef(SourceLocation Loc, const Token &,
+ const MacroDefinition &) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.back() = Loc;
+}
+void PPConditionalDirectiveRecord::Elifndef(SourceLocation Loc, SourceRange,
+ SourceLocation) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.back() = Loc;
+}
+
void PPConditionalDirectiveRecord::Else(SourceLocation Loc,
SourceLocation IfLoc) {
addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
diff --git a/clang/lib/Lex/PPDirectives.cpp b/clang/lib/Lex/PPDirectives.cpp
index d6b03d85913d..556dd8daf652 100644
--- a/clang/lib/Lex/PPDirectives.cpp
+++ b/clang/lib/Lex/PPDirectives.cpp
@@ -100,31 +100,19 @@ enum MacroDiag {
MD_ReservedMacro //> #define of #undef reserved id, disabled by default
};
-/// Checks if the specified identifier is reserved in the specified
-/// language.
-/// This function does not check if the identifier is a keyword.
-static bool isReservedId(StringRef Text, const LangOptions &Lang) {
- // C++ [macro.names], C11 7.1.3:
- // All identifiers that begin with an underscore and either an uppercase
- // letter or another underscore are always reserved for any use.
- if (Text.size() >= 2 && Text[0] == '_' &&
- (isUppercase(Text[1]) || Text[1] == '_'))
- return true;
- // C++ [global.names]
- // Each name that contains a double underscore ... is reserved to the
- // implementation for any use.
- if (Lang.CPlusPlus) {
- if (Text.find("__") != StringRef::npos)
- return true;
- }
- return false;
-}
+/// Enumerates possible %select values for the pp_err_elif_after_else and
+/// pp_err_elif_without_if diagnostics.
+enum PPElifDiag {
+ PED_Elif,
+ PED_Elifdef,
+ PED_Elifndef
+};
// The -fmodule-name option tells the compiler to textually include headers in
// the specified module, meaning clang won't build the specified module. This is
// useful in a number of situations, for instance, when building a library that
// vends a module map, one might want to avoid hitting intermediate build
-// products containimg the the module map or avoid finding the system installed
+// products containing the the module map or avoid finding the system installed
// modulemap for that library.
static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
StringRef ModuleName) {
@@ -141,9 +129,50 @@ static bool isForModuleBuilding(Module *M, StringRef CurrentModule,
static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
const LangOptions &Lang = PP.getLangOpts();
- StringRef Text = II->getName();
- if (isReservedId(Text, Lang))
+ if (II->isReserved(Lang) != ReservedIdentifierStatus::NotReserved) {
+ // list from:
+ // - https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_macros.html
+ // - https://docs.microsoft.com/en-us/cpp/c-runtime-library/security-features-in-the-crt?view=msvc-160
+ // - man 7 feature_test_macros
+ // The list must be sorted for correct binary search.
+ static constexpr StringRef ReservedMacro[] = {
+ "_ATFILE_SOURCE",
+ "_BSD_SOURCE",
+ "_CRT_NONSTDC_NO_WARNINGS",
+ "_CRT_SECURE_CPP_OVERLOAD_STANDARD_NAMES",
+ "_CRT_SECURE_NO_WARNINGS",
+ "_FILE_OFFSET_BITS",
+ "_FORTIFY_SOURCE",
+ "_GLIBCXX_ASSERTIONS",
+ "_GLIBCXX_CONCEPT_CHECKS",
+ "_GLIBCXX_DEBUG",
+ "_GLIBCXX_DEBUG_PEDANTIC",
+ "_GLIBCXX_PARALLEL",
+ "_GLIBCXX_PARALLEL_ASSERTIONS",
+ "_GLIBCXX_SANITIZE_VECTOR",
+ "_GLIBCXX_USE_CXX11_ABI",
+ "_GLIBCXX_USE_DEPRECATED",
+ "_GNU_SOURCE",
+ "_ISOC11_SOURCE",
+ "_ISOC95_SOURCE",
+ "_ISOC99_SOURCE",
+ "_LARGEFILE64_SOURCE",
+ "_POSIX_C_SOURCE",
+ "_REENTRANT",
+ "_SVID_SOURCE",
+ "_THREAD_SAFE",
+ "_XOPEN_SOURCE",
+ "_XOPEN_SOURCE_EXTENDED",
+ "__STDCPP_WANT_MATH_SPEC_FUNCS__",
+ "__STDC_FORMAT_MACROS",
+ };
+ if (std::binary_search(std::begin(ReservedMacro), std::end(ReservedMacro),
+ II->getName()))
+ return MD_NoWarn;
+
return MD_ReservedMacro;
+ }
+ StringRef Text = II->getName();
if (II->isKeyword(Lang))
return MD_KeywordDef;
if (Lang.CPlusPlus11 && (Text.equals("override") || Text.equals("final")))
@@ -153,9 +182,8 @@ static MacroDiag shouldWarnOnMacroDef(Preprocessor &PP, IdentifierInfo *II) {
static MacroDiag shouldWarnOnMacroUndef(Preprocessor &PP, IdentifierInfo *II) {
const LangOptions &Lang = PP.getLangOpts();
- StringRef Text = II->getName();
// Do not warn on keyword undef. It is generally harmless and widely used.
- if (isReservedId(Text, Lang))
+ if (II->isReserved(Lang) != ReservedIdentifierStatus::NotReserved)
return MD_ReservedMacro;
return MD_NoWarn;
}
@@ -168,7 +196,7 @@ static MacroDiag shouldWarnOnMacroUndef(Preprocessor &PP, IdentifierInfo *II) {
static bool warnByDefaultOnWrongCase(StringRef Include) {
// If the first component of the path is "boost", treat this like a standard header
// for the purposes of diagnostics.
- if (::llvm::sys::path::begin(Include)->equals_lower("boost"))
+ if (::llvm::sys::path::begin(Include)->equals_insensitive("boost"))
return true;
// "condition_variable" is the longest standard header name at 18 characters.
@@ -441,9 +469,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurLexer->Lex(Tok);
if (Tok.is(tok::code_completion)) {
+ setCodeCompletionReached();
if (CodeComplete)
CodeComplete->CodeCompleteInConditionalExclusion();
- setCodeCompletionReached();
continue;
}
@@ -558,7 +586,8 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
// If this is a #else with a #else before it, report the error.
- if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_else_after_else);
+ if (CondInfo.FoundElse)
+ Diag(Tok, diag::pp_err_else_after_else);
// Note that we've seen a #else in this conditional.
CondInfo.FoundElse = true;
@@ -582,7 +611,8 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
// If this is a #elif with a #else before it, report the error.
- if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
+ if (CondInfo.FoundElse)
+ Diag(Tok, diag::pp_err_elif_after_else) << PED_Elif;
// If this is in a skipping block or if we're already handled this #if
// block, don't bother parsing the condition.
@@ -595,6 +625,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->LexingRawMode = false;
IdentifierInfo *IfNDefMacro = nullptr;
DirectiveEvalResult DER = EvaluateDirectiveExpression(IfNDefMacro);
+ // Stop if Lexer became invalid after hitting code completion token.
+ if (!CurPPLexer)
+ return;
const bool CondValue = DER.Conditional;
CurPPLexer->LexingRawMode = true;
if (Callbacks) {
@@ -609,6 +642,59 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
break;
}
}
+ } else if (Sub == "lifdef" || // "elifdef"
+ Sub == "lifndef") { // "elifndef"
+ bool IsElifDef = Sub == "lifdef";
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+ Token DirectiveToken = Tok;
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse)
+ Diag(Tok, diag::pp_err_elif_after_else)
+ << (IsElifDef ? PED_Elifdef : PED_Elifndef);
+
+ // If this is in a skipping block or if we're already handled this #if
+ // block, don't bother parsing the condition.
+ if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
+ DiscardUntilEndOfDirective();
+ } else {
+ // Restore the value of LexingRawMode so that identifiers are
+ // looked up, etc, inside the #elif[n]def expression.
+ assert(CurPPLexer->LexingRawMode && "We have to be skipping here!");
+ CurPPLexer->LexingRawMode = false;
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok);
+ CurPPLexer->LexingRawMode = true;
+
+ // If the macro name token is tok::eod, there was an error that was
+ // already reported.
+ if (MacroNameTok.is(tok::eod)) {
+ // Skip code until we get to #endif. This helps with recovery by
+ // not emitting an error when the #endif is reached.
+ continue;
+ }
+
+ CheckEndOfDirective(IsElifDef ? "elifdef" : "elifndef");
+
+ IdentifierInfo *MII = MacroNameTok.getIdentifierInfo();
+ auto MD = getMacroDefinition(MII);
+ MacroInfo *MI = MD.getMacroInfo();
+
+ if (Callbacks) {
+ if (IsElifDef) {
+ Callbacks->Elifdef(DirectiveToken.getLocation(), MacroNameTok,
+ MD);
+ } else {
+ Callbacks->Elifndef(DirectiveToken.getLocation(), MacroNameTok,
+ MD);
+ }
+ }
+ // If this condition is true, enter it!
+ if (static_cast<bool>(MI) == IsElifDef) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+ }
}
}
@@ -966,10 +1052,10 @@ void Preprocessor::HandleDirective(Token &Result) {
case tok::eod:
return; // null directive.
case tok::code_completion:
+ setCodeCompletionReached();
if (CodeComplete)
CodeComplete->CodeCompleteDirective(
CurPPLexer->getConditionalStackDepth() > 0);
- setCodeCompletionReached();
return;
case tok::numeric_constant: // # 7 GNU line marker directive.
if (getLangOpts().AsmPreprocessor)
@@ -992,7 +1078,10 @@ void Preprocessor::HandleDirective(Token &Result) {
return HandleIfdefDirective(Result, SavedHash, true,
ReadAnyTokensBeforeDirective);
case tok::pp_elif:
- return HandleElifDirective(Result, SavedHash);
+ case tok::pp_elifdef:
+ case tok::pp_elifndef:
+ return HandleElifFamilyDirective(Result, SavedHash, II->getPPKeywordID());
+
case tok::pp_else:
return HandleElseDirective(Result, SavedHash);
case tok::pp_endif:
@@ -1045,12 +1134,12 @@ void Preprocessor::HandleDirective(Token &Result) {
break;
case tok::pp___public_macro:
- if (getLangOpts().Modules)
+ if (getLangOpts().Modules || getLangOpts().ModulesLocalVisibility)
return HandleMacroPublicDirective(Result);
break;
case tok::pp___private_macro:
- if (getLangOpts().Modules)
+ if (getLangOpts().Modules || getLangOpts().ModulesLocalVisibility)
return HandleMacroPrivateDirective();
break;
}
@@ -1391,7 +1480,7 @@ void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
// Find the first non-whitespace character, so that we can make the
// diagnostic more succinct.
- StringRef Msg = StringRef(Message).ltrim(' ');
+ StringRef Msg = Message.str().ltrim(' ');
if (isWarning)
Diag(Tok, diag::pp_hash_warning) << Msg;
@@ -1634,7 +1723,8 @@ static bool trySimplifyPath(SmallVectorImpl<StringRef> &Components,
// If these path components differ by more than just case, then we
// may be looking at symlinked paths. Bail on this diagnostic to avoid
// noisy false positives.
- SuggestReplacement = RealPathComponentIter->equals_lower(Component);
+ SuggestReplacement =
+ RealPathComponentIter->equals_insensitive(Component);
if (!SuggestReplacement)
break;
Component = *RealPathComponentIter;
@@ -2870,6 +2960,23 @@ void Preprocessor::HandleDefineDirective(
// If the callbacks want to know, tell them about the macro definition.
if (Callbacks)
Callbacks->MacroDefined(MacroNameTok, MD);
+
+ // If we're in MS compatibility mode and the macro being defined is the
+ // assert macro, implicitly add a macro definition for static_assert to work
+ // around their broken assert.h header file in C. Only do so if there isn't
+ // already a static_assert macro defined.
+ if (!getLangOpts().CPlusPlus && getLangOpts().MSVCCompat &&
+ MacroNameTok.getIdentifierInfo()->isStr("assert") &&
+ !isMacroDefined("static_assert")) {
+ MacroInfo *MI = AllocateMacroInfo(SourceLocation());
+
+ Token Tok;
+ Tok.startToken();
+ Tok.setKind(tok::kw__Static_assert);
+ Tok.setIdentifierInfo(getIdentifierInfo("_Static_assert"));
+ MI->AddTokenToBody(Tok);
+ (void)appendDefMacroDirective(getIdentifierInfo("static_assert"), MI);
+ }
}
/// HandleUndefDirective - Implements \#undef.
@@ -3006,6 +3113,10 @@ void Preprocessor::HandleIfDirective(Token &IfToken,
IdentifierInfo *IfNDefMacro = nullptr;
const DirectiveEvalResult DER = EvaluateDirectiveExpression(IfNDefMacro);
const bool ConditionalTrue = DER.Conditional;
+ // Lexer might become invalid if we hit code completion point while evaluating
+ // expression.
+ if (!CurPPLexer)
+ return;
// If this condition is equivalent to #ifndef X, and if this is the first
// directive seen, handle it for the multiple-include optimization.
@@ -3110,10 +3221,13 @@ void Preprocessor::HandleElseDirective(Token &Result, const Token &HashToken) {
/*FoundElse*/ true, Result.getLocation());
}
-/// HandleElifDirective - Implements the \#elif directive.
-///
-void Preprocessor::HandleElifDirective(Token &ElifToken,
- const Token &HashToken) {
+/// Implements the \#elif, \#elifdef, and \#elifndef directives.
+void Preprocessor::HandleElifFamilyDirective(Token &ElifToken,
+ const Token &HashToken,
+ tok::PPKeywordKind Kind) {
+ PPElifDiag DirKind = Kind == tok::pp_elif ? PED_Elif
+ : Kind == tok::pp_elifdef ? PED_Elifdef
+ : PED_Elifndef;
++NumElse;
// #elif directive in a non-skipping conditional... start skipping.
@@ -3123,7 +3237,7 @@ void Preprocessor::HandleElifDirective(Token &ElifToken,
PPConditionalInfo CI;
if (CurPPLexer->popConditionalLevel(CI)) {
- Diag(ElifToken, diag::pp_err_elif_without_if);
+ Diag(ElifToken, diag::pp_err_elif_without_if) << DirKind;
return;
}
@@ -3132,11 +3246,26 @@ void Preprocessor::HandleElifDirective(Token &ElifToken,
CurPPLexer->MIOpt.EnterTopLevelConditional();
// If this is a #elif with a #else before it, report the error.
- if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
+ if (CI.FoundElse)
+ Diag(ElifToken, diag::pp_err_elif_after_else) << DirKind;
- if (Callbacks)
- Callbacks->Elif(ElifToken.getLocation(), ConditionRange,
- PPCallbacks::CVK_NotEvaluated, CI.IfLoc);
+ if (Callbacks) {
+ switch (Kind) {
+ case tok::pp_elif:
+ Callbacks->Elif(ElifToken.getLocation(), ConditionRange,
+ PPCallbacks::CVK_NotEvaluated, CI.IfLoc);
+ break;
+ case tok::pp_elifdef:
+ Callbacks->Elifdef(ElifToken.getLocation(), ConditionRange, CI.IfLoc);
+ break;
+ case tok::pp_elifndef:
+ Callbacks->Elifndef(ElifToken.getLocation(), ConditionRange, CI.IfLoc);
+ break;
+ default:
+ assert(false && "unexpected directive kind");
+ break;
+ }
+ }
bool RetainExcludedCB = PPOpts->RetainExcludedConditionalBlocks &&
getSourceManager().isInMainFile(ElifToken.getLocation());
diff --git a/clang/lib/Lex/PPExpressions.cpp b/clang/lib/Lex/PPExpressions.cpp
index 8c120c13d7d2..cab4bab630dc 100644
--- a/clang/lib/Lex/PPExpressions.cpp
+++ b/clang/lib/Lex/PPExpressions.cpp
@@ -321,6 +321,14 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
PP.Diag(PeekTok, diag::ext_c99_longlong);
}
+ // 'z/uz' literals are a C++2b feature.
+ if (Literal.isSizeT)
+ PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus
+ ? PP.getLangOpts().CPlusPlus2b
+ ? diag::warn_cxx20_compat_size_t_suffix
+ : diag::ext_cxx2b_size_t_suffix
+ : diag::err_cxx2b_size_t_suffix);
+
// Parse the integer literal into Result.
if (Literal.GetIntegerValue(Result.Val)) {
// Overflow parsing integer literal.
@@ -658,13 +666,13 @@ static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
if (ValueLive && Res.isUnsigned()) {
if (!LHS.isUnsigned() && LHS.Val.isNegative())
PP.Diag(OpLoc, diag::warn_pp_convert_to_positive) << 0
- << LHS.Val.toString(10, true) + " to " +
- LHS.Val.toString(10, false)
+ << toString(LHS.Val, 10, true) + " to " +
+ toString(LHS.Val, 10, false)
<< LHS.getRange() << RHS.getRange();
if (!RHS.isUnsigned() && RHS.Val.isNegative())
PP.Diag(OpLoc, diag::warn_pp_convert_to_positive) << 1
- << RHS.Val.toString(10, true) + " to " +
- RHS.Val.toString(10, false)
+ << toString(RHS.Val, 10, true) + " to " +
+ toString(RHS.Val, 10, false)
<< LHS.getRange() << RHS.getRange();
}
LHS.Val.setIsUnsigned(Res.isUnsigned());
diff --git a/clang/lib/Lex/PPMacroExpansion.cpp b/clang/lib/Lex/PPMacroExpansion.cpp
index 43d31d6c5732..8728ac9e2166 100644
--- a/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/clang/lib/Lex/PPMacroExpansion.cpp
@@ -25,6 +25,7 @@
#include "clang/Lex/ExternalPreprocessorSource.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/MacroArgs.h"
#include "clang/Lex/MacroInfo.h"
#include "clang/Lex/Preprocessor.h"
@@ -170,7 +171,8 @@ ModuleMacro *Preprocessor::addModuleMacro(Module *Mod, IdentifierInfo *II,
return MM;
}
-ModuleMacro *Preprocessor::getModuleMacro(Module *Mod, IdentifierInfo *II) {
+ModuleMacro *Preprocessor::getModuleMacro(Module *Mod,
+ const IdentifierInfo *II) {
llvm::FoldingSetNodeID ID;
ModuleMacro::Profile(ID, Mod, II);
@@ -1428,7 +1430,7 @@ static bool isTargetVendor(const TargetInfo &TI, const IdentifierInfo *II) {
StringRef VendorName = TI.getTriple().getVendorName();
if (VendorName.empty())
VendorName = "unknown";
- return VendorName.equals_lower(II->getName());
+ return VendorName.equals_insensitive(II->getName());
}
/// Implements the __is_target_os builtin macro.
@@ -1812,7 +1814,14 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
if (!Tok.isAnnotation() && Tok.getIdentifierInfo())
Tok.setKind(tok::identifier);
- else {
+ else if (Tok.is(tok::string_literal) && !Tok.hasUDSuffix()) {
+ StringLiteralParser Literal(Tok, *this);
+ if (Literal.hadError)
+ return;
+
+ Tok.setIdentifierInfo(getIdentifierInfo(Literal.GetString()));
+ Tok.setKind(tok::identifier);
+ } else {
Diag(Tok.getLocation(), diag::err_pp_identifier_arg_not_identifier)
<< Tok.getKind();
// Don't walk past anything that's not a real token.
diff --git a/clang/lib/Lex/Pragma.cpp b/clang/lib/Lex/Pragma.cpp
index a05df060813e..c89061ba6d02 100644
--- a/clang/lib/Lex/Pragma.cpp
+++ b/clang/lib/Lex/Pragma.cpp
@@ -412,9 +412,13 @@ void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
HeaderInfo.MarkFileIncludeOnce(getCurrentFileLexer()->getFileEntry());
}
-void Preprocessor::HandlePragmaMark() {
+void Preprocessor::HandlePragmaMark(Token &MarkTok) {
assert(CurPPLexer && "No current lexer?");
- CurLexer->ReadToEndOfLine();
+
+ SmallString<64> Buffer;
+ CurLexer->ReadToEndOfLine(&Buffer);
+ if (Callbacks)
+ Callbacks->PragmaMark(MarkTok.getLocation(), Buffer);
}
/// HandlePragmaPoison - Handle \#pragma GCC poison. PoisonTok is the 'poison'.
@@ -992,7 +996,7 @@ struct PragmaMarkHandler : public PragmaHandler {
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &MarkTok) override {
- PP.HandlePragmaMark();
+ PP.HandlePragmaMark(MarkTok);
}
};
@@ -1122,6 +1126,57 @@ struct PragmaDebugHandler : public PragmaHandler {
DebugOverflowStack();
} else if (II->isStr("captured")) {
HandleCaptured(PP);
+ } else if (II->isStr("modules")) {
+ struct ModuleVisitor {
+ Preprocessor &PP;
+ void visit(Module *M, bool VisibleOnly) {
+ SourceLocation ImportLoc = PP.getModuleImportLoc(M);
+ if (!VisibleOnly || ImportLoc.isValid()) {
+ llvm::errs() << M->getFullModuleName() << " ";
+ if (ImportLoc.isValid()) {
+ llvm::errs() << M << " visible ";
+ ImportLoc.print(llvm::errs(), PP.getSourceManager());
+ }
+ llvm::errs() << "\n";
+ }
+ for (Module *Sub : M->submodules()) {
+ if (!VisibleOnly || ImportLoc.isInvalid() || Sub->IsExplicit)
+ visit(Sub, VisibleOnly);
+ }
+ }
+ void visitAll(bool VisibleOnly) {
+ for (auto &NameAndMod :
+ PP.getHeaderSearchInfo().getModuleMap().modules())
+ visit(NameAndMod.second, VisibleOnly);
+ }
+ } Visitor{PP};
+
+ Token Kind;
+ PP.LexUnexpandedToken(Kind);
+ auto *DumpII = Kind.getIdentifierInfo();
+ if (!DumpII) {
+ PP.Diag(Kind, diag::warn_pragma_debug_missing_argument)
+ << II->getName();
+ } else if (DumpII->isStr("all")) {
+ Visitor.visitAll(false);
+ } else if (DumpII->isStr("visible")) {
+ Visitor.visitAll(true);
+ } else if (DumpII->isStr("building")) {
+ for (auto &Building : PP.getBuildingSubmodules()) {
+ llvm::errs() << "in " << Building.M->getFullModuleName();
+ if (Building.ImportLoc.isValid()) {
+ llvm::errs() << " imported ";
+ if (Building.IsPragma)
+ llvm::errs() << "via pragma ";
+ llvm::errs() << "at ";
+ Building.ImportLoc.print(llvm::errs(), PP.getSourceManager());
+ llvm::errs() << "\n";
+ }
+ }
+ } else {
+ PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
+ << DumpII->getName();
+ }
} else {
PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
<< II->getName();
@@ -1904,6 +1959,7 @@ void Preprocessor::RegisterBuiltinPragmas() {
AddPragmaHandler(new PragmaExecCharsetHandler());
AddPragmaHandler(new PragmaIncludeAliasHandler());
AddPragmaHandler(new PragmaHdrstopHandler());
+ AddPragmaHandler(new PragmaSystemHeaderHandler());
}
// Pragmas added by plugins
diff --git a/clang/lib/Lex/PreprocessingRecord.cpp b/clang/lib/Lex/PreprocessingRecord.cpp
index 115256db4809..ed59dbdf018d 100644
--- a/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/clang/lib/Lex/PreprocessingRecord.cpp
@@ -411,6 +411,14 @@ void PreprocessingRecord::Ifdef(SourceLocation Loc, const Token &MacroNameTok,
MacroNameTok.getLocation());
}
+void PreprocessingRecord::Elifdef(SourceLocation Loc, const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ // This is not actually a macro expansion but record it as a macro reference.
+ if (MD)
+ addMacroExpansion(MacroNameTok, MD.getMacroInfo(),
+ MacroNameTok.getLocation());
+}
+
void PreprocessingRecord::Ifndef(SourceLocation Loc, const Token &MacroNameTok,
const MacroDefinition &MD) {
// This is not actually a macro expansion but record it as a macro reference.
@@ -419,6 +427,15 @@ void PreprocessingRecord::Ifndef(SourceLocation Loc, const Token &MacroNameTok,
MacroNameTok.getLocation());
}
+void PreprocessingRecord::Elifndef(SourceLocation Loc,
+ const Token &MacroNameTok,
+ const MacroDefinition &MD) {
+ // This is not actually a macro expansion but record it as a macro reference.
+ if (MD)
+ addMacroExpansion(MacroNameTok, MD.getMacroInfo(),
+ MacroNameTok.getLocation());
+}
+
void PreprocessingRecord::Defined(const Token &MacroNameTok,
const MacroDefinition &MD,
SourceRange Range) {
diff --git a/clang/lib/Lex/Preprocessor.cpp b/clang/lib/Lex/Preprocessor.cpp
index 94f1ce91f884..32ea8791d29a 100644
--- a/clang/lib/Lex/Preprocessor.cpp
+++ b/clang/lib/Lex/Preprocessor.cpp
@@ -119,12 +119,8 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
// a macro. They get unpoisoned where it is allowed.
(Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
SetPoisonReason(Ident__VA_ARGS__,diag::ext_pp_bad_vaargs_use);
- if (getLangOpts().CPlusPlus20) {
- (Ident__VA_OPT__ = getIdentifierInfo("__VA_OPT__"))->setIsPoisoned();
- SetPoisonReason(Ident__VA_OPT__,diag::ext_pp_bad_vaopt_use);
- } else {
- Ident__VA_OPT__ = nullptr;
- }
+ (Ident__VA_OPT__ = getIdentifierInfo("__VA_OPT__"))->setIsPoisoned();
+ SetPoisonReason(Ident__VA_OPT__,diag::ext_pp_bad_vaopt_use);
// Initialize the pragma handlers.
RegisterBuiltinPragmas();
@@ -278,7 +274,7 @@ void Preprocessor::PrintStats() {
llvm::errs() << " " << NumEnteredSourceFiles << " source files entered.\n";
llvm::errs() << " " << MaxIncludeStackDepth << " max include stack depth\n";
llvm::errs() << " " << NumIf << " #if/#ifndef/#ifdef.\n";
- llvm::errs() << " " << NumElse << " #else/#elif.\n";
+ llvm::errs() << " " << NumElse << " #else/#elif/#elifdef/#elifndef.\n";
llvm::errs() << " " << NumEndif << " #endif.\n";
llvm::errs() << " " << NumPragma << " #pragma.\n";
llvm::errs() << NumSkipped << " #if/#ifndef#ifdef regions skipped\n";
@@ -446,15 +442,15 @@ bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
void Preprocessor::CodeCompleteIncludedFile(llvm::StringRef Dir,
bool IsAngled) {
+ setCodeCompletionReached();
if (CodeComplete)
CodeComplete->CodeCompleteIncludedFile(Dir, IsAngled);
- setCodeCompletionReached();
}
void Preprocessor::CodeCompleteNaturalLanguage() {
+ setCodeCompletionReached();
if (CodeComplete)
CodeComplete->CodeCompleteNaturalLanguage();
- setCodeCompletionReached();
}
/// getSpelling - This method is used to get the spelling of a token into a
@@ -720,6 +716,12 @@ IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
}
// Update the token info (identifier info and appropriate token kind).
+ // FIXME: the raw_identifier may contain leading whitespace which is removed
+ // from the cleaned identifier token. The SourceLocation should be updated to
+ // refer to the non-whitespace character. For instance, the text "\\\nB" (a
+ // line continuation before 'B') is parsed as a single tok::raw_identifier and
+ // is cleaned to tok::identifier "B". After cleaning the token's length is
+ // still 3 and the SourceLocation refers to the location of the backslash.
Identifier.setIdentifierInfo(II);
if (getLangOpts().MSVCCompat && II->isCPlusPlusOperatorKeyword() &&
getSourceManager().isInSystemHeader(Identifier.getLocation()))
diff --git a/clang/lib/Lex/TokenLexer.cpp b/clang/lib/Lex/TokenLexer.cpp
index 97cb2cf0bb8c..41e7f3f1dccb 100644
--- a/clang/lib/Lex/TokenLexer.cpp
+++ b/clang/lib/Lex/TokenLexer.cpp
@@ -148,12 +148,12 @@ bool TokenLexer::MaybeRemoveCommaBeforeVaArgs(
return false;
// GCC removes the comma in the expansion of " ... , ## __VA_ARGS__ " if
- // __VA_ARGS__ is empty, but not in strict mode where there are no
- // named arguments, where it remains. With GNU extensions, it is removed
- // regardless of named arguments.
+ // __VA_ARGS__ is empty, but not in strict C99 mode where there are no
+ // named arguments, where it remains. In all other modes, including C99
+ // with GNU extensions, it is removed regardless of named arguments.
// Microsoft also appears to support this extension, unofficially.
- if (!PP.getLangOpts().GNUMode && !PP.getLangOpts().MSVCCompat &&
- Macro->getNumParams() < 2)
+ if (PP.getLangOpts().C99 && !PP.getLangOpts().GNUMode
+ && Macro->getNumParams() < 2)
return false;
// Is a comma available to be removed?
@@ -971,7 +971,7 @@ TokenLexer::getExpansionLocForMacroDefLoc(SourceLocation loc) const {
assert(SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength) &&
"Expected loc to come from the macro definition");
- unsigned relativeOffset = 0;
+ SourceLocation::UIntTy relativeOffset = 0;
SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength, &relativeOffset);
return MacroExpansionStart.getLocWithOffset(relativeOffset);
}
@@ -1010,7 +1010,7 @@ static void updateConsecutiveMacroArgTokens(SourceManager &SM,
if (CurLoc.isFileID() != NextLoc.isFileID())
break; // Token from different kind of FileID.
- int RelOffs;
+ SourceLocation::IntTy RelOffs;
if (!SM.isInSameSLocAddrSpace(CurLoc, NextLoc, &RelOffs))
break; // Token from different local/loaded location.
// Check that token is not before the previous token or more than 50
@@ -1027,10 +1027,11 @@ static void updateConsecutiveMacroArgTokens(SourceManager &SM,
// For the consecutive tokens, find the length of the SLocEntry to contain
// all of them.
Token &LastConsecutiveTok = *(NextTok-1);
- int LastRelOffs = 0;
+ SourceLocation::IntTy LastRelOffs = 0;
SM.isInSameSLocAddrSpace(FirstLoc, LastConsecutiveTok.getLocation(),
&LastRelOffs);
- unsigned FullLength = LastRelOffs + LastConsecutiveTok.getLength();
+ SourceLocation::UIntTy FullLength =
+ LastRelOffs + LastConsecutiveTok.getLength();
// Create a macro expansion SLocEntry that will "contain" all of the tokens.
SourceLocation Expansion =
@@ -1040,7 +1041,7 @@ static void updateConsecutiveMacroArgTokens(SourceManager &SM,
// expanded location.
for (; begin_tokens < NextTok; ++begin_tokens) {
Token &Tok = *begin_tokens;
- int RelOffs = 0;
+ SourceLocation::IntTy RelOffs = 0;
SM.isInSameSLocAddrSpace(FirstLoc, Tok.getLocation(), &RelOffs);
Tok.setLocation(Expansion.getLocWithOffset(RelOffs));
}
diff --git a/clang/lib/Parse/ParseCXXInlineMethods.cpp b/clang/lib/Parse/ParseCXXInlineMethods.cpp
index b0335905b6f8..116724a0d50b 100644
--- a/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -778,6 +778,7 @@ void Parser::ParseLexedPragma(LateParsedPragma &LP) {
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
assert(Tok.isAnnotation() && "Expected annotation token.");
switch (Tok.getKind()) {
+ case tok::annot_attr_openmp:
case tok::annot_pragma_openmp: {
AccessSpecifier AS = LP.getAccessSpecifier();
ParsedAttributesWithRange Attrs(AttrFactory);
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 571164139630..f4f5f461e3b6 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -103,6 +103,24 @@ static bool FindLocsWithCommonFileID(Preprocessor &PP, SourceLocation StartLoc,
return AttrStartIsInMacro && AttrEndIsInMacro;
}
+void Parser::ParseAttributes(unsigned WhichAttrKinds,
+ ParsedAttributesWithRange &Attrs,
+ SourceLocation *End,
+ LateParsedAttrList *LateAttrs) {
+ bool MoreToParse;
+ do {
+ // Assume there's nothing left to parse, but if any attributes are in fact
+ // parsed, loop to ensure all specified attribute combinations are parsed.
+ MoreToParse = false;
+ if (WhichAttrKinds & PAKM_CXX11)
+ MoreToParse |= MaybeParseCXX11Attributes(Attrs, End);
+ if (WhichAttrKinds & PAKM_GNU)
+ MoreToParse |= MaybeParseGNUAttributes(Attrs, End, LateAttrs);
+ if (WhichAttrKinds & PAKM_Declspec)
+ MoreToParse |= MaybeParseMicrosoftDeclSpecs(Attrs, End);
+ } while (MoreToParse);
+}
+
/// ParseGNUAttributes - Parse a non-empty attributes list.
///
/// [GNU] attributes:
@@ -144,15 +162,19 @@ static bool FindLocsWithCommonFileID(Preprocessor &PP, SourceLocation StartLoc,
/// ',' or ')' are ignored, otherwise they produce a parse error.
///
/// We follow the C++ model, but don't allow junk after the identifier.
-void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc,
- LateParsedAttrList *LateAttrs,
- Declarator *D) {
+void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
+ SourceLocation *EndLoc,
+ LateParsedAttrList *LateAttrs, Declarator *D) {
assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
+ SourceLocation StartLoc = Tok.getLocation(), Loc;
+
+ if (!EndLoc)
+ EndLoc = &Loc;
+
while (Tok.is(tok::kw___attribute)) {
SourceLocation AttrTokLoc = ConsumeToken();
- unsigned OldNumAttrs = attrs.size();
+ unsigned OldNumAttrs = Attrs.size();
unsigned OldNumLateAttrs = LateAttrs ? LateAttrs->size() : 0;
if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
@@ -180,14 +202,14 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation AttrNameLoc = ConsumeToken();
if (Tok.isNot(tok::l_paren)) {
- attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
ParsedAttr::AS_GNU);
continue;
}
// Handle "parameterized" attributes
if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
- ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc, nullptr,
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, nullptr,
SourceLocation(), ParsedAttr::AS_GNU, D);
continue;
}
@@ -220,8 +242,8 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation Loc = Tok.getLocation();
if (ExpectAndConsume(tok::r_paren))
SkipUntil(tok::r_paren, StopAtSemi);
- if (endLoc)
- *endLoc = Loc;
+ if (EndLoc)
+ *EndLoc = Loc;
// If this was declared in a macro, attach the macro IdentifierInfo to the
// parsed attribute.
@@ -233,8 +255,8 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
Lexer::getSourceText(ExpansionRange, SM, PP.getLangOpts());
IdentifierInfo *MacroII = PP.getIdentifierInfo(FoundName);
- for (unsigned i = OldNumAttrs; i < attrs.size(); ++i)
- attrs[i].setMacroIdentifier(MacroII, ExpansionRange.getBegin());
+ for (unsigned i = OldNumAttrs; i < Attrs.size(); ++i)
+ Attrs[i].setMacroIdentifier(MacroII, ExpansionRange.getBegin());
if (LateAttrs) {
for (unsigned i = OldNumLateAttrs; i < LateAttrs->size(); ++i)
@@ -242,6 +264,8 @@ void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
}
}
}
+
+ Attrs.Range = SourceRange(StartLoc, *EndLoc);
}
/// Determine whether the given attribute has an identifier argument.
@@ -1589,7 +1613,30 @@ void Parser::DiagnoseProhibitedAttributes(
}
void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
- unsigned DiagID) {
+ unsigned DiagID, bool DiagnoseEmptyAttrs) {
+
+ if (DiagnoseEmptyAttrs && Attrs.empty() && Attrs.Range.isValid()) {
+ // An attribute list has been parsed, but it was empty.
+ // This is the case for [[]].
+ const auto &LangOpts = getLangOpts();
+ auto &SM = PP.getSourceManager();
+ Token FirstLSquare;
+ Lexer::getRawToken(Attrs.Range.getBegin(), FirstLSquare, SM, LangOpts);
+
+ if (FirstLSquare.is(tok::l_square)) {
+ llvm::Optional<Token> SecondLSquare =
+ Lexer::findNextToken(FirstLSquare.getLocation(), SM, LangOpts);
+
+ if (SecondLSquare && SecondLSquare->is(tok::l_square)) {
+ // The attribute range starts with [[, but is empty. So this must
+ // be [[]], which we are supposed to diagnose because
+ // DiagnoseEmptyAttrs is true.
+ Diag(Attrs.Range.getBegin(), DiagID) << Attrs.Range;
+ return;
+ }
+ }
+ }
+
for (const ParsedAttr &AL : Attrs) {
if (!AL.isCXX11Attribute() && !AL.isC2xAttribute())
continue;
@@ -1603,6 +1650,13 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
}
}
+void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs) {
+ for (const ParsedAttr &PA : Attrs) {
+ if (PA.isCXX11Attribute() || PA.isC2xAttribute())
+ Diag(PA.getLoc(), diag::ext_cxx11_attr_placement) << PA << PA.getRange();
+ }
+}
+
// Usually, `__attribute__((attrib)) class Foo {} var` means that attribute
// applies to var, not the type Foo.
// As an exception to the rule, __declspec(align(...)) before the
@@ -1952,8 +2006,8 @@ Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
// Check to see if we have a function *definition* which must have a body.
if (D.isFunctionDeclarator()) {
if (Tok.is(tok::equal) && NextToken().is(tok::code_completion)) {
- Actions.CodeCompleteAfterFunctionEquals(D);
cutOffParsing();
+ Actions.CodeCompleteAfterFunctionEquals(D);
return nullptr;
}
// Look at the next token to make sure that this isn't a function
@@ -2292,9 +2346,9 @@ Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(
InitializerScopeRAII InitScope(*this, D, ThisDecl);
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteInitializer(getCurScope(), ThisDecl);
Actions.FinalizeDeclaration(ThisDecl);
- cutOffParsing();
return nullptr;
}
@@ -3024,6 +3078,19 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
SourceLocation Loc = Tok.getLocation();
+ // Helper for image types in OpenCL.
+ auto handleOpenCLImageKW = [&] (StringRef Ext, TypeSpecifierType ImageTypeSpec) {
+ // Check if the image type is supported and otherwise turn the keyword into an identifier
+ // because image types from extensions are not reserved identifiers.
+ if (!StringRef(Ext).empty() && !getActions().getOpenCLOptions().isSupported(Ext, getLangOpts())) {
+ Tok.getIdentifierInfo()->revertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
+ return false;
+ }
+ isInvalid = DS.SetTypeSpecType(ImageTypeSpec, Loc, PrevSpec, DiagID, Policy);
+ return true;
+ };
+
switch (Tok.getKind()) {
default:
DoneWithDeclSpec:
@@ -3072,10 +3139,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
= DSContext == DeclSpecContext::DSC_top_level ||
(DSContext == DeclSpecContext::DSC_class && DS.isFriendSpecified());
+ cutOffParsing();
Actions.CodeCompleteDeclSpec(getCurScope(), DS,
AllowNonIdentifiers,
AllowNestedNameSpecifiers);
- return cutOffParsing();
+ return;
}
if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
@@ -3088,8 +3156,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
else if (CurParsedObjCImpl)
CCC = Sema::PCC_ObjCImplementation;
+ cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
- return cutOffParsing();
+ return;
}
case tok::coloncolon: // ::foo::bar
@@ -3485,14 +3554,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
}
- // GNU attributes support.
+ // Attributes support.
case tok::kw___attribute:
- ParseGNUAttributes(DS.getAttributes(), nullptr, LateAttrs);
- continue;
-
- // Microsoft declspec support.
case tok::kw___declspec:
- ParseMicrosoftDeclSpecs(DS.getAttributes());
+ ParseAttributes(PAKM_GNU | PAKM_Declspec, DS.getAttributes(), nullptr,
+ LateAttrs);
continue;
// Microsoft single token adornments.
@@ -3631,8 +3697,8 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// C++ for OpenCL does not allow virtual function qualifier, to avoid
// function pointers restricted in OpenCL v2.0 s6.9.a.
if (getLangOpts().OpenCLCPlusPlus &&
- !getActions().getOpenCLOptions().isEnabled(
- "__cl_clang_function_pointers")) {
+ !getActions().getOpenCLOptions().isAvailableOption(
+ "__cl_clang_function_pointers", getLangOpts())) {
DiagID = diag::err_openclcxx_virtual_function;
PrevSpec = Tok.getIdentifierInfo()->getNameStart();
isInvalid = true;
@@ -3881,18 +3947,22 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw_pipe:
if (!getLangOpts().OpenCL || (getLangOpts().OpenCLVersion < 200 &&
!getLangOpts().OpenCLCPlusPlus)) {
- // OpenCL 2.0 defined this keyword. OpenCL 1.2 and earlier should
- // support the "pipe" word as identifier.
+ // OpenCL 2.0 and later define this keyword. OpenCL 1.2 and earlier
+ // should support the "pipe" word as identifier.
Tok.getIdentifierInfo()->revertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
goto DoneWithDeclSpec;
}
isInvalid = DS.SetTypePipe(true, Loc, PrevSpec, DiagID, Policy);
break;
-#define GENERIC_IMAGE_TYPE(ImgType, Id) \
- case tok::kw_##ImgType##_t: \
- isInvalid = DS.SetTypeSpecType(DeclSpec::TST_##ImgType##_t, Loc, PrevSpec, \
- DiagID, Policy); \
- break;
+// We only need to enumerate each image type once.
+#define IMAGE_READ_WRITE_TYPE(Type, Id, Ext)
+#define IMAGE_WRITE_TYPE(Type, Id, Ext)
+#define IMAGE_READ_TYPE(ImgType, Id, Ext) \
+ case tok::kw_##ImgType##_t: \
+ if (!handleOpenCLImageKW(Ext, DeclSpec::TST_##ImgType##_t)) \
+ goto DoneWithDeclSpec; \
+ break;
#include "clang/Basic/OpenCLImageTypes.def"
case tok::kw___unknown_anytype:
isInvalid = DS.SetTypeSpecType(TST_unknown_anytype, Loc,
@@ -4002,8 +4072,9 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::kw___generic:
// generic address space is introduced only in OpenCL v2.0
// see OpenCL C Spec v2.0 s6.5.5
- if (Actions.getLangOpts().OpenCLVersion < 200 &&
- !Actions.getLangOpts().OpenCLCPlusPlus) {
+ // OpenCL v3.0 introduces __opencl_c_generic_address_space
+ // feature macro to indicate if generic address space is supported
+ if (!Actions.getLangOpts().OpenCLGenericAddressSpace) {
DiagID = diag::err_opencl_unknown_type_specifier;
PrevSpec = Tok.getIdentifierInfo()->getNameStart();
isInvalid = true;
@@ -4216,7 +4287,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
}
// Parse _Static_assert declaration.
- if (Tok.is(tok::kw__Static_assert)) {
+ if (Tok.isOneOf(tok::kw__Static_assert, tok::kw_static_assert)) {
SourceLocation DeclEnd;
ParseStaticAssertDeclaration(DeclEnd);
continue;
@@ -4232,7 +4303,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
continue;
}
- if (Tok.is(tok::annot_pragma_openmp)) {
+ if (Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp)) {
// Result can be ignored, because it must be always empty.
AccessSpecifier AS = AS_none;
ParsedAttributesWithRange Attrs(AttrFactory);
@@ -4348,15 +4419,14 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Parse the tag portion of this.
if (Tok.is(tok::code_completion)) {
// Code completion for an enum name.
+ cutOffParsing();
Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
- return cutOffParsing();
+ return;
}
// If attributes exist after tag, parse them.
ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseGNUAttributes(attrs);
- MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftDeclSpecs(attrs);
+ MaybeParseAttributes(PAKM_GNU | PAKM_Declspec | PAKM_CXX11, attrs);
SourceLocation ScopedEnumKWLoc;
bool IsScopedUsingClassTag = false;
@@ -4373,9 +4443,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
ProhibitAttributes(attrs);
// They are allowed afterwards, though.
- MaybeParseGNUAttributes(attrs);
- MaybeParseCXX11Attributes(attrs);
- MaybeParseMicrosoftDeclSpecs(attrs);
+ MaybeParseAttributes(PAKM_GNU | PAKM_Declspec | PAKM_CXX11, attrs);
}
// C++11 [temp.explicit]p12:
@@ -4617,7 +4685,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// or opaque-enum-declaration anywhere.
if (IsElaboratedTypeSpecifier && !getLangOpts().MicrosoftExt &&
!getLangOpts().ObjC) {
- ProhibitAttributes(attrs);
+ ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ /*DiagnoseEmptyAttrs=*/true);
if (BaseType.isUsable())
Diag(BaseRange.getBegin(), diag::ext_enum_base_in_type_specifier)
<< (AllowEnumSpecifier == AllowDefiningTypeSpec::Yes) << BaseRange;
@@ -4761,7 +4830,6 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// If attributes exist after the enumerator, parse them.
ParsedAttributesWithRange attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
- ProhibitAttributes(attrs); // GNU-style attributes are prohibited.
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
if (getLangOpts().CPlusPlus)
Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
@@ -5059,8 +5127,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
default: return false;
case tok::kw_pipe:
- return (getLangOpts().OpenCL && getLangOpts().OpenCLVersion >= 200) ||
- getLangOpts().OpenCLCPlusPlus;
+ return getLangOpts().OpenCLPipe;
case tok::identifier: // foo::bar
// Unfortunate hack to support "Class.factoryMethod" notation.
@@ -5180,6 +5247,7 @@ bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
case tok::kw_friend:
// static_assert-declaration
+ case tok::kw_static_assert:
case tok::kw__Static_assert:
// GNU typeof support.
@@ -5448,11 +5516,12 @@ void Parser::ParseTypeQualifierListOpt(
switch (Tok.getKind()) {
case tok::code_completion:
+ cutOffParsing();
if (CodeCompletionHandler)
(*CodeCompletionHandler)();
else
Actions.CodeCompleteTypeQualifiers(DS);
- return cutOffParsing();
+ return;
case tok::kw_const:
isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec, DiagID,
@@ -5587,8 +5656,7 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
if (Kind == tok::star || Kind == tok::caret)
return true;
- if (Kind == tok::kw_pipe &&
- ((Lang.OpenCL && Lang.OpenCLVersion >= 200) || Lang.OpenCLCPlusPlus))
+ if (Kind == tok::kw_pipe && Lang.OpenCLPipe)
return true;
if (!Lang.CPlusPlus)
@@ -6990,8 +7058,9 @@ void Parser::ParseBracketDeclarator(Declarator &D) {
std::move(attrs), T.getCloseLocation());
return;
} else if (Tok.getKind() == tok::code_completion) {
+ cutOffParsing();
Actions.CodeCompleteBracketDeclarator(getCurScope());
- return cutOffParsing();
+ return;
}
// If valid, this location is the position where we read the 'static' keyword.
@@ -7274,6 +7343,7 @@ bool Parser::TryAltiVecVectorTokenOutOfLine() {
case tok::kw_float:
case tok::kw_double:
case tok::kw_bool:
+ case tok::kw__Bool:
case tok::kw___bool:
case tok::kw___pixel:
Tok.setKind(tok::kw___vector);
@@ -7283,7 +7353,8 @@ bool Parser::TryAltiVecVectorTokenOutOfLine() {
Tok.setKind(tok::kw___vector);
return true;
}
- if (Next.getIdentifierInfo() == Ident_bool) {
+ if (Next.getIdentifierInfo() == Ident_bool ||
+ Next.getIdentifierInfo() == Ident_Bool) {
Tok.setKind(tok::kw___vector);
return true;
}
@@ -7308,6 +7379,7 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
case tok::kw_float:
case tok::kw_double:
case tok::kw_bool:
+ case tok::kw__Bool:
case tok::kw___bool:
case tok::kw___pixel:
isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
@@ -7317,8 +7389,10 @@ bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID,Policy);
return true;
}
- if (Next.getIdentifierInfo() == Ident_bool) {
- isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID,Policy);
+ if (Next.getIdentifierInfo() == Ident_bool ||
+ Next.getIdentifierInfo() == Ident_Bool) {
+ isInvalid =
+ DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID, Policy);
return true;
}
break;
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 88ebb59f9a60..ca5c013a51fe 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -63,8 +63,8 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
ObjCDeclContextSwitch ObjCDC(*this);
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteNamespaceDecl(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteNamespaceDecl(getCurScope());
return nullptr;
}
@@ -132,7 +132,7 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
<< FixItHint::CreateRemoval(InlineLoc);
Decl *NSAlias = ParseNamespaceAlias(NamespaceLoc, IdentLoc, Ident, DeclEnd);
return Actions.ConvertDeclToDeclGroup(NSAlias);
-}
+ }
BalancedDelimiterTracker T(*this, tok::l_brace);
if (T.consumeOpen()) {
@@ -283,8 +283,8 @@ Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
ConsumeToken(); // eat the '='.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteNamespaceAliasDecl(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteNamespaceAliasDecl(getCurScope());
return nullptr;
}
@@ -471,8 +471,8 @@ Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
SourceLocation UsingLoc = ConsumeToken();
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteUsing(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteUsing(getCurScope());
return nullptr;
}
@@ -497,11 +497,7 @@ Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
}
// Otherwise, it must be a using-declaration or an alias-declaration.
-
- // Using declarations can't have attributes.
- ProhibitAttributes(attrs);
-
- return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd,
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd, attrs,
AS_none);
}
@@ -525,8 +521,8 @@ Decl *Parser::ParseUsingDirective(DeclaratorContext Context,
SourceLocation NamespcLoc = ConsumeToken();
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteUsingDirective(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteUsingDirective(getCurScope());
return nullptr;
}
@@ -627,7 +623,8 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
if (getLangOpts().CPlusPlus11 && Context == DeclaratorContext::Member &&
Tok.is(tok::identifier) &&
(NextToken().is(tok::semi) || NextToken().is(tok::comma) ||
- NextToken().is(tok::ellipsis)) &&
+ NextToken().is(tok::ellipsis) || NextToken().is(tok::l_square) ||
+ NextToken().is(tok::kw___attribute)) &&
D.SS.isNotEmpty() && LastII == Tok.getIdentifierInfo() &&
!D.SS.getScopeRep()->getAsNamespace() &&
!D.SS.getScopeRep()->getAsNamespaceAlias()) {
@@ -670,11 +667,48 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
/// alias-declaration: C++11 [dcl.dcl]p1
/// 'using' identifier attribute-specifier-seq[opt] = type-id ;
///
+/// using-enum-declaration: [C++20, dcl.enum]
+/// 'using' elaborated-enum-specifier ;
+///
+/// elaborated-enum-specifier:
+/// 'enum' nested-name-specifier[opt] identifier
Parser::DeclGroupPtrTy
-Parser::ParseUsingDeclaration(DeclaratorContext Context,
- const ParsedTemplateInfo &TemplateInfo,
- SourceLocation UsingLoc, SourceLocation &DeclEnd,
- AccessSpecifier AS) {
+Parser::ParseUsingDeclaration(
+ DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc, SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &PrefixAttrs, AccessSpecifier AS) {
+ SourceLocation UELoc;
+ if (TryConsumeToken(tok::kw_enum, UELoc)) {
+ // C++20 using-enum
+ Diag(UELoc, getLangOpts().CPlusPlus20
+ ? diag::warn_cxx17_compat_using_enum_declaration
+ : diag::ext_using_enum_declaration);
+
+ DiagnoseCXX11AttributeExtension(PrefixAttrs);
+
+ DeclSpec DS(AttrFactory);
+ ParseEnumSpecifier(UELoc, DS, TemplateInfo, AS,
+ // DSC_trailing has the semantics we desire
+ DeclSpecContext::DSC_trailing);
+
+ if (TemplateInfo.Kind) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_directive_declaration)
+ << 1 /* declaration */ << R << FixItHint::CreateRemoval(R);
+
+ return nullptr;
+ }
+
+ Decl *UED = Actions.ActOnUsingEnumDeclaration(getCurScope(), AS, UsingLoc,
+ UELoc, DS);
+ DeclEnd = Tok.getLocation();
+ if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ "using-enum declaration"))
+ SkipUntil(tok::semi);
+
+ return Actions.ConvertDeclToDeclGroup(UED);
+ }
+
// Check for misplaced attributes before the identifier in an
// alias-declaration.
ParsedAttributesWithRange MisplacedAttrs(AttrFactory);
@@ -684,8 +718,18 @@ Parser::ParseUsingDeclaration(DeclaratorContext Context,
bool InvalidDeclarator = ParseUsingDeclarator(Context, D);
ParsedAttributesWithRange Attrs(AttrFactory);
- MaybeParseGNUAttributes(Attrs);
- MaybeParseCXX11Attributes(Attrs);
+ MaybeParseAttributes(PAKM_GNU | PAKM_CXX11, Attrs);
+
+ // If we had any misplaced attributes from earlier, this is where they
+ // should have been written.
+ if (MisplacedAttrs.Range.isValid()) {
+ Diag(MisplacedAttrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ << FixItHint::CreateInsertionFromRange(
+ Tok.getLocation(),
+ CharSourceRange::getTokenRange(MisplacedAttrs.Range))
+ << FixItHint::CreateRemoval(MisplacedAttrs.Range);
+ Attrs.takeAllFrom(MisplacedAttrs);
+ }
// Maybe this is an alias-declaration.
if (Tok.is(tok::equal)) {
@@ -694,16 +738,7 @@ Parser::ParseUsingDeclaration(DeclaratorContext Context,
return nullptr;
}
- // If we had any misplaced attributes from earlier, this is where they
- // should have been written.
- if (MisplacedAttrs.Range.isValid()) {
- Diag(MisplacedAttrs.Range.getBegin(), diag::err_attributes_not_allowed)
- << FixItHint::CreateInsertionFromRange(
- Tok.getLocation(),
- CharSourceRange::getTokenRange(MisplacedAttrs.Range))
- << FixItHint::CreateRemoval(MisplacedAttrs.Range);
- Attrs.takeAllFrom(MisplacedAttrs);
- }
+ ProhibitAttributes(PrefixAttrs);
Decl *DeclFromDeclSpec = nullptr;
Decl *AD = ParseAliasDeclarationAfterDeclarator(
@@ -711,10 +746,7 @@ Parser::ParseUsingDeclaration(DeclaratorContext Context,
return Actions.ConvertDeclToDeclGroup(AD, DeclFromDeclSpec);
}
- // C++11 attributes are not allowed on a using-declaration, but GNU ones
- // are.
- ProhibitAttributes(MisplacedAttrs);
- ProhibitAttributes(Attrs);
+ DiagnoseCXX11AttributeExtension(PrefixAttrs);
// Diagnose an attempt to declare a templated using-declaration.
// In C++11, alias-declarations can be templates:
@@ -732,8 +764,10 @@ Parser::ParseUsingDeclaration(DeclaratorContext Context,
SmallVector<Decl *, 8> DeclsInGroup;
while (true) {
- // Parse (optional) attributes (most likely GNU strong-using extension).
- MaybeParseGNUAttributes(Attrs);
+ // Parse (optional) attributes.
+ MaybeParseAttributes(PAKM_GNU | PAKM_CXX11, Attrs);
+ DiagnoseCXX11AttributeExtension(Attrs);
+ Attrs.addAll(PrefixAttrs.begin(), PrefixAttrs.end());
if (InvalidDeclarator)
SkipUntil(tok::comma, tok::semi, StopBeforeMatch);
@@ -772,8 +806,9 @@ Parser::ParseUsingDeclaration(DeclaratorContext Context,
// Eat ';'.
DeclEnd = Tok.getLocation();
if (ExpectAndConsume(tok::semi, diag::err_expected_after,
- !Attrs.empty() ? "attributes list"
- : "using declaration"))
+ !Attrs.empty() ? "attributes list"
+ : UELoc.isValid() ? "using-enum declaration"
+ : "using declaration"))
SkipUntil(tok::semi);
return Actions.BuildDeclaratorGroup(DeclsInGroup);
@@ -857,6 +892,16 @@ Decl *Parser::ParseAliasDeclarationAfterDeclarator(
DeclFromDeclSpec);
}
+static FixItHint getStaticAssertNoMessageFixIt(const Expr *AssertExpr,
+ SourceLocation EndExprLoc) {
+ if (const auto *BO = dyn_cast_or_null<BinaryOperator>(AssertExpr)) {
+ if (BO->getOpcode() == BO_LAnd &&
+ isa<StringLiteral>(BO->getRHS()->IgnoreImpCasts()))
+ return FixItHint::CreateReplacement(BO->getOperatorLoc(), ",");
+ }
+ return FixItHint::CreateInsertion(EndExprLoc, ", \"\"");
+}
+
/// ParseStaticAssertDeclaration - Parse C++0x or C11 static_assert-declaration.
///
/// [C++0x] static_assert-declaration:
@@ -871,8 +916,13 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
Diag(Tok, diag::ext_c11_feature) << Tok.getName();
- if (Tok.is(tok::kw_static_assert))
- Diag(Tok, diag::warn_cxx98_compat_static_assert);
+ if (Tok.is(tok::kw_static_assert)) {
+ if (!getLangOpts().CPlusPlus)
+ Diag(Tok, diag::ext_ms_static_assert)
+ << FixItHint::CreateReplacement(Tok.getLocation(), "_Static_assert");
+ else
+ Diag(Tok, diag::warn_cxx98_compat_static_assert);
+ }
SourceLocation StaticAssertLoc = ConsumeToken();
@@ -893,12 +943,17 @@ Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
ExprResult AssertMessage;
if (Tok.is(tok::r_paren)) {
- Diag(Tok, getLangOpts().CPlusPlus17
- ? diag::warn_cxx14_compat_static_assert_no_message
- : diag::ext_static_assert_no_message)
- << (getLangOpts().CPlusPlus17
- ? FixItHint()
- : FixItHint::CreateInsertion(Tok.getLocation(), ", \"\""));
+ unsigned DiagVal;
+ if (getLangOpts().CPlusPlus17)
+ DiagVal = diag::warn_cxx14_compat_static_assert_no_message;
+ else if (getLangOpts().CPlusPlus)
+ DiagVal = diag::ext_cxx_static_assert_no_message;
+ else if (getLangOpts().C2x)
+ DiagVal = diag::warn_c17_compat_static_assert_no_message;
+ else
+ DiagVal = diag::ext_c_static_assert_no_message;
+ Diag(Tok, DiagVal) << getStaticAssertNoMessageFixIt(AssertExpr.get(),
+ Tok.getLocation());
} else {
if (ExpectAndConsume(tok::comma)) {
SkipUntil(tok::semi);
@@ -1414,8 +1469,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (Tok.is(tok::code_completion)) {
// Code completion for a struct, class, or union name.
+ cutOffParsing();
Actions.CodeCompleteTag(getCurScope(), TagType);
- return cutOffParsing();
+ return;
}
// C++03 [temp.explicit] 14.7.2/8:
@@ -1435,8 +1491,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
ParsedAttributesWithRange attrs(AttrFactory);
// If attributes exist after tag, parse them.
- MaybeParseGNUAttributes(attrs);
- MaybeParseMicrosoftDeclSpecs(attrs);
+ MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
// Parse inheritance specifiers.
if (Tok.isOneOf(tok::kw___single_inheritance,
@@ -1444,10 +1499,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
tok::kw___virtual_inheritance))
ParseMicrosoftInheritanceClassAttributes(attrs);
- // If C++0x attributes exist here, parse them.
- // FIXME: Are we consistent with the ordering of parsing of different
- // styles of attributes?
- MaybeParseCXX11Attributes(attrs);
+ // Allow attributes to precede or succeed the inheritance specifiers.
+ MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
// Source location used by FIXIT to insert misplaced
// C++11 attributes
@@ -1693,7 +1746,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TUK = Sema::TUK_Reference;
else if (Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
- (isCXX11FinalKeyword() &&
+ (isClassCompatibleKeyword() &&
(NextToken().is(tok::l_brace) || NextToken().is(tok::colon)))) {
if (DS.isFriendSpecified()) {
// C++ [class.friend]p2:
@@ -1709,14 +1762,18 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Okay, this is a class definition.
TUK = Sema::TUK_Definition;
}
- } else if (isCXX11FinalKeyword() && (NextToken().is(tok::l_square) ||
- NextToken().is(tok::kw_alignas))) {
+ } else if (isClassCompatibleKeyword() &&
+ (NextToken().is(tok::l_square) ||
+ NextToken().is(tok::kw_alignas) ||
+ isCXX11VirtSpecifier(NextToken()) != VirtSpecifiers::VS_None)) {
// We can't tell if this is a definition or reference
// until we skipped the 'final' and C++11 attribute specifiers.
TentativeParsingAction PA(*this);
- // Skip the 'final' keyword.
- ConsumeToken();
+ // Skip the 'final', abstract'... keywords.
+ while (isClassCompatibleKeyword()) {
+ ConsumeToken();
+ }
// Skip C++11 attribute specifiers.
while (true) {
@@ -1819,7 +1876,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
TUK == Sema::TUK_Declaration) {
// This is an explicit instantiation of a class template.
- ProhibitAttributes(attrs);
+ ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ /*DiagnoseEmptyAttrs=*/true);
TagOrTempResult = Actions.ActOnExplicitInstantiation(
getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc,
@@ -1834,7 +1892,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
} else if (TUK == Sema::TUK_Reference ||
(TUK == Sema::TUK_Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
- ProhibitAttributes(attrs);
+ ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ /*DiagnoseEmptyAttrs=*/true);
TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
SS,
TemplateId->TemplateKWLoc,
@@ -1906,7 +1965,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TagType, StartLoc, SS, Name, NameLoc, attrs);
} else if (TUK == Sema::TUK_Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
- ProhibitAttributes(attrs);
+ ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ /*DiagnoseEmptyAttrs=*/true);
TagOrTempResult = Actions.ActOnTemplatedFriendTag(
getCurScope(), DS.getFriendSpecLoc(), TagType, StartLoc, SS, Name,
@@ -1915,7 +1975,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TemplateParams ? TemplateParams->size() : 0));
} else {
if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
- ProhibitAttributes(attrs);
+ ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
+ /* DiagnoseEmptyAttrs=*/true);
if (TUK == Sema::TUK_Definition &&
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
@@ -1961,7 +2022,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TUK == Sema::TUK_Definition) {
assert(Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
- isCXX11FinalKeyword());
+ isClassCompatibleKeyword());
if (SkipBody.ShouldSkip)
SkipCXXMemberSpecification(StartLoc, AttrFixitLoc, TagType,
TagOrTempResult.get());
@@ -2228,8 +2289,10 @@ VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
Ident_final = &PP.getIdentifierTable().get("final");
if (getLangOpts().GNUKeywords)
Ident_GNU_final = &PP.getIdentifierTable().get("__final");
- if (getLangOpts().MicrosoftExt)
+ if (getLangOpts().MicrosoftExt) {
Ident_sealed = &PP.getIdentifierTable().get("sealed");
+ Ident_abstract = &PP.getIdentifierTable().get("abstract");
+ }
Ident_override = &PP.getIdentifierTable().get("override");
}
@@ -2239,6 +2302,9 @@ VirtSpecifiers::Specifier Parser::isCXX11VirtSpecifier(const Token &Tok) const {
if (II == Ident_sealed)
return VirtSpecifiers::VS_Sealed;
+ if (II == Ident_abstract)
+ return VirtSpecifiers::VS_Abstract;
+
if (II == Ident_final)
return VirtSpecifiers::VS_Final;
@@ -2284,6 +2350,8 @@ void Parser::ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS,
<< VirtSpecifiers::getSpecifierName(Specifier);
} else if (Specifier == VirtSpecifiers::VS_Sealed) {
Diag(Tok.getLocation(), diag::ext_ms_sealed_keyword);
+ } else if (Specifier == VirtSpecifiers::VS_Abstract) {
+ Diag(Tok.getLocation(), diag::ext_ms_abstract_keyword);
} else if (Specifier == VirtSpecifiers::VS_GNU_Final) {
Diag(Tok.getLocation(), diag::ext_warn_gnu_final);
} else {
@@ -2306,6 +2374,16 @@ bool Parser::isCXX11FinalKeyword() const {
Specifier == VirtSpecifiers::VS_Sealed;
}
+/// isClassCompatibleKeyword - Determine whether the next token is a C++11
+/// 'final' or Microsoft 'sealed' or 'abstract' contextual keywords.
+bool Parser::isClassCompatibleKeyword() const {
+ VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier();
+ return Specifier == VirtSpecifiers::VS_Final ||
+ Specifier == VirtSpecifiers::VS_GNU_Final ||
+ Specifier == VirtSpecifiers::VS_Sealed ||
+ Specifier == VirtSpecifiers::VS_Abstract;
+}
+
/// Parse a C++ member-declarator up to, but not including, the optional
/// brace-or-equal-initializer or pure-specifier.
bool Parser::ParseCXXMemberDeclaratorBeforeInitializer(
@@ -2589,6 +2667,13 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
ParsedAttributesViewWithRange FnAttrs;
// Optional C++11 attribute-specifier
MaybeParseCXX11Attributes(attrs);
+
+ // The next token may be an OpenMP pragma annotation token. That would
+ // normally be handled from ParseCXXClassMemberDeclarationWithPragmas, but in
+ // this case, it came from an *attribute* rather than a pragma. Handle it now.
+ if (Tok.is(tok::annot_attr_openmp))
+ return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, attrs);
+
// We need to keep these attributes for future diagnostic
// before they are taken over by declaration specifier.
FnAttrs.addAll(attrs.begin(), attrs.end());
@@ -2597,8 +2682,6 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
MaybeParseMicrosoftAttributes(attrs);
if (Tok.is(tok::kw_using)) {
- ProhibitAttributes(attrs);
-
// Eat 'using'.
SourceLocation UsingLoc = ConsumeToken();
@@ -2617,7 +2700,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation DeclEnd;
// Otherwise, it must be a using-declaration or an alias-declaration.
return ParseUsingDeclaration(DeclaratorContext::Member, TemplateInfo,
- UsingLoc, DeclEnd, AS);
+ UsingLoc, DeclEnd, attrs, AS);
}
// Hold late-parsed attributes so we can attach a Decl to them later.
@@ -2733,8 +2816,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
else if (KW.is(tok::kw_delete))
DefinitionKind = FunctionDefinitionKind::Deleted;
else if (KW.is(tok::code_completion)) {
- Actions.CodeCompleteAfterFunctionEquals(DeclaratorInfo);
cutOffParsing();
+ Actions.CodeCompleteAfterFunctionEquals(DeclaratorInfo);
return nullptr;
}
}
@@ -2872,8 +2955,13 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
HasStaticInitializer = true;
}
+ if (PureSpecLoc.isValid() && VS.getAbstractLoc().isValid()) {
+ Diag(PureSpecLoc, diag::err_duplicate_virt_specifier) << "abstract";
+ }
if (ThisDecl && PureSpecLoc.isValid())
Actions.ActOnPureSpecifier(ThisDecl, PureSpecLoc);
+ else if (ThisDecl && VS.getAbstractLoc().isValid())
+ Actions.ActOnPureSpecifier(ThisDecl, VS.getAbstractLoc());
// Handle the initializer.
if (HasInClassInit != ICIS_NoInit) {
@@ -3180,6 +3268,7 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
return nullptr;
}
+ case tok::annot_attr_openmp:
case tok::annot_pragma_openmp:
return ParseOpenMPDeclarativeDirectiveWithExtDecl(
AS, AccessAttrs, /*Delayed=*/true, TagType, TagDecl);
@@ -3258,30 +3347,53 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
SourceLocation FinalLoc;
+ SourceLocation AbstractLoc;
bool IsFinalSpelledSealed = false;
+ bool IsAbstract = false;
// Parse the optional 'final' keyword.
if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) {
- VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier(Tok);
- assert((Specifier == VirtSpecifiers::VS_Final ||
- Specifier == VirtSpecifiers::VS_GNU_Final ||
- Specifier == VirtSpecifiers::VS_Sealed) &&
+ while (true) {
+ VirtSpecifiers::Specifier Specifier = isCXX11VirtSpecifier(Tok);
+ if (Specifier == VirtSpecifiers::VS_None)
+ break;
+ if (isCXX11FinalKeyword()) {
+ if (FinalLoc.isValid()) {
+ auto Skipped = ConsumeToken();
+ Diag(Skipped, diag::err_duplicate_class_virt_specifier)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ } else {
+ FinalLoc = ConsumeToken();
+ if (Specifier == VirtSpecifiers::VS_Sealed)
+ IsFinalSpelledSealed = true;
+ }
+ } else {
+ if (AbstractLoc.isValid()) {
+ auto Skipped = ConsumeToken();
+ Diag(Skipped, diag::err_duplicate_class_virt_specifier)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ } else {
+ AbstractLoc = ConsumeToken();
+ IsAbstract = true;
+ }
+ }
+ if (TagType == DeclSpec::TST_interface)
+ Diag(FinalLoc, diag::err_override_control_interface)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ else if (Specifier == VirtSpecifiers::VS_Final)
+ Diag(FinalLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_override_control_keyword
+ : diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ else if (Specifier == VirtSpecifiers::VS_Sealed)
+ Diag(FinalLoc, diag::ext_ms_sealed_keyword);
+ else if (Specifier == VirtSpecifiers::VS_Abstract)
+ Diag(AbstractLoc, diag::ext_ms_abstract_keyword);
+ else if (Specifier == VirtSpecifiers::VS_GNU_Final)
+ Diag(FinalLoc, diag::ext_warn_gnu_final);
+ }
+ assert((FinalLoc.isValid() || AbstractLoc.isValid()) &&
"not a class definition");
- FinalLoc = ConsumeToken();
- IsFinalSpelledSealed = Specifier == VirtSpecifiers::VS_Sealed;
-
- if (TagType == DeclSpec::TST_interface)
- Diag(FinalLoc, diag::err_override_control_interface)
- << VirtSpecifiers::getSpecifierName(Specifier);
- else if (Specifier == VirtSpecifiers::VS_Final)
- Diag(FinalLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_override_control_keyword
- : diag::ext_override_control_keyword)
- << VirtSpecifiers::getSpecifierName(Specifier);
- else if (Specifier == VirtSpecifiers::VS_Sealed)
- Diag(FinalLoc, diag::ext_ms_sealed_keyword);
- else if (Specifier == VirtSpecifiers::VS_GNU_Final)
- Diag(FinalLoc, diag::ext_warn_gnu_final);
// Parse any C++11 attributes after 'final' keyword.
// These attributes are not allowed to appear here,
@@ -3354,7 +3466,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
if (TagDecl)
Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, FinalLoc,
- IsFinalSpelledSealed,
+ IsFinalSpelledSealed, IsAbstract,
T.getOpenLocation());
// C++ 11p3: Members of a class defined with the keyword class are private
@@ -3401,15 +3513,6 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// declarations and the lexed inline method definitions, along with any
// delayed attributes.
- // Save the state of Sema.FPFeatures, and change the setting
- // to the levels specified on the command line. Previous level
- // will be restored when the RAII object is destroyed.
- Sema::FPFeaturesStateRAII SaveFPFeaturesState(Actions);
- FPOptionsOverride NewOverrides;
- Actions.CurFPFeatures = NewOverrides.applyOverrides(getLangOpts());
- Actions.FpPragmaStack.Act(Tok.getLocation(), Sema::PSK_Reset, StringRef(),
- {} /*unused*/);
-
SourceLocation SavedPrevTokLocation = PrevTokLocation;
ParseLexedPragmas(getCurrentClass());
ParseLexedAttributes(getCurrentClass());
@@ -3491,9 +3594,10 @@ void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
do {
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteConstructorInitializer(ConstructorDecl,
MemInitializers);
- return cutOffParsing();
+ return;
}
MemInitResult MemInit = ParseMemInitializer(ConstructorDecl);
@@ -4039,6 +4143,70 @@ IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
}
}
+void Parser::ParseOpenMPAttributeArgs(IdentifierInfo *AttrName,
+ CachedTokens &OpenMPTokens) {
+ // Both 'sequence' and 'directive' attributes require arguments, so parse the
+ // open paren for the argument list.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return;
+ }
+
+ if (AttrName->isStr("directive")) {
+ // If the attribute is named `directive`, we can consume its argument list
+ // and push the tokens from it into the cached token stream for a new OpenMP
+ // pragma directive.
+ Token OMPBeginTok;
+ OMPBeginTok.startToken();
+ OMPBeginTok.setKind(tok::annot_attr_openmp);
+ OMPBeginTok.setLocation(Tok.getLocation());
+ OpenMPTokens.push_back(OMPBeginTok);
+
+ ConsumeAndStoreUntil(tok::r_paren, OpenMPTokens, /*StopAtSemi=*/false,
+ /*ConsumeFinalToken*/ false);
+ Token OMPEndTok;
+ OMPEndTok.startToken();
+ OMPEndTok.setKind(tok::annot_pragma_openmp_end);
+ OMPEndTok.setLocation(Tok.getLocation());
+ OpenMPTokens.push_back(OMPEndTok);
+ } else {
+ assert(AttrName->isStr("sequence") &&
+ "Expected either 'directive' or 'sequence'");
+ // If the attribute is named 'sequence', its argument is a list of one or
+ // more OpenMP attributes (either 'omp::directive' or 'omp::sequence',
+ // where the 'omp::' is optional).
+ do {
+ // We expect to see one of the following:
+ // * An identifier (omp) for the attribute namespace followed by ::
+ // * An identifier (directive) or an identifier (sequence).
+ SourceLocation IdentLoc;
+ IdentifierInfo *Ident = TryParseCXX11AttributeIdentifier(IdentLoc);
+
+ // If there is an identifier and it is 'omp', a double colon is required
+ // followed by the actual identifier we're after.
+ if (Ident && Ident->isStr("omp") && !ExpectAndConsume(tok::coloncolon))
+ Ident = TryParseCXX11AttributeIdentifier(IdentLoc);
+
+ // If we failed to find an identifier (scoped or otherwise), or we found
+ // an unexpected identifier, diagnose.
+ if (!Ident || (!Ident->isStr("directive") && !Ident->isStr("sequence"))) {
+ Diag(Tok.getLocation(), diag::err_expected_sequence_or_directive);
+ SkipUntil(tok::r_paren, StopBeforeMatch);
+ continue;
+ }
+ // We read an identifier. If the identifier is one of the ones we
+ // expected, we can recurse to parse the args.
+ ParseOpenMPAttributeArgs(Ident, OpenMPTokens);
+
+ // There may be a comma to signal that we expect another directive in the
+ // sequence.
+ } while (TryConsumeToken(tok::comma));
+ }
+ // Parse the closing paren for the argument list.
+ T.consumeClose();
+}
+
static bool IsBuiltInOrStandardCXX11Attribute(IdentifierInfo *AttrName,
IdentifierInfo *ScopeName) {
switch (
@@ -4079,7 +4247,8 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc) {
+ SourceLocation ScopeLoc,
+ CachedTokens &OpenMPTokens) {
assert(Tok.is(tok::l_paren) && "Not a C++11 attribute argument list");
SourceLocation LParenLoc = Tok.getLocation();
const LangOptions &LO = getLangOpts();
@@ -4104,6 +4273,18 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
return true;
}
+ if (ScopeName && ScopeName->isStr("omp")) {
+ Diag(AttrNameLoc, getLangOpts().OpenMP >= 51
+ ? diag::warn_omp51_compat_attributes
+ : diag::ext_omp_attributes);
+
+ ParseOpenMPAttributeArgs(AttrName, OpenMPTokens);
+
+ // We claim that an attribute was parsed and added so that one is not
+ // created for us by the caller.
+ return true;
+ }
+
unsigned NumArgs;
// Some Clang-scoped attributes have some special parsing behavior.
if (ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang")))
@@ -4163,11 +4344,12 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
///
/// [C++11] attribute-namespace:
/// identifier
-void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
- SourceLocation *endLoc) {
+void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
+ CachedTokens &OpenMPTokens,
+ SourceLocation *EndLoc) {
if (Tok.is(tok::kw_alignas)) {
Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
- ParseAlignmentSpecifier(attrs, endLoc);
+ ParseAlignmentSpecifier(Attrs, EndLoc);
return;
}
@@ -4200,10 +4382,21 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
llvm::SmallDenseMap<IdentifierInfo*, SourceLocation, 4> SeenAttrs;
- while (Tok.isNot(tok::r_square)) {
- // attribute not present
- if (TryConsumeToken(tok::comma))
- continue;
+ bool AttrParsed = false;
+ while (!Tok.isOneOf(tok::r_square, tok::semi)) {
+ if (AttrParsed) {
+ // If we parsed an attribute, a comma is required before parsing any
+ // additional attributes.
+ if (ExpectAndConsume(tok::comma)) {
+ SkipUntil(tok::r_square, StopAtSemi | StopBeforeMatch);
+ continue;
+ }
+ AttrParsed = false;
+ }
+
+ // Eat all remaining superfluous commas before parsing the next attribute.
+ while (TryConsumeToken(tok::comma))
+ ;
SourceLocation ScopeLoc, AttrLoc;
IdentifierInfo *ScopeName = nullptr, *AttrName = nullptr;
@@ -4236,38 +4429,39 @@ void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
}
}
- bool StandardAttr = IsBuiltInOrStandardCXX11Attribute(AttrName, ScopeName);
- bool AttrParsed = false;
-
- if (StandardAttr &&
- !SeenAttrs.insert(std::make_pair(AttrName, AttrLoc)).second)
- Diag(AttrLoc, diag::err_cxx11_attribute_repeated)
- << AttrName << SourceRange(SeenAttrs[AttrName]);
-
// Parse attribute arguments
if (Tok.is(tok::l_paren))
- AttrParsed = ParseCXX11AttributeArgs(AttrName, AttrLoc, attrs, endLoc,
- ScopeName, ScopeLoc);
+ AttrParsed = ParseCXX11AttributeArgs(AttrName, AttrLoc, Attrs, EndLoc,
+ ScopeName, ScopeLoc, OpenMPTokens);
- if (!AttrParsed)
- attrs.addNew(
+ if (!AttrParsed) {
+ Attrs.addNew(
AttrName,
SourceRange(ScopeLoc.isValid() ? ScopeLoc : AttrLoc, AttrLoc),
ScopeName, ScopeLoc, nullptr, 0,
getLangOpts().CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x);
+ AttrParsed = true;
+ }
if (TryConsumeToken(tok::ellipsis))
Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
<< AttrName;
}
+ // If we hit an error and recovered by parsing up to a semicolon, eat the
+ // semicolon and don't issue further diagnostics about missing brackets.
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ return;
+ }
+
SourceLocation CloseLoc = Tok.getLocation();
if (ExpectAndConsume(tok::r_square))
SkipUntil(tok::r_square);
else if (Tok.is(tok::r_square))
checkCompoundToken(CloseLoc, tok::r_square, CompoundToken::AttrEnd);
- if (endLoc)
- *endLoc = Tok.getLocation();
+ if (EndLoc)
+ *EndLoc = Tok.getLocation();
if (ExpectAndConsume(tok::r_square))
SkipUntil(tok::r_square);
}
diff --git a/clang/lib/Parse/ParseExpr.cpp b/clang/lib/Parse/ParseExpr.cpp
index 6acf76d713fd..22f3b7624c45 100644
--- a/clang/lib/Parse/ParseExpr.cpp
+++ b/clang/lib/Parse/ParseExpr.cpp
@@ -159,9 +159,9 @@ Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
/// Parse an expr that doesn't include (top-level) commas.
ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteExpression(getCurScope(),
PreferredType.get(Tok.getLocation()));
- cutOffParsing();
return ExprError();
}
@@ -1156,9 +1156,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
ConsumeToken();
if (Tok.is(tok::code_completion) && &II != Ident_super) {
+ cutOffParsing();
Actions.CodeCompleteObjCClassPropertyRefExpr(
getCurScope(), II, ILoc, ExprStatementTokLoc == ILoc);
- cutOffParsing();
return ExprError();
}
// Allow either an identifier or the keyword 'class' (in C++).
@@ -1469,6 +1469,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw_this:
Res = ParseCXXThis();
break;
+ case tok::kw___builtin_sycl_unique_stable_name:
+ Res = ParseSYCLUniqueStableNameExpression();
+ break;
case tok::annot_typename:
if (isStartOfObjCClassMessageMissingOpenBracket()) {
@@ -1724,9 +1727,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
Res = ParseBlockLiteralExpression();
break;
case tok::code_completion: {
+ cutOffParsing();
Actions.CodeCompleteExpression(getCurScope(),
PreferredType.get(Tok.getLocation()));
- cutOffParsing();
return ExprError();
}
case tok::l_square:
@@ -1807,7 +1810,9 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
// These can be followed by postfix-expr pieces.
PreferredType = SavedType;
Res = ParsePostfixExpressionSuffix(Res);
- if (getLangOpts().OpenCL)
+ if (getLangOpts().OpenCL &&
+ !getActions().getOpenCLOptions().isAvailableOption(
+ "__cl_clang_function_pointers", getLangOpts()))
if (Expr *PostfixExpr = Res.get()) {
QualType Ty = PostfixExpr->getType();
if (!Ty.isNull() && Ty->isFunctionType()) {
@@ -1854,9 +1859,9 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
if (InMessageExpression)
return LHS;
+ cutOffParsing();
Actions.CodeCompletePostfixExpression(
getCurScope(), LHS, PreferredType.get(Tok.getLocation()));
- cutOffParsing();
return ExprError();
case tok::identifier:
@@ -2138,12 +2143,12 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
CorrectedBase = Base;
// Code completion for a member access expression.
+ cutOffParsing();
Actions.CodeCompleteMemberReferenceExpr(
getCurScope(), Base, CorrectedBase, OpLoc, OpKind == tok::arrow,
Base && ExprStatementTokLoc == Base->getBeginLoc(),
PreferredType.get(Tok.getLocation()));
- cutOffParsing();
return ExprError();
}
@@ -2322,6 +2327,33 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
return Operand;
}
+/// Parse a __builtin_sycl_unique_stable_name expression. Accepts a type-id as
+/// a parameter.
+ExprResult Parser::ParseSYCLUniqueStableNameExpression() {
+ assert(Tok.is(tok::kw___builtin_sycl_unique_stable_name) &&
+         "Not __builtin_sycl_unique_stable_name");
+
+ SourceLocation OpLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // __builtin_sycl_unique_stable_name expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "__builtin_sycl_unique_stable_name"))
+ return ExprError();
+
+ TypeResult Ty = ParseTypeName();
+
+ if (Ty.isInvalid()) {
+ T.skipToEnd();
+ return ExprError();
+ }
+
+ if (T.consumeClose())
+ return ExprError();
+
+ return Actions.ActOnSYCLUniqueStableNameExpr(OpLoc, T.getOpenLocation(),
+ T.getCloseLocation(), Ty.get());
+}
/// Parse a sizeof or alignof expression.
///
@@ -2776,10 +2808,10 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
CastTy = nullptr;
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteExpression(
getCurScope(), PreferredType.get(Tok.getLocation()),
/*IsParenthesized=*/ExprType >= CompoundLiteral);
- cutOffParsing();
return ExprError();
}
@@ -3410,8 +3442,9 @@ Parser::ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
/// \endverbatim
void Parser::ParseBlockId(SourceLocation CaretLoc) {
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
- return cutOffParsing();
+ return;
}
// Parse the specifier-qualifier-list piece.
@@ -3596,8 +3629,8 @@ Optional<AvailabilitySpec> Parser::ParseAvailabilitySpec() {
} else {
// Parse the platform name.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteAvailabilityPlatformName();
cutOffParsing();
+ Actions.CodeCompleteAvailabilityPlatformName();
return None;
}
if (Tok.isNot(tok::identifier)) {
diff --git a/clang/lib/Parse/ParseExprCXX.cpp b/clang/lib/Parse/ParseExprCXX.cpp
index 4b5703d79f28..f3d10b4a0889 100644
--- a/clang/lib/Parse/ParseExprCXX.cpp
+++ b/clang/lib/Parse/ParseExprCXX.cpp
@@ -9,7 +9,6 @@
// This file implements the Expression parsing implementation for C++.
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclTemplate.h"
@@ -17,6 +16,7 @@
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -235,6 +235,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
while (true) {
if (HasScopeSpecifier) {
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
// Code completion for a nested-name-specifier, where the code
// completion token follows the '::'.
Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext,
@@ -245,7 +246,6 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
// token will cause assertion in
// Preprocessor::AnnotatePreviousCachedTokens.
SS.setEndLoc(Tok.getLocation());
- cutOffParsing();
return true;
}
@@ -688,9 +688,9 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
/// ParseLambdaExpression - Parse a C++11 lambda expression.
///
/// lambda-expression:
-/// lambda-introducer lambda-declarator[opt] compound-statement
+/// lambda-introducer lambda-declarator compound-statement
/// lambda-introducer '<' template-parameter-list '>'
-/// lambda-declarator[opt] compound-statement
+/// requires-clause[opt] lambda-declarator compound-statement
///
/// lambda-introducer:
/// '[' lambda-capture[opt] ']'
@@ -722,9 +722,13 @@ ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
/// '&' identifier initializer
///
/// lambda-declarator:
-/// '(' parameter-declaration-clause ')' attribute-specifier[opt]
-/// 'mutable'[opt] exception-specification[opt]
-/// trailing-return-type[opt]
+/// lambda-specifiers [C++2b]
+/// '(' parameter-declaration-clause ')' lambda-specifiers
+/// requires-clause[opt]
+///
+/// lambda-specifiers:
+/// decl-specifier-seq[opt] noexcept-specifier[opt]
+/// attribute-specifier-seq[opt] trailing-return-type[opt]
///
ExprResult Parser::ParseLambdaExpression() {
// Parse lambda-introducer.
@@ -877,9 +881,9 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// expression parser perform the completion.
if (Tok.is(tok::code_completion) &&
!(getLangOpts().ObjC && Tentative)) {
+ cutOffParsing();
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/false);
- cutOffParsing();
break;
}
@@ -891,6 +895,7 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
}
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
// If we're in Objective-C++ and we have a bare '[', then this is more
// likely to be a message receiver.
if (getLangOpts().ObjC && Tentative && First)
@@ -898,7 +903,6 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
else
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/false);
- cutOffParsing();
break;
}
@@ -943,9 +947,9 @@ bool Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
ConsumeToken();
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
/*AfterAmpersand=*/true);
- cutOffParsing();
break;
}
}
@@ -1249,7 +1253,6 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
Actions.PushLambdaScope();
ParsedAttributes Attr(AttrFactory);
- SourceLocation DeclLoc = Tok.getLocation();
if (getLangOpts().CUDA) {
// In CUDA code, GNU attributes are allowed to appear immediately after the
// "[...]", even if there is no "(...)" before the lambda body.
@@ -1302,13 +1305,99 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
}
+ // Implement WG21 P2173, which allows attributes immediately before the
+ // lambda declarator and applies them to the corresponding function operator
+ // or operator template declaration. We accept this as a conforming extension
+ // in all language modes that support lambdas.
+ if (isCXX11AttributeSpecifier()) {
+ Diag(Tok, getLangOpts().CPlusPlus2b
+ ? diag::warn_cxx20_compat_decl_attrs_on_lambda
+ : diag::ext_decl_attrs_on_lambda);
+ MaybeParseCXX11Attributes(D);
+ }
+
TypeResult TrailingReturnType;
SourceLocation TrailingReturnTypeLoc;
+
+ auto ParseLambdaSpecifiers =
+ [&](SourceLocation LParenLoc, SourceLocation RParenLoc,
+ MutableArrayRef<DeclaratorChunk::ParamInfo> ParamInfo,
+ SourceLocation EllipsisLoc) {
+ SourceLocation DeclEndLoc = RParenLoc;
+
+ // GNU-style attributes must be parsed before the mutable specifier to
+ // be compatible with GCC. MSVC-style attributes must be parsed before
+ // the mutable specifier to be compatible with MSVC.
+ MaybeParseAttributes(PAKM_GNU | PAKM_Declspec, Attr);
+
+ // Parse mutable-opt and/or constexpr-opt or consteval-opt, and update
+ // the DeclEndLoc.
+ SourceLocation MutableLoc;
+ SourceLocation ConstexprLoc;
+ SourceLocation ConstevalLoc;
+ tryConsumeLambdaSpecifierToken(*this, MutableLoc, ConstexprLoc,
+ ConstevalLoc, DeclEndLoc);
+
+ addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
+ addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
+ // Parse exception-specification[opt].
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ SmallVector<ParsedType, 2> DynamicExceptions;
+ SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ CachedTokens *ExceptionSpecTokens;
+ ESpecType = tryParseExceptionSpecification(
+ /*Delayed=*/false, ESpecRange, DynamicExceptions,
+ DynamicExceptionRanges, NoexceptExpr, ExceptionSpecTokens);
+
+ if (ESpecType != EST_None)
+ DeclEndLoc = ESpecRange.getEnd();
+
+ // Parse attribute-specifier[opt].
+ MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
+
+ // Parse OpenCL addr space attribute.
+ if (Tok.isOneOf(tok::kw___private, tok::kw___global, tok::kw___local,
+ tok::kw___constant, tok::kw___generic)) {
+ ParseOpenCLQualifiers(DS.getAttributes());
+ ConsumeToken();
+ }
+
+ SourceLocation FunLocalRangeEnd = DeclEndLoc;
+
+ // Parse trailing-return-type[opt].
+ if (Tok.is(tok::arrow)) {
+ FunLocalRangeEnd = Tok.getLocation();
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(
+ Range, /*MayBeFollowedByDirectInit*/ false);
+ TrailingReturnTypeLoc = Range.getBegin();
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
+
+ SourceLocation NoLoc;
+ D.AddTypeInfo(
+ DeclaratorChunk::getFunction(
+ /*HasProto=*/true,
+ /*IsAmbiguous=*/false, LParenLoc, ParamInfo.data(),
+ ParamInfo.size(), EllipsisLoc, RParenLoc,
+ /*RefQualifierIsLvalueRef=*/true,
+ /*RefQualifierLoc=*/NoLoc, MutableLoc, ESpecType, ESpecRange,
+ DynamicExceptions.data(), DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
+ /*ExceptionSpecTokens*/ nullptr,
+ /*DeclsInPrototype=*/None, LParenLoc, FunLocalRangeEnd, D,
+ TrailingReturnType, TrailingReturnTypeLoc, &DS),
+ std::move(Attr), DeclEndLoc);
+ };
+
if (Tok.is(tok::l_paren)) {
- ParseScope PrototypeScope(this,
- Scope::FunctionPrototypeScope |
- Scope::FunctionDeclarationScope |
- Scope::DeclScope);
+ ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope |
+ Scope::FunctionDeclarationScope |
+ Scope::DeclScope);
BalancedDelimiterTracker T(*this, tok::l_paren);
T.consumeOpen();
@@ -1334,170 +1423,36 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
}
T.consumeClose();
- SourceLocation RParenLoc = T.getCloseLocation();
- SourceLocation DeclEndLoc = RParenLoc;
-
- // GNU-style attributes must be parsed before the mutable specifier to be
- // compatible with GCC.
- MaybeParseGNUAttributes(Attr, &DeclEndLoc);
-
- // MSVC-style attributes must be parsed before the mutable specifier to be
- // compatible with MSVC.
- MaybeParseMicrosoftDeclSpecs(Attr, &DeclEndLoc);
-
- // Parse mutable-opt and/or constexpr-opt or consteval-opt, and update the
- // DeclEndLoc.
- SourceLocation MutableLoc;
- SourceLocation ConstexprLoc;
- SourceLocation ConstevalLoc;
- tryConsumeLambdaSpecifierToken(*this, MutableLoc, ConstexprLoc,
- ConstevalLoc, DeclEndLoc);
-
- addConstexprToLambdaDeclSpecifier(*this, ConstexprLoc, DS);
- addConstevalToLambdaDeclSpecifier(*this, ConstevalLoc, DS);
- // Parse exception-specification[opt].
- ExceptionSpecificationType ESpecType = EST_None;
- SourceRange ESpecRange;
- SmallVector<ParsedType, 2> DynamicExceptions;
- SmallVector<SourceRange, 2> DynamicExceptionRanges;
- ExprResult NoexceptExpr;
- CachedTokens *ExceptionSpecTokens;
- ESpecType = tryParseExceptionSpecification(/*Delayed=*/false,
- ESpecRange,
- DynamicExceptions,
- DynamicExceptionRanges,
- NoexceptExpr,
- ExceptionSpecTokens);
-
- if (ESpecType != EST_None)
- DeclEndLoc = ESpecRange.getEnd();
-
- // Parse attribute-specifier[opt].
- MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
-
- // Parse OpenCL addr space attribute.
- if (Tok.isOneOf(tok::kw___private, tok::kw___global, tok::kw___local,
- tok::kw___constant, tok::kw___generic)) {
- ParseOpenCLQualifiers(DS.getAttributes());
- ConsumeToken();
- }
-
- SourceLocation FunLocalRangeEnd = DeclEndLoc;
-
- // Parse trailing-return-type[opt].
- if (Tok.is(tok::arrow)) {
- FunLocalRangeEnd = Tok.getLocation();
- SourceRange Range;
- TrailingReturnType =
- ParseTrailingReturnType(Range, /*MayBeFollowedByDirectInit*/ false);
- TrailingReturnTypeLoc = Range.getBegin();
- if (Range.getEnd().isValid())
- DeclEndLoc = Range.getEnd();
- }
- SourceLocation NoLoc;
- D.AddTypeInfo(DeclaratorChunk::getFunction(
- /*HasProto=*/true,
- /*IsAmbiguous=*/false, LParenLoc, ParamInfo.data(),
- ParamInfo.size(), EllipsisLoc, RParenLoc,
- /*RefQualifierIsLvalueRef=*/true,
- /*RefQualifierLoc=*/NoLoc, MutableLoc, ESpecType,
- ESpecRange, DynamicExceptions.data(),
- DynamicExceptionRanges.data(), DynamicExceptions.size(),
- NoexceptExpr.isUsable() ? NoexceptExpr.get() : nullptr,
- /*ExceptionSpecTokens*/ nullptr,
- /*DeclsInPrototype=*/None, LParenLoc, FunLocalRangeEnd, D,
- TrailingReturnType, TrailingReturnTypeLoc, &DS),
- std::move(Attr), DeclEndLoc);
+ // Parse lambda-specifiers.
+ ParseLambdaSpecifiers(LParenLoc, /*DeclEndLoc=*/T.getCloseLocation(),
+ ParamInfo, EllipsisLoc);
// Parse requires-clause[opt].
if (Tok.is(tok::kw_requires))
ParseTrailingRequiresClause(D);
-
- PrototypeScope.Exit();
-
- WarnIfHasCUDATargetAttr();
} else if (Tok.isOneOf(tok::kw_mutable, tok::arrow, tok::kw___attribute,
tok::kw_constexpr, tok::kw_consteval,
tok::kw___private, tok::kw___global, tok::kw___local,
tok::kw___constant, tok::kw___generic,
- tok::kw_requires) ||
+ tok::kw_requires, tok::kw_noexcept) ||
(Tok.is(tok::l_square) && NextToken().is(tok::l_square))) {
- // It's common to forget that one needs '()' before 'mutable', an attribute
- // specifier, the result type, or the requires clause. Deal with this.
- unsigned TokKind = 0;
- switch (Tok.getKind()) {
- case tok::kw_mutable: TokKind = 0; break;
- case tok::arrow: TokKind = 1; break;
- case tok::kw___attribute:
- case tok::kw___private:
- case tok::kw___global:
- case tok::kw___local:
- case tok::kw___constant:
- case tok::kw___generic:
- case tok::l_square: TokKind = 2; break;
- case tok::kw_constexpr: TokKind = 3; break;
- case tok::kw_consteval: TokKind = 4; break;
- case tok::kw_requires: TokKind = 5; break;
- default: llvm_unreachable("Unknown token kind");
- }
-
- Diag(Tok, diag::err_lambda_missing_parens)
- << TokKind
- << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
- SourceLocation DeclEndLoc = DeclLoc;
-
- // GNU-style attributes must be parsed before the mutable specifier to be
- // compatible with GCC.
- MaybeParseGNUAttributes(Attr, &DeclEndLoc);
-
- // Parse 'mutable', if it's there.
- SourceLocation MutableLoc;
- if (Tok.is(tok::kw_mutable)) {
- MutableLoc = ConsumeToken();
- DeclEndLoc = MutableLoc;
- }
-
- // Parse attribute-specifier[opt].
- MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
-
- // Parse the return type, if there is one.
- if (Tok.is(tok::arrow)) {
- SourceRange Range;
- TrailingReturnType =
- ParseTrailingReturnType(Range, /*MayBeFollowedByDirectInit*/ false);
- if (Range.getEnd().isValid())
- DeclEndLoc = Range.getEnd();
- }
+ if (!getLangOpts().CPlusPlus2b)
+ // It's common to forget that one needs '()' before 'mutable', an
+ // attribute specifier, the result type, or the requires clause. Deal with
+ // this.
+ Diag(Tok, diag::ext_lambda_missing_parens)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
SourceLocation NoLoc;
- D.AddTypeInfo(DeclaratorChunk::getFunction(
- /*HasProto=*/true,
- /*IsAmbiguous=*/false,
- /*LParenLoc=*/NoLoc,
- /*Params=*/nullptr,
- /*NumParams=*/0,
- /*EllipsisLoc=*/NoLoc,
- /*RParenLoc=*/NoLoc,
- /*RefQualifierIsLvalueRef=*/true,
- /*RefQualifierLoc=*/NoLoc, MutableLoc, EST_None,
- /*ESpecRange=*/SourceRange(),
- /*Exceptions=*/nullptr,
- /*ExceptionRanges=*/nullptr,
- /*NumExceptions=*/0,
- /*NoexceptExpr=*/nullptr,
- /*ExceptionSpecTokens=*/nullptr,
- /*DeclsInPrototype=*/None, DeclLoc, DeclEndLoc, D,
- TrailingReturnType),
- std::move(Attr), DeclEndLoc);
-
- // Parse the requires-clause, if present.
- if (Tok.is(tok::kw_requires))
- ParseTrailingRequiresClause(D);
-
- WarnIfHasCUDATargetAttr();
+ // Parse lambda-specifiers.
+ std::vector<DeclaratorChunk::ParamInfo> EmptyParamInfo;
+ ParseLambdaSpecifiers(/*LParenLoc=*/NoLoc, /*RParenLoc=*/NoLoc,
+ EmptyParamInfo, /*EllipsisLoc=*/NoLoc);
}
+ WarnIfHasCUDATargetAttr();
+
// FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
// it.
unsigned ScopeFlags = Scope::BlockScope | Scope::FnScope | Scope::DeclScope |
@@ -1979,17 +1934,36 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
/// \param FRI If non-null, a for range declaration is permitted, and if
/// present will be parsed and stored here, and a null result will be returned.
///
+/// \param EnterForConditionScope If true, enter a continue/break scope at the
+/// appropriate moment for a 'for' loop.
+///
/// \returns The parsed condition.
Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
- ForRangeInfo *FRI) {
+ ForRangeInfo *FRI,
+ bool EnterForConditionScope) {
+ // Helper to ensure we always enter a continue/break scope if requested.
+ struct ForConditionScopeRAII {
+ Scope *S;
+ void enter(bool IsConditionVariable) {
+ if (S) {
+ S->AddFlags(Scope::BreakScope | Scope::ContinueScope);
+ S->setIsConditionVarScope(IsConditionVariable);
+ }
+ }
+ ~ForConditionScopeRAII() {
+ if (S)
+ S->setIsConditionVarScope(false);
+ }
+ } ForConditionScope{EnterForConditionScope ? getCurScope() : nullptr};
+
ParenBraceBracketBalancer BalancerRAIIObj(*this);
PreferredType.enterCondition(Actions, Tok.getLocation());
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
cutOffParsing();
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
return Sema::ConditionError();
}
@@ -2006,6 +1980,9 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
// Determine what kind of thing we have.
switch (isCXXConditionDeclarationOrInitStatement(InitStmt, FRI)) {
case ConditionOrInitStatement::Expression: {
+ // If this is a for loop, we're entering its condition.
+ ForConditionScope.enter(/*IsConditionVariable=*/false);
+
ProhibitAttributes(attrs);
// We can have an empty expression here.
@@ -2048,11 +2025,16 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
}
case ConditionOrInitStatement::ForRangeDecl: {
+ // This is 'for (init-stmt; for-range-decl : range-expr)'.
+ // We're not actually in a for loop yet, so 'break' and 'continue' aren't
+ // permitted here.
assert(FRI && "should not parse a for range declaration here");
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy DG = ParseSimpleDeclaration(DeclaratorContext::ForInit,
DeclEnd, attrs, false, FRI);
FRI->LoopVar = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
+ assert((FRI->ColonLoc.isValid() || !DG) &&
+ "cannot find for range declaration");
return Sema::ConditionResult();
}
@@ -2061,6 +2043,9 @@ Sema::ConditionResult Parser::ParseCXXCondition(StmtResult *InitStmt,
break;
}
+ // If this is a for loop, we're entering its condition.
+ ForConditionScope.enter(/*IsConditionVariable=*/true);
+
// type-specifier-seq
DeclSpec DS(AttrFactory);
DS.takeAttributesFrom(attrs);
@@ -2600,10 +2585,10 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
}
case tok::code_completion: {
+ // Don't try to parse any further.
+ cutOffParsing();
// Code completion for the operator name.
Actions.CodeCompleteOperatorName(getCurScope());
- cutOffParsing();
- // Don't try to parse any further.
return true;
}
@@ -2651,9 +2636,10 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Grab the literal operator's suffix, which will be either the next token
// or a ud-suffix from the string literal.
+ bool IsUDSuffix = !Literal.getUDSuffix().empty();
IdentifierInfo *II = nullptr;
SourceLocation SuffixLoc;
- if (!Literal.getUDSuffix().empty()) {
+ if (IsUDSuffix) {
II = &PP.getIdentifierTable().get(Literal.getUDSuffix());
SuffixLoc =
Lexer::AdvanceToTokenCharacter(TokLocs[Literal.getUDSuffixToken()],
@@ -2690,7 +2676,7 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
Result.setLiteralOperatorId(II, KeywordLoc, SuffixLoc);
- return Actions.checkLiteralOperatorId(SS, Result);
+ return Actions.checkLiteralOperatorId(SS, Result, IsUDSuffix);
}
// Parse a conversion-function-id.
diff --git a/clang/lib/Parse/ParseInit.cpp b/clang/lib/Parse/ParseInit.cpp
index 9ac2b2e6f79b..9d9c03d28a97 100644
--- a/clang/lib/Parse/ParseInit.cpp
+++ b/clang/lib/Parse/ParseInit.cpp
@@ -159,8 +159,7 @@ static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
///
/// \p CodeCompleteCB is called with Designation parsed so far.
ExprResult Parser::ParseInitializerWithPotentialDesignator(
- llvm::function_ref<void(const Designation &)> CodeCompleteCB) {
-
+ DesignatorCompletionInfo DesignatorCompletion) {
// If this is the old-style GNU extension:
// designation ::= identifier ':'
// Handle it as a field designator. Otherwise, this must be the start of a
@@ -183,6 +182,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
Designation D;
D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
+ PreferredType.enterDesignatedInitializer(
+ Tok.getLocation(), DesignatorCompletion.PreferredBaseType, D);
return Actions.ActOnDesignatedInitializer(D, ColonLoc, true,
ParseInitializer());
}
@@ -199,8 +200,9 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
SourceLocation DotLoc = ConsumeToken();
if (Tok.is(tok::code_completion)) {
- CodeCompleteCB(Desig);
cutOffParsing();
+ Actions.CodeCompleteDesignator(DesignatorCompletion.PreferredBaseType,
+ DesignatorCompletion.InitExprs, Desig);
return ExprError();
}
if (Tok.isNot(tok::identifier)) {
@@ -388,6 +390,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
// Handle a normal designator sequence end, which is an equal.
if (Tok.is(tok::equal)) {
SourceLocation EqualLoc = ConsumeToken();
+ PreferredType.enterDesignatedInitializer(
+ Tok.getLocation(), DesignatorCompletion.PreferredBaseType, Desig);
return Actions.ActOnDesignatedInitializer(Desig, EqualLoc, false,
ParseInitializer());
}
@@ -396,6 +400,8 @@ ExprResult Parser::ParseInitializerWithPotentialDesignator(
// direct-list-initialization of the aggregate element. We allow this as an
// extension from C++11 onwards (when direct-list-initialization was added).
if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus11) {
+ PreferredType.enterDesignatedInitializer(
+ Tok.getLocation(), DesignatorCompletion.PreferredBaseType, Desig);
return Actions.ActOnDesignatedInitializer(Desig, SourceLocation(), false,
ParseBraceInitializer());
}
@@ -453,9 +459,9 @@ ExprResult Parser::ParseBraceInitializer() {
Actions, EnterExpressionEvaluationContext::InitList);
bool InitExprsOk = true;
- auto CodeCompleteDesignation = [&](const Designation &D) {
- Actions.CodeCompleteDesignator(PreferredType.get(T.getOpenLocation()),
- InitExprs, D);
+ DesignatorCompletionInfo DesignatorCompletion{
+ InitExprs,
+ PreferredType.get(T.getOpenLocation()),
};
while (1) {
@@ -476,7 +482,7 @@ ExprResult Parser::ParseBraceInitializer() {
// initializer directly.
ExprResult SubElt;
if (MayBeDesignationStart())
- SubElt = ParseInitializerWithPotentialDesignator(CodeCompleteDesignation);
+ SubElt = ParseInitializerWithPotentialDesignator(DesignatorCompletion);
else
SubElt = ParseInitializer();
@@ -556,9 +562,9 @@ bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
return false;
}
- auto CodeCompleteDesignation = [&](const Designation &D) {
- Actions.CodeCompleteDesignator(PreferredType.get(Braces.getOpenLocation()),
- InitExprs, D);
+ DesignatorCompletionInfo DesignatorCompletion{
+ InitExprs,
+ PreferredType.get(Braces.getOpenLocation()),
};
while (!isEofOrEom()) {
trailingComma = false;
@@ -566,7 +572,7 @@ bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
// initializer directly.
ExprResult SubElt;
if (MayBeDesignationStart())
- SubElt = ParseInitializerWithPotentialDesignator(CodeCompleteDesignation);
+ SubElt = ParseInitializerWithPotentialDesignator(DesignatorCompletion);
else
SubElt = ParseInitializer();
diff --git a/clang/lib/Parse/ParseObjc.cpp b/clang/lib/Parse/ParseObjc.cpp
index 88942ed173d0..9e145f57d61f 100644
--- a/clang/lib/Parse/ParseObjc.cpp
+++ b/clang/lib/Parse/ParseObjc.cpp
@@ -50,8 +50,8 @@ Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtDirective(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
return nullptr;
}
@@ -219,8 +219,8 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
// Code completion after '@interface'.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCInterfaceDecl(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCInterfaceDecl(getCurScope());
return nullptr;
}
@@ -253,8 +253,8 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
SourceLocation categoryLoc;
IdentifierInfo *categoryId = nullptr;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc);
cutOffParsing();
+ Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc);
return nullptr;
}
@@ -308,8 +308,8 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
// Code completion of superclass names.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc);
cutOffParsing();
+ Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc);
return nullptr;
}
@@ -472,8 +472,8 @@ ObjCTypeParamList *Parser::parseObjCTypeParamListOrProtocolRefs(
if (Tok.is(tok::code_completion)) {
// FIXME: If these aren't protocol references, we'll need different
// completions.
- Actions.CodeCompleteObjCProtocolReferences(protocolIdents);
cutOffParsing();
+ Actions.CodeCompleteObjCProtocolReferences(protocolIdents);
// FIXME: Better recovery here?.
return nullptr;
@@ -635,10 +635,11 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// Code completion within an Objective-C interface.
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(),
CurParsedObjCImpl? Sema::PCC_ObjCImplementation
: Sema::PCC_ObjCInterface);
- return cutOffParsing();
+ return;
}
// If we don't have an @ directive, parse it as a function definition.
@@ -668,8 +669,9 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// Otherwise, we have an @ directive, eat the @.
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCAtDirective(getCurScope());
- return cutOffParsing();
+ return;
}
tok::ObjCKeywordKind DirectiveKind = Tok.getObjCKeywordID();
@@ -778,8 +780,9 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
// We break out of the big loop in two cases: when we see @end or when we see
// EOF. In the former case, eat the @end. In the later case, emit an error.
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCAtDirective(getCurScope());
- return cutOffParsing();
+ return;
} else if (Tok.isObjCAtKeyword(tok::objc_end)) {
ConsumeToken(); // the "end" identifier
} else {
@@ -847,8 +850,9 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
while (1) {
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
- return cutOffParsing();
+ return;
}
const IdentifierInfo *II = Tok.getIdentifierInfo();
@@ -893,11 +897,12 @@ void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
}
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
if (IsSetter)
Actions.CodeCompleteObjCPropertySetter(getCurScope());
else
Actions.CodeCompleteObjCPropertyGetter(getCurScope());
- return cutOffParsing();
+ return;
}
SourceLocation SelLoc;
@@ -1146,9 +1151,10 @@ void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
while (1) {
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCPassingType(
getCurScope(), DS, Context == DeclaratorContext::ObjCParameter);
- return cutOffParsing();
+ return;
}
if (Tok.isNot(tok::identifier))
@@ -1335,9 +1341,9 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
ParsingDeclRAIIObject PD(*this, ParsingDeclRAIIObject::NoParent);
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
/*ReturnType=*/nullptr);
- cutOffParsing();
return nullptr;
}
@@ -1350,14 +1356,13 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the method, parse them.
ParsedAttributes methodAttrs(AttrFactory);
- if (getLangOpts().ObjC)
- MaybeParseGNUAttributes(methodAttrs);
- MaybeParseCXX11Attributes(methodAttrs);
+ MaybeParseAttributes(PAKM_CXX11 | (getLangOpts().ObjC ? PAKM_GNU : 0),
+ methodAttrs);
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
ReturnType);
- cutOffParsing();
return nullptr;
}
@@ -1377,9 +1382,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
if (Tok.isNot(tok::colon)) {
// If attributes exist after the method, parse them.
- if (getLangOpts().ObjC)
- MaybeParseGNUAttributes(methodAttrs);
- MaybeParseCXX11Attributes(methodAttrs);
+ MaybeParseAttributes(PAKM_CXX11 | (getLangOpts().ObjC ? PAKM_GNU : 0),
+ methodAttrs);
Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
Decl *Result = Actions.ActOnMethodDeclaration(
@@ -1412,19 +1416,18 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// If attributes exist before the argument name, parse them.
// Regardless, collect all the attributes we've parsed so far.
- if (getLangOpts().ObjC)
- MaybeParseGNUAttributes(paramAttrs);
- MaybeParseCXX11Attributes(paramAttrs);
+ MaybeParseAttributes(PAKM_CXX11 | (getLangOpts().ObjC ? PAKM_GNU : 0),
+ paramAttrs);
ArgInfo.ArgAttrs = paramAttrs;
// Code completion for the next piece of the selector.
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
KeyIdents.push_back(SelIdent);
Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
mType == tok::minus,
/*AtParameterName=*/true,
ReturnType, KeyIdents);
- cutOffParsing();
return nullptr;
}
@@ -1444,11 +1447,11 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// Code completion for the next piece of the selector.
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
mType == tok::minus,
/*AtParameterName=*/false,
ReturnType, KeyIdents);
- cutOffParsing();
return nullptr;
}
@@ -1496,9 +1499,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
// FIXME: Add support for optional parameter list...
// If attributes exist after the method, parse them.
- if (getLangOpts().ObjC)
- MaybeParseGNUAttributes(methodAttrs);
- MaybeParseCXX11Attributes(methodAttrs);
+ MaybeParseAttributes(PAKM_CXX11 | (getLangOpts().ObjC ? PAKM_GNU : 0),
+ methodAttrs);
if (KeyIdents.size() == 0)
return nullptr;
@@ -1531,8 +1533,8 @@ ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
while (1) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
cutOffParsing();
+ Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents);
return true;
}
@@ -1630,12 +1632,12 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
}
QualType BaseT = Actions.GetTypeFromParser(baseType);
+ cutOffParsing();
if (!BaseT.isNull() && BaseT->acceptsObjCTypeParams()) {
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
} else {
Actions.CodeCompleteObjCProtocolReferences(identifierLocPairs);
}
- cutOffParsing();
return;
}
@@ -1924,8 +1926,9 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
// Set the default visibility to private.
if (TryConsumeToken(tok::at)) { // parse objc-visibility-spec
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteObjCAtVisibility(getCurScope());
- return cutOffParsing();
+ return;
}
switch (Tok.getObjCKeywordID()) {
@@ -1954,9 +1957,10 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
}
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(),
Sema::PCC_ObjCInstanceVariableList);
- return cutOffParsing();
+ return;
}
// This needs to duplicate a small amount of code from
@@ -2021,8 +2025,8 @@ Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
ConsumeToken(); // the "protocol" identifier
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCProtocolDecl(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCProtocolDecl(getCurScope());
return nullptr;
}
@@ -2105,8 +2109,8 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
// Code completion after '@implementation'.
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCImplementationDecl(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCImplementationDecl(getCurScope());
return nullptr;
}
@@ -2143,8 +2147,8 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
IdentifierInfo *categoryId = nullptr;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
cutOffParsing();
+ Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
return nullptr;
}
@@ -2313,8 +2317,8 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
while (true) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
return nullptr;
}
@@ -2331,8 +2335,8 @@ Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
if (TryConsumeToken(tok::equal)) {
// property '=' ivar-name
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId);
cutOffParsing();
+ Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId);
return nullptr;
}
@@ -2391,8 +2395,8 @@ Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
while (true) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
return nullptr;
}
@@ -2728,8 +2732,8 @@ Decl *Parser::ParseObjCMethodDefinition() {
StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc,
ParsedStmtContext StmtCtx) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCAtStatement(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCAtStatement(getCurScope());
return StmtError();
}
@@ -2769,8 +2773,8 @@ StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc,
ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
switch (Tok.getKind()) {
case tok::code_completion:
- Actions.CodeCompleteObjCAtExpression(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCAtExpression(getCurScope());
return ExprError();
case tok::minus:
@@ -3016,8 +3020,8 @@ ExprResult Parser::ParseObjCMessageExpression() {
SourceLocation LBracLoc = ConsumeBracket(); // consume '['
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCMessageReceiver(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
return ExprError();
}
@@ -3153,6 +3157,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
InMessageExpressionRAIIObject InMessage(*this, true);
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
if (SuperLoc.isValid())
Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, None,
false);
@@ -3162,7 +3167,6 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
else
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
None, false);
- cutOffParsing();
return ExprError();
}
@@ -3191,6 +3195,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
/// Parse the expression after ':'
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
if (SuperLoc.isValid())
Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
KeyIdents,
@@ -3204,7 +3209,6 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
KeyIdents,
/*AtArgumentExpression=*/true);
- cutOffParsing();
return ExprError();
}
@@ -3229,6 +3233,7 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
// Code completion after each argument.
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
if (SuperLoc.isValid())
Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
KeyIdents,
@@ -3241,7 +3246,6 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
KeyIdents,
/*AtArgumentExpression=*/false);
- cutOffParsing();
return ExprError();
}
@@ -3581,8 +3585,8 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
ConsumeParen();
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
cutOffParsing();
+ Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
return ExprError();
}
@@ -3607,8 +3611,8 @@ ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
break;
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
cutOffParsing();
+ Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents);
return ExprError();
}
diff --git a/clang/lib/Parse/ParseOpenMP.cpp b/clang/lib/Parse/ParseOpenMP.cpp
index db7e967b15ae..18e43c3734ac 100644
--- a/clang/lib/Parse/ParseOpenMP.cpp
+++ b/clang/lib/Parse/ParseOpenMP.cpp
@@ -131,6 +131,7 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
{OMPD_declare, OMPD_simd, OMPD_declare_simd},
{OMPD_declare, OMPD_target, OMPD_declare_target},
{OMPD_declare, OMPD_variant, OMPD_declare_variant},
+ {OMPD_begin_declare, OMPD_target, OMPD_begin_declare_target},
{OMPD_begin_declare, OMPD_variant, OMPD_begin_declare_variant},
{OMPD_end_declare, OMPD_variant, OMPD_end_declare_variant},
{OMPD_distribute, OMPD_parallel, OMPD_distribute_parallel},
@@ -441,9 +442,9 @@ void Parser::ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm) {
ConsumeToken();
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteInitializer(getCurScope(), OmpPrivParm);
Actions.FinalizeDeclaration(OmpPrivParm);
- cutOffParsing();
return;
}
@@ -1664,30 +1665,41 @@ parseOpenMPSimpleClause(Parser &P, OpenMPClauseKind Kind) {
return SimpleClauseData(Type, Loc, LOpen, TypeLoc, RLoc);
}
-Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
- // OpenMP 4.5 syntax with list of entities.
- Sema::NamedDeclSetType SameDirectiveDecls;
- SmallVector<std::tuple<OMPDeclareTargetDeclAttr::MapTypeTy, SourceLocation,
- NamedDecl *>,
- 4>
- DeclareTargetDecls;
- OMPDeclareTargetDeclAttr::DevTypeTy DT = OMPDeclareTargetDeclAttr::DT_Any;
+void Parser::ParseOMPDeclareTargetClauses(
+ Sema::DeclareTargetContextInfo &DTCI) {
SourceLocation DeviceTypeLoc;
+ bool RequiresToOrLinkClause = false;
+ bool HasToOrLinkClause = false;
while (Tok.isNot(tok::annot_pragma_openmp_end)) {
OMPDeclareTargetDeclAttr::MapTypeTy MT = OMPDeclareTargetDeclAttr::MT_To;
- if (Tok.is(tok::identifier)) {
+ bool HasIdentifier = Tok.is(tok::identifier);
+ if (HasIdentifier) {
+ // If we see any clause we need a to or link clause.
+ RequiresToOrLinkClause = true;
IdentifierInfo *II = Tok.getIdentifierInfo();
StringRef ClauseName = II->getName();
bool IsDeviceTypeClause =
getLangOpts().OpenMP >= 50 &&
getOpenMPClauseKind(ClauseName) == OMPC_device_type;
- // Parse 'to|link|device_type' clauses.
- if (!OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName, MT) &&
- !IsDeviceTypeClause) {
+
+ bool IsToOrLinkClause =
+ OMPDeclareTargetDeclAttr::ConvertStrToMapTypeTy(ClauseName, MT);
+ assert((!IsDeviceTypeClause || !IsToOrLinkClause) && "Cannot be both!");
+
+ if (!IsDeviceTypeClause && DTCI.Kind == OMPD_begin_declare_target) {
Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
- << ClauseName << (getLangOpts().OpenMP >= 50 ? 1 : 0);
+ << ClauseName << 0;
break;
}
+ if (!IsDeviceTypeClause && !IsToOrLinkClause) {
+ Diag(Tok, diag::err_omp_declare_target_unexpected_clause)
+ << ClauseName << (getLangOpts().OpenMP >= 50 ? 2 : 1);
+ break;
+ }
+
+ if (IsToOrLinkClause)
+ HasToOrLinkClause = true;
+
// Parse 'device_type' clause and go to next clause if any.
if (IsDeviceTypeClause) {
Optional<SimpleClauseData> DevTypeData =
@@ -1697,16 +1709,17 @@ Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
// We already saw another device_type clause, diagnose it.
Diag(DevTypeData.getValue().Loc,
diag::warn_omp_more_one_device_type_clause);
+ break;
}
switch (static_cast<OpenMPDeviceType>(DevTypeData.getValue().Type)) {
case OMPC_DEVICE_TYPE_any:
- DT = OMPDeclareTargetDeclAttr::DT_Any;
+ DTCI.DT = OMPDeclareTargetDeclAttr::DT_Any;
break;
case OMPC_DEVICE_TYPE_host:
- DT = OMPDeclareTargetDeclAttr::DT_Host;
+ DTCI.DT = OMPDeclareTargetDeclAttr::DT_Host;
break;
case OMPC_DEVICE_TYPE_nohost:
- DT = OMPDeclareTargetDeclAttr::DT_NoHost;
+ DTCI.DT = OMPDeclareTargetDeclAttr::DT_NoHost;
break;
case OMPC_DEVICE_TYPE_unknown:
llvm_unreachable("Unexpected device_type");
@@ -1717,37 +1730,47 @@ Parser::DeclGroupPtrTy Parser::ParseOMPDeclareTargetClauses() {
}
ConsumeToken();
}
- auto &&Callback = [this, MT, &DeclareTargetDecls, &SameDirectiveDecls](
- CXXScopeSpec &SS, DeclarationNameInfo NameInfo) {
- NamedDecl *ND = Actions.lookupOpenMPDeclareTargetName(
- getCurScope(), SS, NameInfo, SameDirectiveDecls);
- if (ND)
- DeclareTargetDecls.emplace_back(MT, NameInfo.getLoc(), ND);
- };
- if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
- /*AllowScopeSpecifier=*/true))
+
+ if (DTCI.Kind == OMPD_declare_target || HasIdentifier) {
+ auto &&Callback = [this, MT, &DTCI](CXXScopeSpec &SS,
+ DeclarationNameInfo NameInfo) {
+ NamedDecl *ND =
+ Actions.lookupOpenMPDeclareTargetName(getCurScope(), SS, NameInfo);
+ if (!ND)
+ return;
+ Sema::DeclareTargetContextInfo::MapInfo MI{MT, NameInfo.getLoc()};
+ bool FirstMapping = DTCI.ExplicitlyMapped.try_emplace(ND, MI).second;
+ if (!FirstMapping)
+ Diag(NameInfo.getLoc(), diag::err_omp_declare_target_multiple)
+ << NameInfo.getName();
+ };
+ if (ParseOpenMPSimpleVarList(OMPD_declare_target, Callback,
+ /*AllowScopeSpecifier=*/true))
+ break;
+ }
+
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok,
+ diag::err_omp_begin_declare_target_unexpected_implicit_to_clause);
+ break;
+ }
+ if (!HasIdentifier && Tok.isNot(tok::annot_pragma_openmp_end)) {
+ Diag(Tok,
+ diag::err_omp_declare_target_unexpected_clause_after_implicit_to);
break;
+ }
// Consume optional ','.
if (Tok.is(tok::comma))
ConsumeToken();
}
+
+ // For declare target require at least 'to' or 'link' to be present.
+ if (DTCI.Kind == OMPD_declare_target && RequiresToOrLinkClause &&
+ !HasToOrLinkClause)
+ Diag(DTCI.Loc, diag::err_omp_declare_target_missing_to_or_link_clause);
+
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
- ConsumeAnyToken();
- for (auto &MTLocDecl : DeclareTargetDecls) {
- OMPDeclareTargetDeclAttr::MapTypeTy MT;
- SourceLocation Loc;
- NamedDecl *ND;
- std::tie(MT, Loc, ND) = MTLocDecl;
- // device_type clause is applied only to functions.
- Actions.ActOnOpenMPDeclareTargetName(
- ND, Loc, MT, isa<VarDecl>(ND) ? OMPDeclareTargetDeclAttr::DT_Any : DT);
- }
- SmallVector<Decl *, 4> Decls(SameDirectiveDecls.begin(),
- SameDirectiveDecls.end());
- if (Decls.empty())
- return DeclGroupPtrTy();
- return Actions.BuildDeclaratorGroup(Decls);
}
void Parser::skipUntilPragmaOpenMPEnd(OpenMPDirectiveKind DKind) {
@@ -1784,10 +1807,11 @@ void Parser::parseOMPEndDirective(OpenMPDirectiveKind BeginKind,
SkipUntil(tok::annot_pragma_openmp_end, StopBeforeMatch);
}
-void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
+void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
+ OpenMPDirectiveKind EndDKind,
SourceLocation DKLoc) {
- parseOMPEndDirective(OMPD_declare_target, OMPD_end_declare_target, DKind,
- DKLoc, Tok.getLocation(),
+ parseOMPEndDirective(BeginDKind, OMPD_end_declare_target, EndDKind, DKLoc,
+ Tok.getLocation(),
/* SkipUntilOpenMPEnd */ false);
// Skip the last annot_pragma_openmp_end.
if (Tok.is(tok::annot_pragma_openmp_end))
@@ -1833,7 +1857,8 @@ void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed,
DeclSpec::TST TagType, Decl *Tag) {
- assert(Tok.is(tok::annot_pragma_openmp) && "Not an OpenMP directive!");
+ assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
+ "Not an OpenMP directive!");
ParsingOpenMPDirectiveRAII DirScope(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -1851,7 +1876,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Toks.push_back(Tok);
while (Cnt && Tok.isNot(tok::eof)) {
(void)ConsumeAnyToken();
- if (Tok.is(tok::annot_pragma_openmp))
+ if (Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp))
++Cnt;
else if (Tok.is(tok::annot_pragma_openmp_end))
--Cnt;
@@ -2074,7 +2099,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ConsumeAnyToken();
DeclGroupPtrTy Ptr;
- if (Tok.is(tok::annot_pragma_openmp)) {
+ if (Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp)) {
Ptr = ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs, Delayed,
TagType, Tag);
} else if (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
@@ -2101,58 +2126,48 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
ParseOMPDeclareVariantClauses(Ptr, Toks, Loc);
return Ptr;
}
+ case OMPD_begin_declare_target:
case OMPD_declare_target: {
SourceLocation DTLoc = ConsumeAnyToken();
- if (Tok.isNot(tok::annot_pragma_openmp_end)) {
- return ParseOMPDeclareTargetClauses();
- }
+ bool HasClauses = Tok.isNot(tok::annot_pragma_openmp_end);
+ bool HasImplicitMappings =
+ DKind == OMPD_begin_declare_target || !HasClauses;
+ Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc);
+ if (HasClauses)
+ ParseOMPDeclareTargetClauses(DTCI);
// Skip the last annot_pragma_openmp_end.
ConsumeAnyToken();
- if (!Actions.ActOnStartOpenMPDeclareTargetDirective(DTLoc))
- return DeclGroupPtrTy();
-
- ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
- llvm::SmallVector<Decl *, 4> Decls;
- DKind = parseOpenMPDirectiveKind(*this);
- while (DKind != OMPD_end_declare_target && Tok.isNot(tok::eof) &&
- Tok.isNot(tok::r_brace)) {
- DeclGroupPtrTy Ptr;
- // Here we expect to see some function declaration.
- if (AS == AS_none) {
- assert(TagType == DeclSpec::TST_unspecified);
- MaybeParseCXX11Attributes(Attrs);
- ParsingDeclSpec PDS(*this);
- Ptr = ParseExternalDeclaration(Attrs, &PDS);
- } else {
- Ptr =
- ParseCXXClassMemberDeclarationWithPragmas(AS, Attrs, TagType, Tag);
- }
- if (Ptr) {
- DeclGroupRef Ref = Ptr.get();
- Decls.append(Ref.begin(), Ref.end());
- }
- if (Tok.isAnnotation() && Tok.is(tok::annot_pragma_openmp)) {
- TentativeParsingAction TPA(*this);
- ConsumeAnnotationToken();
- DKind = parseOpenMPDirectiveKind(*this);
- if (DKind != OMPD_end_declare_target)
- TPA.Revert();
- else
- TPA.Commit();
- }
+ if (HasImplicitMappings) {
+ Actions.ActOnStartOpenMPDeclareTargetContext(DTCI);
+ return nullptr;
}
- ParseOMPEndDeclareTargetDirective(DKind, DTLoc);
- Actions.ActOnFinishOpenMPDeclareTargetDirective();
+ Actions.ActOnFinishedOpenMPDeclareTargetContext(DTCI);
+ llvm::SmallVector<Decl *, 4> Decls;
+ for (auto &It : DTCI.ExplicitlyMapped)
+ Decls.push_back(It.first);
return Actions.BuildDeclaratorGroup(Decls);
}
+ case OMPD_end_declare_target: {
+ if (!Actions.isInOpenMPDeclareTargetContext()) {
+ Diag(Tok, diag::err_omp_unexpected_directive)
+ << 1 << getOpenMPDirectiveName(DKind);
+ break;
+ }
+ const Sema::DeclareTargetContextInfo &DTCI =
+ Actions.ActOnOpenMPEndDeclareTargetDirective();
+ ParseOMPEndDeclareTargetDirective(DTCI.Kind, DKind, DTCI.Loc);
+ return nullptr;
+ }
case OMPD_unknown:
Diag(Tok, diag::err_omp_unknown_directive);
break;
case OMPD_parallel:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_task:
case OMPD_taskyield:
case OMPD_barrier:
@@ -2190,7 +2205,6 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
case OMPD_distribute:
- case OMPD_end_declare_target:
case OMPD_target_update:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
@@ -2206,6 +2220,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
case OMPD_target_teams_distribute_simd:
+ case OMPD_dispatch:
+ case OMPD_masked:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
@@ -2255,12 +2271,13 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// simd' | 'teams distribute parallel for simd' | 'teams distribute
/// parallel for' | 'target teams' | 'target teams distribute' | 'target
/// teams distribute parallel for' | 'target teams distribute parallel
-/// for simd' | 'target teams distribute simd' {clause}
+/// for simd' | 'target teams distribute simd' | 'masked' {clause}
/// annot_pragma_openmp_end
///
StmtResult
Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
- assert(Tok.is(tok::annot_pragma_openmp) && "Not an OpenMP directive!");
+ assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
+ "Not an OpenMP directive!");
ParsingOpenMPDirectiveRAII DirScope(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
SmallVector<OMPClause *, 5> Clauses;
@@ -2377,6 +2394,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_target_enter_data:
case OMPD_target_exit_data:
case OMPD_target_update:
+ case OMPD_interop:
if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
ParsedStmtContext()) {
Diag(Tok, diag::err_omp_immediate_directive)
@@ -2387,6 +2405,8 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
LLVM_FALLTHROUGH;
case OMPD_parallel:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
@@ -2427,7 +2447,9 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
- case OMPD_target_teams_distribute_simd: {
+ case OMPD_target_teams_distribute_simd:
+ case OMPD_dispatch:
+ case OMPD_masked: {
// Special processing for flush and depobj clauses.
Token ImplicitTok;
bool ImplicitClauseAllowed = false;
@@ -2521,6 +2543,11 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
HasAssociatedStatement = false;
}
+ if (DKind == OMPD_tile && !FirstClauses[unsigned(OMPC_sizes)].getInt()) {
+ Diag(Loc, diag::err_omp_required_clause)
+ << getOpenMPDirectiveName(OMPD_tile) << "sizes";
+ }
+
StmtResult AssociatedStmt;
if (HasAssociatedStatement) {
// The body is a block scope like in Lambdas and Blocks.
@@ -2529,7 +2556,15 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
// the captured region. Code elsewhere assumes that any FunctionScopeInfo
// should have at least one compound statement scope within it.
ParsingOpenMPDirectiveRAII NormalScope(*this, /*Value=*/false);
- AssociatedStmt = (Sema::CompoundScopeRAII(Actions), ParseStatement());
+ {
+ Sema::CompoundScopeRAII Scope(Actions);
+ AssociatedStmt = ParseStatement();
+
+ if (AssociatedStmt.isUsable() && isOpenMPLoopDirective(DKind) &&
+ getLangOpts().OpenMPIRBuilder)
+ AssociatedStmt =
+ Actions.ActOnOpenMPCanonicalLoop(AssociatedStmt.get());
+ }
AssociatedStmt = Actions.ActOnOpenMPRegionEnd(AssociatedStmt, Clauses);
} else if (DKind == OMPD_target_update || DKind == OMPD_target_enter_data ||
DKind == OMPD_target_exit_data) {
@@ -2550,6 +2585,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
}
case OMPD_declare_simd:
case OMPD_declare_target:
+ case OMPD_begin_declare_target:
case OMPD_end_declare_target:
case OMPD_requires:
case OMPD_begin_declare_variant:
@@ -2633,6 +2669,37 @@ bool Parser::ParseOpenMPSimpleVarList(
return !IsCorrect;
}
+OMPClause *Parser::ParseOpenMPSizesClause() {
+ SourceLocation ClauseNameLoc = ConsumeToken();
+ SmallVector<Expr *, 4> ValExprs;
+
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected) << tok::l_paren;
+ return nullptr;
+ }
+
+ while (true) {
+ ExprResult Val = ParseConstantExpression();
+ if (!Val.isUsable()) {
+ T.skipToEnd();
+ return nullptr;
+ }
+
+ ValExprs.push_back(Val.get());
+
+ if (Tok.is(tok::r_paren) || Tok.is(tok::annot_pragma_openmp_end))
+ break;
+
+ ExpectAndConsume(tok::comma);
+ }
+
+ T.consumeClose();
+
+ return Actions.ActOnOpenMPSizesClause(
+ ValExprs, ClauseNameLoc, T.getOpenLocation(), T.getCloseLocation());
+}
+
OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
SourceLocation Loc = Tok.getLocation();
ConsumeAnyToken();
@@ -2643,7 +2710,8 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
return nullptr;
SmallVector<Sema::UsesAllocatorsData, 4> Data;
do {
- ExprResult Allocator = ParseCXXIdExpression();
+ ExprResult Allocator =
+ getLangOpts().CPlusPlus ? ParseCXXIdExpression() : ParseExpression();
if (Allocator.isInvalid()) {
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
StopBeforeMatch);
@@ -2655,7 +2723,8 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
BalancedDelimiterTracker T(*this, tok::l_paren,
tok::annot_pragma_openmp_end);
T.consumeOpen();
- ExprResult AllocatorTraits = ParseCXXIdExpression();
+ ExprResult AllocatorTraits =
+ getLangOpts().CPlusPlus ? ParseCXXIdExpression() : ParseExpression();
T.consumeClose();
if (AllocatorTraits.isInvalid()) {
SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
@@ -2727,6 +2796,10 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_allocator:
case OMPC_depobj:
case OMPC_detach:
+ case OMPC_novariants:
+ case OMPC_nocontext:
+ case OMPC_filter:
+ case OMPC_partial:
// OpenMP [2.5, Restrictions]
// At most one num_threads clause can appear on the directive.
// OpenMP [2.8.1, simd construct, Restrictions]
@@ -2749,13 +2822,17 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
// At most one allocator clause can appear on the directive.
// OpenMP 5.0, 2.10.1 task Construct, Restrictions.
// At most one detach clause can appear on the directive.
+ // OpenMP 5.1, 2.3.6 dispatch Construct, Restrictions.
+ // At most one novariants clause can appear on a dispatch directive.
+ // At most one nocontext clause can appear on a dispatch directive.
if (!FirstClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
ErrorFound = true;
}
- if (CKind == OMPC_ordered && PP.LookAhead(/*N=*/0).isNot(tok::l_paren))
+ if ((CKind == OMPC_ordered || CKind == OMPC_partial) &&
+ PP.LookAhead(/*N=*/0).isNot(tok::l_paren))
Clause = ParseOpenMPClause(CKind, WrongDirective);
else
Clause = ParseOpenMPSingleExprClause(CKind, WrongDirective);
@@ -2818,7 +2895,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
case OMPC_dynamic_allocators:
- case OMPC_destroy:
+ case OMPC_full:
// OpenMP [2.7.1, Restrictions, p. 9]
// Only one ordered clause can appear on a loop directive.
// OpenMP [2.7.1, Restrictions, C/C++, p. 4]
@@ -2870,9 +2947,33 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_affinity:
Clause = ParseOpenMPVarListClause(DKind, CKind, WrongDirective);
break;
+ case OMPC_sizes:
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+
+ Clause = ParseOpenMPSizesClause();
+ break;
case OMPC_uses_allocators:
Clause = ParseOpenMPUsesAllocatorClause(DKind);
break;
+ case OMPC_destroy:
+ if (DKind != OMPD_interop) {
+ if (!FirstClause) {
+ Diag(Tok, diag::err_omp_more_one_clause)
+ << getOpenMPDirectiveName(DKind) << getOpenMPClauseName(CKind) << 0;
+ ErrorFound = true;
+ }
+ Clause = ParseOpenMPClause(CKind, WrongDirective);
+ break;
+ }
+ LLVM_FALLTHROUGH;
+ case OMPC_init:
+ case OMPC_use:
+ Clause = ParseOpenMPInteropClause(CKind, WrongDirective);
+ break;
case OMPC_device_type:
case OMPC_unknown:
skipUntilPragmaOpenMPEnd(DKind);
@@ -2969,6 +3070,144 @@ OMPClause *Parser::ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
return Actions.ActOnOpenMPSingleExprClause(Kind, Val.get(), Loc, LLoc, RLoc);
}
+/// Parsing of OpenMP clauses that use an interop-var.
+///
+/// init-clause:
+/// init([interop-modifier, ]interop-type[[, interop-type] ... ]:interop-var)
+///
+/// destroy-clause:
+/// destroy(interop-var)
+///
+/// use-clause:
+/// use(interop-var)
+///
+/// interop-modifier:
+/// prefer_type(preference-list)
+///
+/// preference-list:
+/// foreign-runtime-id [, foreign-runtime-id]...
+///
+/// foreign-runtime-id:
+/// <string-literal> | <constant-integral-expression>
+///
+/// interop-type:
+/// target | targetsync
+///
+OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
+ bool ParseOnly) {
+ SourceLocation Loc = ConsumeToken();
+ // Parse '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren, tok::annot_pragma_openmp_end);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ getOpenMPClauseName(Kind).data()))
+ return nullptr;
+
+ bool IsTarget = false;
+ bool IsTargetSync = false;
+ SmallVector<Expr *, 4> Prefs;
+
+ if (Kind == OMPC_init) {
+
+ // Parse optional interop-modifier.
+ if (Tok.is(tok::identifier) && PP.getSpelling(Tok) == "prefer_type") {
+ ConsumeToken();
+ BalancedDelimiterTracker PT(*this, tok::l_paren,
+ tok::annot_pragma_openmp_end);
+ if (PT.expectAndConsume(diag::err_expected_lparen_after, "prefer_type"))
+ return nullptr;
+
+ while (Tok.isNot(tok::r_paren)) {
+ SourceLocation Loc = Tok.getLocation();
+ ExprResult LHS = ParseCastExpression(AnyCastExpr);
+ ExprResult PTExpr = Actions.CorrectDelayedTyposInExpr(
+ ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ PTExpr = Actions.ActOnFinishFullExpr(PTExpr.get(), Loc,
+ /*DiscardedValue=*/false);
+ if (PTExpr.isUsable())
+ Prefs.push_back(PTExpr.get());
+ else
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ }
+ PT.consumeClose();
+ }
+
+ if (!Prefs.empty()) {
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else
+ Diag(Tok, diag::err_omp_expected_punc_after_interop_mod);
+ }
+
+ // Parse the interop-types.
+ bool HasError = false;
+ while (Tok.is(tok::identifier)) {
+ if (PP.getSpelling(Tok) == "target") {
+ // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
+ // Each interop-type may be specified on an action-clause at most
+ // once.
+ if (IsTarget)
+ Diag(Tok, diag::warn_omp_more_one_interop_type) << "target";
+ IsTarget = true;
+ } else if (PP.getSpelling(Tok) == "targetsync") {
+ if (IsTargetSync)
+ Diag(Tok, diag::warn_omp_more_one_interop_type) << "targetsync";
+ IsTargetSync = true;
+ } else {
+ HasError = true;
+ Diag(Tok, diag::err_omp_expected_interop_type);
+ }
+ ConsumeToken();
+
+ if (!Tok.is(tok::comma))
+ break;
+ ConsumeToken();
+ }
+ if (!HasError && !IsTarget && !IsTargetSync)
+ Diag(Tok, diag::err_omp_expected_interop_type);
+
+ if (Tok.is(tok::colon))
+ ConsumeToken();
+ else if (IsTarget || IsTargetSync)
+ Diag(Tok, diag::warn_pragma_expected_colon) << "interop types";
+ }
+
+ // Parse the variable.
+ SourceLocation VarLoc = Tok.getLocation();
+ ExprResult InteropVarExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (!InteropVarExpr.isUsable()) {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
+
+ // Parse ')'.
+ SourceLocation RLoc = Tok.getLocation();
+ if (!T.consumeClose())
+ RLoc = T.getCloseLocation();
+
+ if (ParseOnly || !InteropVarExpr.isUsable() ||
+ (Kind == OMPC_init && !IsTarget && !IsTargetSync))
+ return nullptr;
+
+ if (Kind == OMPC_init)
+ return Actions.ActOnOpenMPInitClause(InteropVarExpr.get(), Prefs, IsTarget,
+ IsTargetSync, Loc, T.getOpenLocation(),
+ VarLoc, RLoc);
+ if (Kind == OMPC_use)
+ return Actions.ActOnOpenMPUseClause(InteropVarExpr.get(), Loc,
+ T.getOpenLocation(), VarLoc, RLoc);
+
+ if (Kind == OMPC_destroy)
+ return Actions.ActOnOpenMPDestroyClause(InteropVarExpr.get(), Loc,
+ T.getOpenLocation(), VarLoc, RLoc);
+
+ llvm_unreachable("Unexpected interop variable clause.");
+}
+
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
diff --git a/clang/lib/Parse/ParsePragma.cpp b/clang/lib/Parse/ParsePragma.cpp
index f9b852826775..42072fe63fc8 100644
--- a/clang/lib/Parse/ParsePragma.cpp
+++ b/clang/lib/Parse/ParsePragma.cpp
@@ -14,11 +14,13 @@
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/Token.h"
#include "clang/Parse/LoopHint.h"
#include "clang/Parse/ParseDiagnostic.h"
#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Scope.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
@@ -292,6 +294,10 @@ struct PragmaMaxTokensTotalHandler : public PragmaHandler {
Token &FirstToken) override;
};
+void markAsReinjectedForRelexing(llvm::MutableArrayRef<clang::Token> Toks) {
+ for (auto &T : Toks)
+ T.setFlag(clang::Token::IsReinjected);
+}
} // end namespace
void Parser::initializePragmaHandlers() {
@@ -399,9 +405,11 @@ void Parser::initializePragmaHandlers() {
UnrollHintHandler = std::make_unique<PragmaUnrollHintHandler>("unroll");
PP.AddPragmaHandler(UnrollHintHandler.get());
+ PP.AddPragmaHandler("GCC", UnrollHintHandler.get());
NoUnrollHintHandler = std::make_unique<PragmaUnrollHintHandler>("nounroll");
PP.AddPragmaHandler(NoUnrollHintHandler.get());
+ PP.AddPragmaHandler("GCC", NoUnrollHintHandler.get());
UnrollAndJamHintHandler =
std::make_unique<PragmaUnrollHintHandler>("unroll_and_jam");
@@ -517,9 +525,11 @@ void Parser::resetPragmaHandlers() {
LoopHintHandler.reset();
PP.RemovePragmaHandler(UnrollHintHandler.get());
+ PP.RemovePragmaHandler("GCC", UnrollHintHandler.get());
UnrollHintHandler.reset();
PP.RemovePragmaHandler(NoUnrollHintHandler.get());
+ PP.RemovePragmaHandler("GCC", NoUnrollHintHandler.get());
NoUnrollHintHandler.reset();
PP.RemovePragmaHandler(UnrollAndJamHintHandler.get());
@@ -771,22 +781,21 @@ void Parser::HandlePragmaOpenCLExtension() {
// overriding all previously issued extension directives, but only if the
// behavior is set to disable."
if (Name == "all") {
- if (State == Disable) {
+ if (State == Disable)
Opt.disableAll();
- Opt.enableSupportedCore(getLangOpts());
- } else {
+ else
PP.Diag(NameLoc, diag::warn_pragma_expected_predicate) << 1;
- }
} else if (State == Begin) {
if (!Opt.isKnown(Name) || !Opt.isSupported(Name, getLangOpts())) {
Opt.support(Name);
+ // FIXME: Default behavior of the extension pragma is not defined.
+ // Therefore, it should never be added by default.
+ Opt.acceptsPragma(Name);
}
- Actions.setCurrentOpenCLExtension(Name);
} else if (State == End) {
- if (Name != Actions.getCurrentOpenCLExtension())
- PP.Diag(NameLoc, diag::warn_pragma_begin_end_mismatch);
- Actions.setCurrentOpenCLExtension("");
- } else if (!Opt.isKnown(Name))
+ // There is no behavior for this directive. We only accept this for
+ // backward compatibility.
+ } else if (!Opt.isKnown(Name) || !Opt.isWithPragma(Name))
PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << Ident;
else if (Opt.isSupportedExtension(Name, getLangOpts()))
Opt.enable(Name, State == Enable);
@@ -2618,6 +2627,7 @@ void PragmaMSPragma::HandlePragma(Preprocessor &PP,
TokenVector.push_back(EoF);
// We must allocate this array with new because EnterTokenStream is going to
// delete it later.
+ markAsReinjectedForRelexing(TokenVector);
auto TokenArray = std::make_unique<Token[]>(TokenVector.size());
std::copy(TokenVector.begin(), TokenVector.end(), TokenArray.get());
auto Value = new (PP.getPreprocessorAllocator())
@@ -3175,6 +3185,7 @@ static bool ParseLoopHintValue(Preprocessor &PP, Token &Tok, Token PragmaName,
EOFTok.setLocation(Tok.getLocation());
ValueList.push_back(EOFTok); // Terminates expression for parsing.
+ markAsReinjectedForRelexing(ValueList);
Info.Toks = llvm::makeArrayRef(ValueList).copy(PP.getPreprocessorAllocator());
Info.PragmaName = PragmaName;
@@ -3631,6 +3642,7 @@ void PragmaAttributeHandler::HandlePragma(Preprocessor &PP,
EOFTok.setLocation(EndLoc);
AttributeTokens.push_back(EOFTok);
+ markAsReinjectedForRelexing(AttributeTokens);
Info->Tokens =
llvm::makeArrayRef(AttributeTokens).copy(PP.getPreprocessorAllocator());
}
diff --git a/clang/lib/Parse/ParseStmt.cpp b/clang/lib/Parse/ParseStmt.cpp
index 26a02575010c..ebfe048513b1 100644
--- a/clang/lib/Parse/ParseStmt.cpp
+++ b/clang/lib/Parse/ParseStmt.cpp
@@ -20,6 +20,8 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
+#include "llvm/ADT/STLExtras.h"
+
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -98,10 +100,15 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
ParenBraceBracketBalancer BalancerRAIIObj(*this);
+ // Because we're parsing either a statement or a declaration, the order of
+ // attribute parsing is important. [[]] attributes at the start of a
+ // statement are different from [[]] attributes that follow an __attribute__
+ // at the start of the statement. Thus, we're not using MaybeParseAttributes
+ // here because we don't want to allow arbitrary orderings.
ParsedAttributesWithRange Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs, nullptr, /*MightBeObjCMessageSend*/ true);
- if (!MaybeParseOpenCLUnrollHintAttribute(Attrs))
- return StmtError();
+ if (getLangOpts().OpenCL)
+ MaybeParseGNUAttributes(Attrs);
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
Stmts, StmtCtx, TrailingElseLoc, Attrs);
@@ -113,7 +120,7 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
if (Attrs.empty() || Res.isInvalid())
return Res;
- return Actions.ProcessStmtAttributes(Res.get(), Attrs, Attrs.Range);
+ return Actions.ActOnAttributedStmt(Attrs, Res.get());
}
namespace {
@@ -165,14 +172,13 @@ Retry:
switch (Kind) {
case tok::at: // May be a @try or @throw statement
{
- ProhibitAttributes(Attrs); // TODO: is it correct?
AtLoc = ConsumeToken(); // consume @
return ParseObjCAtStatement(AtLoc, StmtCtx);
}
case tok::code_completion:
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
cutOffParsing();
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
return StmtError();
case tok::identifier: {
@@ -210,7 +216,11 @@ Retry:
if ((getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt ||
(StmtCtx & ParsedStmtContext::AllowDeclarationsInC) !=
ParsedStmtContext()) &&
- (GNUAttributeLoc.isValid() || isDeclarationStatement())) {
+ ((GNUAttributeLoc.isValid() &&
+ !(!Attrs.empty() &&
+ llvm::all_of(
+ Attrs, [](ParsedAttr &Attr) { return Attr.isStmtAttr(); }))) ||
+ isDeclarationStatement())) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl;
if (GNUAttributeLoc.isValid()) {
@@ -391,7 +401,12 @@ Retry:
return HandlePragmaCaptured();
case tok::annot_pragma_openmp:
+ // Prohibit attributes that are not OpenMP attributes, but only before
+ // processing a #pragma omp clause.
ProhibitAttributes(Attrs);
+ LLVM_FALLTHROUGH;
+ case tok::annot_attr_openmp:
+ // Do not prohibit attributes if they were OpenMP attributes.
return ParseOpenMPDeclarativeOrExecutableDirective(StmtCtx);
case tok::annot_pragma_ms_pointers_to_members:
@@ -638,19 +653,12 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
// attributes as part of a statement in that case). That looks like a bug.
if (!getLangOpts().CPlusPlus || Tok.is(tok::semi))
attrs.takeAllFrom(TempAttrs);
- else if (isDeclarationStatement()) {
+ else {
StmtVector Stmts;
- // FIXME: We should do this whether or not we have a declaration
- // statement, but that doesn't work correctly (because ProhibitAttributes
- // can't handle GNU attributes), so only call it in the one case where
- // GNU attributes are allowed.
SubStmt = ParseStatementOrDeclarationAfterAttributes(Stmts, StmtCtx,
nullptr, TempAttrs);
if (!TempAttrs.empty() && !SubStmt.isInvalid())
- SubStmt = Actions.ProcessStmtAttributes(SubStmt.get(), TempAttrs,
- TempAttrs.Range);
- } else {
- Diag(Tok, diag::err_expected_after) << "__attribute__" << tok::semi;
+ SubStmt = Actions.ActOnAttributedStmt(TempAttrs, SubStmt.get());
}
}
@@ -715,8 +723,8 @@ StmtResult Parser::ParseCaseStatement(ParsedStmtContext StmtCtx,
ColonLoc = SourceLocation();
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteCase(getCurScope());
cutOffParsing();
+ Actions.CodeCompleteCase(getCurScope());
return StmtError();
}
@@ -1134,7 +1142,7 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
R = handleExprStmt(Res, SubStmtCtx);
if (R.isUsable())
- R = Actions.ProcessStmtAttributes(R.get(), attrs, attrs.Range);
+ R = Actions.ActOnAttributedStmt(attrs, R.get());
}
}
@@ -1461,8 +1469,8 @@ StmtResult Parser::ParseIfStatement(SourceLocation *TrailingElseLoc) {
// Pop the 'else' scope if needed.
InnerScope.Exit();
} else if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteAfterIf(getCurScope(), IsBracedThen);
cutOffParsing();
+ Actions.CodeCompleteAfterIf(getCurScope(), IsBracedThen);
return StmtError();
} else if (InnerStatementTrailingElseLoc.isValid()) {
Diag(InnerStatementTrailingElseLoc, diag::warn_dangling_else);
@@ -1816,10 +1824,10 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
FullExprArg ThirdPart(Actions);
if (Tok.is(tok::code_completion)) {
+ cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(),
C99orCXXorObjC? Sema::PCC_ForInit
: Sema::PCC_Expression);
- cutOffParsing();
return StmtError();
}
@@ -1887,8 +1895,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ConsumeToken(); // consume 'in'
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
cutOffParsing();
+ Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
return StmtError();
}
Collection = ParseExpression();
@@ -1923,8 +1931,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ConsumeToken(); // consume 'in'
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteObjCForCollection(getCurScope(), nullptr);
cutOffParsing();
+ Actions.CodeCompleteObjCForCollection(getCurScope(), nullptr);
return StmtError();
}
Collection = ParseExpression();
@@ -1948,7 +1956,6 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
}
// Parse the second part of the for specifier.
- getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
if (!ForEach && !ForRangeInfo.ParsedForRangeDecl() &&
!SecondPart.isInvalid()) {
// Parse the second part of the for specifier.
@@ -1964,7 +1971,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
SecondPart =
ParseCXXCondition(nullptr, ForLoc, Sema::ConditionKind::Boolean,
- MightBeForRangeStmt ? &ForRangeInfo : nullptr);
+ MightBeForRangeStmt ? &ForRangeInfo : nullptr,
+ /*EnterForConditionScope*/ true);
if (ForRangeInfo.ParsedForRangeDecl()) {
Diag(FirstPart.get() ? FirstPart.get()->getBeginLoc()
@@ -1981,6 +1989,9 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
}
}
} else {
+ // We permit 'continue' and 'break' in the condition of a for loop.
+ getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
+
ExprResult SecondExpr = ParseExpression();
if (SecondExpr.isInvalid())
SecondPart = Sema::ConditionError();
@@ -1992,6 +2003,11 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
}
}
+ // Enter a break / continue scope, if we didn't already enter one while
+ // parsing the second part.
+ if (!(getCurScope()->getFlags() & Scope::ContinueScope))
+ getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
+
// Parse the third part of the for statement.
if (!ForEach && !ForRangeInfo.ParsedForRangeDecl()) {
if (Tok.isNot(tok::semi)) {
@@ -2177,9 +2193,9 @@ StmtResult Parser::ParseReturnStatement() {
PreferredType.enterReturn(Actions, Tok.getLocation());
// FIXME: Code completion for co_return.
if (Tok.is(tok::code_completion) && !IsCoreturn) {
+ cutOffParsing();
Actions.CodeCompleteExpression(getCurScope(),
PreferredType.get(Tok.getLocation()));
- cutOffParsing();
return StmtError();
}
@@ -2548,19 +2564,3 @@ void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
}
Braces.consumeClose();
}
-
-bool Parser::ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
- MaybeParseGNUAttributes(Attrs);
-
- if (Attrs.empty())
- return true;
-
- if (Attrs.begin()->getKind() != ParsedAttr::AT_OpenCLUnrollHint)
- return true;
-
- if (!(Tok.is(tok::kw_for) || Tok.is(tok::kw_while) || Tok.is(tok::kw_do))) {
- Diag(Tok, diag::err_opencl_unroll_hint_on_non_loop);
- return false;
- }
- return true;
-}
diff --git a/clang/lib/Parse/ParseStmtAsm.cpp b/clang/lib/Parse/ParseStmtAsm.cpp
index bdf40c291cb6..e520151dcad7 100644
--- a/clang/lib/Parse/ParseStmtAsm.cpp
+++ b/clang/lib/Parse/ParseStmtAsm.cpp
@@ -577,19 +577,22 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
TheTarget->createMCAsmInfo(*MRI, TT, MCOptions));
// Get the instruction descriptor.
std::unique_ptr<llvm::MCInstrInfo> MII(TheTarget->createMCInstrInfo());
- std::unique_ptr<llvm::MCObjectFileInfo> MOFI(new llvm::MCObjectFileInfo());
std::unique_ptr<llvm::MCSubtargetInfo> STI(
TheTarget->createMCSubtargetInfo(TT, TO.CPU, FeaturesStr));
// Target MCTargetDesc may not be linked in clang-based tools.
- if (!MAI || !MII || !MOFI || !STI) {
+
+ if (!MAI || !MII || !STI) {
Diag(AsmLoc, diag::err_msasm_unable_to_create_target)
<< "target MC unavailable";
return EmptyStmt();
}
llvm::SourceMgr TempSrcMgr;
- llvm::MCContext Ctx(MAI.get(), MRI.get(), MOFI.get(), &TempSrcMgr);
- MOFI->InitMCObjectFileInfo(TheTriple, /*PIC*/ false, Ctx);
+ llvm::MCContext Ctx(TheTriple, MAI.get(), MRI.get(), STI.get(), &TempSrcMgr);
+ std::unique_ptr<llvm::MCObjectFileInfo> MOFI(
+ TheTarget->createMCObjectFileInfo(Ctx, /*PIC=*/false));
+ Ctx.setObjectFileInfo(MOFI.get());
+
std::unique_ptr<llvm::MemoryBuffer> Buffer =
llvm::MemoryBuffer::getMemBuffer(AsmString, "<MS inline asm>");
@@ -630,9 +633,9 @@ StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
SmallVector<std::pair<void *, bool>, 4> OpExprs;
SmallVector<std::string, 4> Constraints;
SmallVector<std::string, 4> Clobbers;
- if (Parser->parseMSInlineAsm(AsmLoc.getPtrEncoding(), AsmStringIR, NumOutputs,
- NumInputs, OpExprs, Constraints, Clobbers,
- MII.get(), IP.get(), Callback))
+ if (Parser->parseMSInlineAsm(AsmStringIR, NumOutputs, NumInputs, OpExprs,
+ Constraints, Clobbers, MII.get(), IP.get(),
+ Callback))
return StmtError();
// Filter out "fpsw" and "mxcsr". They aren't valid GCC asm clobber
diff --git a/clang/lib/Parse/ParseTentative.cpp b/clang/lib/Parse/ParseTentative.cpp
index 3bf2bc455bfe..c0bfbbde40ac 100644
--- a/clang/lib/Parse/ParseTentative.cpp
+++ b/clang/lib/Parse/ParseTentative.cpp
@@ -353,8 +353,8 @@ struct Parser::ConditionDeclarationOrInitStatementState {
if (CanBeForRangeDecl) {
// Skip until we hit a ')', ';', or a ':' with no matching '?'.
// The final case is a for range declaration, the rest are not.
+ unsigned QuestionColonDepth = 0;
while (true) {
- unsigned QuestionColonDepth = 0;
P.SkipUntil({tok::r_paren, tok::semi, tok::question, tok::colon},
StopBeforeMatch);
if (P.Tok.is(tok::question))
diff --git a/clang/lib/Parse/Parser.cpp b/clang/lib/Parse/Parser.cpp
index 9b0f921b4269..c81dd03ffaaa 100644
--- a/clang/lib/Parse/Parser.cpp
+++ b/clang/lib/Parse/Parser.cpp
@@ -49,10 +49,10 @@ IdentifierInfo *Parser::getSEHExceptKeyword() {
}
Parser::Parser(Preprocessor &pp, Sema &actions, bool skipFunctionBodies)
- : PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
- GreaterThanIsOperator(true), ColonIsSacred(false),
- InMessageExpression(false), TemplateParameterDepth(0),
- ParsingInObjCContainer(false) {
+ : PP(pp), PreferredType(pp.isCodeCompletionEnabled()), Actions(actions),
+ Diags(PP.getDiagnostics()), GreaterThanIsOperator(true),
+ ColonIsSacred(false), InMessageExpression(false),
+ TemplateParameterDepth(0), ParsingInObjCContainer(false) {
SkipFunctionBodies = pp.isCodeCompletionEnabled() || skipFunctionBodies;
Tok.startToken();
Tok.setKind(tok::eof);
@@ -309,6 +309,7 @@ bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags) {
return false;
case tok::annot_pragma_openmp:
+ case tok::annot_attr_openmp:
case tok::annot_pragma_openmp_end:
// Stop before an OpenMP pragma boundary.
if (OpenMPDirectiveParsing)
@@ -494,6 +495,7 @@ void Parser::Initialize() {
Ident_instancetype = nullptr;
Ident_final = nullptr;
Ident_sealed = nullptr;
+ Ident_abstract = nullptr;
Ident_override = nullptr;
Ident_GNU_final = nullptr;
Ident_import = nullptr;
@@ -503,10 +505,12 @@ void Parser::Initialize() {
Ident_vector = nullptr;
Ident_bool = nullptr;
+ Ident_Bool = nullptr;
Ident_pixel = nullptr;
if (getLangOpts().AltiVec || getLangOpts().ZVector) {
Ident_vector = &PP.getIdentifierTable().get("vector");
Ident_bool = &PP.getIdentifierTable().get("bool");
+ Ident_Bool = &PP.getIdentifierTable().get("_Bool");
}
if (getLangOpts().AltiVec)
Ident_pixel = &PP.getIdentifierTable().get("pixel");
@@ -795,6 +799,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_pragma_opencl_extension:
HandlePragmaOpenCLExtension();
return nullptr;
+ case tok::annot_attr_openmp:
case tok::annot_pragma_openmp: {
AccessSpecifier AS = AS_none;
return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, attrs);
@@ -870,6 +875,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
SingleDecl = ParseObjCMethodDefinition();
break;
case tok::code_completion:
+ cutOffParsing();
if (CurParsedObjCImpl) {
// Code-complete Objective-C methods even without leading '-'/'+' prefix.
Actions.CodeCompleteObjCMethodDecl(getCurScope(),
@@ -879,7 +885,6 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Actions.CodeCompleteOrdinaryName(
getCurScope(),
CurParsedObjCImpl ? Sema::PCC_ObjCImplementation : Sema::PCC_Namespace);
- cutOffParsing();
return nullptr;
case tok::kw_import:
SingleDecl = ParseModuleImport(SourceLocation());
@@ -1079,8 +1084,6 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
DS, AnonRecord);
DS.complete(TheDecl);
- if (getLangOpts().OpenCL)
- Actions.setCurrentOpenCLExtensionForDecl(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
return Actions.BuildDeclaratorGroup(decls);
@@ -1213,7 +1216,7 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// a definition. Late parsed attributes are checked at the end.
if (Tok.isNot(tok::equal)) {
for (const ParsedAttr &AL : D.getAttributes())
- if (AL.isKnownToGCC() && !AL.isCXX11Attribute())
+ if (AL.isKnownToGCC() && !AL.isStandardAttributeSyntax())
Diag(AL.getLoc(), diag::warn_attribute_on_function_definition) << AL;
}
@@ -1695,6 +1698,11 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
break;
case Sema::NC_Type: {
+ if (TryAltiVecVectorToken())
+ // vector has been found as a type id when altivec is enabled but
+ // this is followed by a declaration specifier so this is really the
+ // altivec vector token. Leave it unannotated.
+ break;
SourceLocation BeginLoc = NameLoc;
if (SS.isNotEmpty())
BeginLoc = SS.getBeginLoc();
@@ -1736,6 +1744,11 @@ Parser::TryAnnotateName(CorrectionCandidateCallback *CCC) {
return ANK_Success;
case Sema::NC_NonType:
+ if (TryAltiVecVectorToken())
+ // vector has been found as a non-type id when altivec is enabled but
+ // this is followed by a declaration specifier so this is really the
+ // altivec vector token. Leave it unannotated.
+ break;
Tok.setKind(tok::annot_non_type);
setNonTypeAnnotation(Tok, Classification.getNonTypeDecl());
Tok.setLocation(NameLoc);
@@ -2114,21 +2127,21 @@ SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
for (Scope *S = getCurScope(); S; S = S->getParent()) {
if (S->getFlags() & Scope::FnScope) {
+ cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(),
Sema::PCC_RecoveryInFunction);
- cutOffParsing();
return PrevTokLocation;
}
if (S->getFlags() & Scope::ClassScope) {
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
cutOffParsing();
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
return PrevTokLocation;
}
}
- Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Namespace);
cutOffParsing();
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Namespace);
return PrevTokLocation;
}
@@ -2452,8 +2465,8 @@ bool Parser::ParseModuleName(
while (true) {
if (!Tok.is(tok::identifier)) {
if (Tok.is(tok::code_completion)) {
- Actions.CodeCompleteModuleImport(UseLoc, Path);
cutOffParsing();
+ Actions.CodeCompleteModuleImport(UseLoc, Path);
return true;
}
diff --git a/clang/lib/Rewrite/DeltaTree.cpp b/clang/lib/Rewrite/DeltaTree.cpp
index d27795c2f479..61467f84928f 100644
--- a/clang/lib/Rewrite/DeltaTree.cpp
+++ b/clang/lib/Rewrite/DeltaTree.cpp
@@ -458,7 +458,10 @@ void DeltaTree::AddDelta(unsigned FileIndex, int Delta) {
DeltaTreeNode::InsertResult InsertRes;
if (MyRoot->DoInsertion(FileIndex, Delta, &InsertRes)) {
- Root = MyRoot = new DeltaTreeInteriorNode(InsertRes);
+ Root = new DeltaTreeInteriorNode(InsertRes);
+#ifdef VERIFY_TREE
+ MyRoot = Root;
+#endif
}
#ifdef VERIFY_TREE
diff --git a/clang/lib/Sema/AnalysisBasedWarnings.cpp b/clang/lib/Sema/AnalysisBasedWarnings.cpp
index edd9742ed207..aa2602c8d925 100644
--- a/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -1506,6 +1506,25 @@ static void diagnoseRepeatedUseOfWeak(Sema &S,
}
}
+namespace clang {
+namespace {
+typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
+typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
+typedef std::list<DelayedDiag> DiagList;
+
+struct SortDiagBySourceLocation {
+ SourceManager &SM;
+ SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}
+
+ bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
+ // Although this call will be slow, this is only called when outputting
+ // multiple warnings.
+ return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
+ }
+};
+} // anonymous namespace
+} // namespace clang
+
namespace {
class UninitValsDiagReporter : public UninitVariablesHandler {
Sema &S;
@@ -1626,9 +1645,35 @@ private:
}
};
+/// Inter-procedural data for the called-once checker.
+class CalledOnceInterProceduralData {
+public:
+ // Add the delayed warning for the given block.
+ void addDelayedWarning(const BlockDecl *Block,
+ PartialDiagnosticAt &&Warning) {
+ DelayedBlockWarnings[Block].emplace_back(std::move(Warning));
+ }
+ // Report all of the warnings we've gathered for the given block.
+ void flushWarnings(const BlockDecl *Block, Sema &S) {
+ for (const PartialDiagnosticAt &Delayed : DelayedBlockWarnings[Block])
+ S.Diag(Delayed.first, Delayed.second);
+
+ discardWarnings(Block);
+ }
+ // Discard all of the warnings we've gathered for the given block.
+ void discardWarnings(const BlockDecl *Block) {
+ DelayedBlockWarnings.erase(Block);
+ }
+
+private:
+ using DelayedDiagnostics = SmallVector<PartialDiagnosticAt, 2>;
+ llvm::DenseMap<const BlockDecl *, DelayedDiagnostics> DelayedBlockWarnings;
+};
+
class CalledOnceCheckReporter : public CalledOnceCheckHandler {
public:
- CalledOnceCheckReporter(Sema &S) : S(S) {}
+ CalledOnceCheckReporter(Sema &S, CalledOnceInterProceduralData &Data)
+ : S(S), Data(Data) {}
void handleDoubleCall(const ParmVarDecl *Parameter, const Expr *Call,
const Expr *PrevCall, bool IsCompletionHandler,
bool Poised) override {
@@ -1649,14 +1694,24 @@ public:
<< Parameter << /* Captured */ false;
}
- void handleNeverCalled(const ParmVarDecl *Parameter, const Stmt *Where,
- NeverCalledReason Reason, bool IsCalledDirectly,
+ void handleNeverCalled(const ParmVarDecl *Parameter, const Decl *Function,
+ const Stmt *Where, NeverCalledReason Reason,
+ bool IsCalledDirectly,
bool IsCompletionHandler) override {
auto DiagToReport = IsCompletionHandler
? diag::warn_completion_handler_never_called_when
: diag::warn_called_once_never_called_when;
- S.Diag(Where->getBeginLoc(), DiagToReport)
- << Parameter << IsCalledDirectly << (unsigned)Reason;
+ PartialDiagnosticAt Warning(Where->getBeginLoc(), S.PDiag(DiagToReport)
+ << Parameter
+ << IsCalledDirectly
+ << (unsigned)Reason);
+
+ if (const auto *Block = dyn_cast<BlockDecl>(Function)) {
+ // We shouldn't report these warnings on blocks immediately
+ Data.addDelayedWarning(Block, std::move(Warning));
+ } else {
+ S.Diag(Warning.first, Warning.second);
+ }
}
void handleCapturedNeverCalled(const ParmVarDecl *Parameter,
@@ -1669,8 +1724,18 @@ public:
<< Parameter << /* Captured */ true;
}
+ void
+ handleBlockThatIsGuaranteedToBeCalledOnce(const BlockDecl *Block) override {
+ Data.flushWarnings(Block, S);
+ }
+
+ void handleBlockWithNoGuarantees(const BlockDecl *Block) override {
+ Data.discardWarnings(Block);
+ }
+
private:
Sema &S;
+ CalledOnceInterProceduralData &Data;
};
constexpr unsigned CalledOnceWarnings[] = {
@@ -1703,25 +1768,6 @@ bool shouldAnalyzeCalledOnceParameters(const DiagnosticsEngine &Diags,
}
} // anonymous namespace
-namespace clang {
-namespace {
-typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
-typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
-typedef std::list<DelayedDiag> DiagList;
-
-struct SortDiagBySourceLocation {
- SourceManager &SM;
- SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}
-
- bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
- // Although this call will be slow, this is only called when outputting
- // multiple warnings.
- return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
- }
-};
-} // anonymous namespace
-} // namespace clang
-
//===----------------------------------------------------------------------===//
// -Wthread-safety
//===----------------------------------------------------------------------===//
@@ -2107,54 +2153,68 @@ public:
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//
-clang::sema::AnalysisBasedWarnings::Policy::Policy() {
+sema::AnalysisBasedWarnings::Policy::Policy() {
enableCheckFallThrough = 1;
enableCheckUnreachable = 0;
enableThreadSafetyAnalysis = 0;
enableConsumedAnalysis = 0;
}
+/// InterProceduralData aims to be a storage of whatever data should be passed
+/// between analyses of different functions.
+///
+/// At the moment, its primary goal is to make the information gathered during
+/// the analysis of the blocks available during the analysis of the enclosing
+/// function. This is important due to the fact that blocks are analyzed before
+/// the enclosed function is even parsed fully, so it is not viable to access
+/// anything in the outer scope while analyzing the block. On the other hand,
+/// re-building CFG for blocks and re-analyzing them when we do have all the
+/// information (i.e. during the analysis of the enclosing function) seems to be
+/// ill-designed.
+class sema::AnalysisBasedWarnings::InterProceduralData {
+public:
+ // It is important to analyze blocks within functions because it's a very
+ // common pattern to capture completion handler parameters by blocks.
+ CalledOnceInterProceduralData CalledOnceData;
+};
+
static unsigned isEnabled(DiagnosticsEngine &D, unsigned diag) {
return (unsigned)!D.isIgnored(diag, SourceLocation());
}
-clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
- : S(s),
- NumFunctionsAnalyzed(0),
- NumFunctionsWithBadCFGs(0),
- NumCFGBlocks(0),
- MaxCFGBlocksPerFunction(0),
- NumUninitAnalysisFunctions(0),
- NumUninitAnalysisVariables(0),
- MaxUninitAnalysisVariablesPerFunction(0),
- NumUninitAnalysisBlockVisits(0),
- MaxUninitAnalysisBlockVisitsPerFunction(0) {
+sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
+ : S(s), IPData(std::make_unique<InterProceduralData>()),
+ NumFunctionsAnalyzed(0), NumFunctionsWithBadCFGs(0), NumCFGBlocks(0),
+ MaxCFGBlocksPerFunction(0), NumUninitAnalysisFunctions(0),
+ NumUninitAnalysisVariables(0), MaxUninitAnalysisVariablesPerFunction(0),
+ NumUninitAnalysisBlockVisits(0),
+ MaxUninitAnalysisBlockVisitsPerFunction(0) {
using namespace diag;
DiagnosticsEngine &D = S.getDiagnostics();
DefaultPolicy.enableCheckUnreachable =
- isEnabled(D, warn_unreachable) ||
- isEnabled(D, warn_unreachable_break) ||
- isEnabled(D, warn_unreachable_return) ||
- isEnabled(D, warn_unreachable_loop_increment);
+ isEnabled(D, warn_unreachable) || isEnabled(D, warn_unreachable_break) ||
+ isEnabled(D, warn_unreachable_return) ||
+ isEnabled(D, warn_unreachable_loop_increment);
- DefaultPolicy.enableThreadSafetyAnalysis =
- isEnabled(D, warn_double_lock);
+ DefaultPolicy.enableThreadSafetyAnalysis = isEnabled(D, warn_double_lock);
DefaultPolicy.enableConsumedAnalysis =
- isEnabled(D, warn_use_in_invalid_state);
+ isEnabled(D, warn_use_in_invalid_state);
}
+// We need this here for unique_ptr with forward declared class.
+sema::AnalysisBasedWarnings::~AnalysisBasedWarnings() = default;
+
static void flushDiagnostics(Sema &S, const sema::FunctionScopeInfo *fscope) {
for (const auto &D : fscope->PossiblyUnreachableDiags)
S.Diag(D.Loc, D.PD);
}
-void clang::sema::
-AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
- sema::FunctionScopeInfo *fscope,
- const Decl *D, QualType BlockType) {
+void clang::sema::AnalysisBasedWarnings::IssueWarnings(
+ sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope,
+ const Decl *D, QualType BlockType) {
// We avoid doing analysis-based warnings when there are errors for
// two reasons:
@@ -2343,10 +2403,10 @@ AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
}
// Check for violations of "called once" parameter properties.
- if (S.getLangOpts().ObjC &&
+ if (S.getLangOpts().ObjC && !S.getLangOpts().CPlusPlus &&
shouldAnalyzeCalledOnceParameters(Diags, D->getBeginLoc())) {
if (AC.getCFG()) {
- CalledOnceCheckReporter Reporter(S);
+ CalledOnceCheckReporter Reporter(S, IPData->CalledOnceData);
checkCalledOnceParameters(
AC, Reporter,
shouldAnalyzeCalledOnceConventions(Diags, D->getBeginLoc()));
diff --git a/clang/lib/Sema/CodeCompleteConsumer.cpp b/clang/lib/Sema/CodeCompleteConsumer.cpp
index 678a09ba1003..3ab2a18f5e8d 100644
--- a/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -755,7 +755,7 @@ bool clang::operator<(const CodeCompletionResult &X,
std::string XSaved, YSaved;
StringRef XStr = X.getOrderedName(XSaved);
StringRef YStr = Y.getOrderedName(YSaved);
- int cmp = XStr.compare_lower(YStr);
+ int cmp = XStr.compare_insensitive(YStr);
if (cmp)
return cmp < 0;
diff --git a/clang/lib/Sema/DeclSpec.cpp b/clang/lib/Sema/DeclSpec.cpp
index da42db3e8f7b..72d9ea6dd3bf 100644
--- a/clang/lib/Sema/DeclSpec.cpp
+++ b/clang/lib/Sema/DeclSpec.cpp
@@ -625,7 +625,8 @@ bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
// OpenCL v1.2 s6.8 changes this to "The auto and register storage-class
// specifiers are not supported."
if (S.getLangOpts().OpenCL &&
- !S.getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers")) {
+ !S.getOpenCLOptions().isAvailableOption(
+ "cl_clang_storage_class_specifiers", S.getLangOpts())) {
switch (SC) {
case SCS_extern:
case SCS_private_extern:
@@ -1474,6 +1475,7 @@ bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc,
case VS_GNU_Final:
case VS_Sealed:
case VS_Final: VS_finalLoc = Loc; break;
+ case VS_Abstract: VS_abstractLoc = Loc; break;
}
return false;
@@ -1486,5 +1488,6 @@ const char *VirtSpecifiers::getSpecifierName(Specifier VS) {
case VS_Final: return "final";
case VS_GNU_Final: return "__final";
case VS_Sealed: return "sealed";
+ case VS_Abstract: return "abstract";
}
}
diff --git a/clang/lib/Sema/JumpDiagnostics.cpp b/clang/lib/Sema/JumpDiagnostics.cpp
index d33b14a79dc1..999c2a481459 100644
--- a/clang/lib/Sema/JumpDiagnostics.cpp
+++ b/clang/lib/Sema/JumpDiagnostics.cpp
@@ -11,13 +11,14 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtOpenMP.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/BitVector.h"
using namespace clang;
@@ -29,6 +30,10 @@ namespace {
/// int a[n];
/// L:
///
+/// We also detect jumps out of protected scopes when it's not possible to do
+/// cleanups properly. Indirect jumps and ASM jumps can't do cleanups because
+/// the target is unknown. Return statements with \c [[clang::musttail]] cannot
+/// handle any cleanups due to the nature of a tail call.
class JumpScopeChecker {
Sema &S;
@@ -68,6 +73,7 @@ class JumpScopeChecker {
SmallVector<Stmt*, 4> IndirectJumps;
SmallVector<Stmt*, 4> AsmJumps;
+ SmallVector<AttributedStmt *, 4> MustTailStmts;
SmallVector<LabelDecl*, 4> IndirectJumpTargets;
SmallVector<LabelDecl*, 4> AsmJumpTargets;
public:
@@ -81,6 +87,7 @@ private:
void VerifyJumps();
void VerifyIndirectOrAsmJumps(bool IsAsmGoto);
+ void VerifyMustTailStmts();
void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
void DiagnoseIndirectOrAsmJump(Stmt *IG, unsigned IGScope, LabelDecl *Target,
unsigned TargetScope);
@@ -88,6 +95,7 @@ private:
unsigned JumpDiag, unsigned JumpDiagWarning,
unsigned JumpDiagCXX98Compat);
void CheckGotoStmt(GotoStmt *GS);
+ const Attr *GetMustTailAttr(AttributedStmt *AS);
unsigned GetDeepestCommonScope(unsigned A, unsigned B);
};
@@ -109,6 +117,7 @@ JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s)
VerifyJumps();
VerifyIndirectOrAsmJumps(false);
VerifyIndirectOrAsmJumps(true);
+ VerifyMustTailStmts();
}
/// GetDeepestCommonScope - Finds the innermost scope enclosing the
@@ -580,6 +589,15 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S,
LabelAndGotoScopes[S] = ParentScope;
break;
+ case Stmt::AttributedStmtClass: {
+ AttributedStmt *AS = cast<AttributedStmt>(S);
+ if (GetMustTailAttr(AS)) {
+ LabelAndGotoScopes[AS] = ParentScope;
+ MustTailStmts.push_back(AS);
+ }
+ break;
+ }
+
default:
if (auto *ED = dyn_cast<OMPExecutableDirective>(S)) {
if (!ED->isStandaloneDirective()) {
@@ -947,6 +965,9 @@ void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
if (!ToScopesWarning.empty()) {
S.Diag(DiagLoc, JumpDiagWarning);
NoteJumpIntoScopes(ToScopesWarning);
+ assert(isa<LabelStmt>(To));
+ LabelStmt *Label = cast<LabelStmt>(To);
+ Label->setSideEntry(true);
}
// Handle errors.
@@ -971,6 +992,24 @@ void JumpScopeChecker::CheckGotoStmt(GotoStmt *GS) {
}
}
+void JumpScopeChecker::VerifyMustTailStmts() {
+ for (AttributedStmt *AS : MustTailStmts) {
+ for (unsigned I = LabelAndGotoScopes[AS]; I; I = Scopes[I].ParentScope) {
+ if (Scopes[I].OutDiag) {
+ S.Diag(AS->getBeginLoc(), diag::err_musttail_scope);
+ S.Diag(Scopes[I].Loc, Scopes[I].OutDiag);
+ }
+ }
+ }
+}
+
+const Attr *JumpScopeChecker::GetMustTailAttr(AttributedStmt *AS) {
+ ArrayRef<const Attr *> Attrs = AS->getAttrs();
+ const auto *Iter =
+ llvm::find_if(Attrs, [](const Attr *A) { return isa<MustTailAttr>(A); });
+ return Iter != Attrs.end() ? *Iter : nullptr;
+}
+
void Sema::DiagnoseInvalidJumps(Stmt *Body) {
(void)JumpScopeChecker(Body, *this);
}
diff --git a/clang/lib/Sema/MultiplexExternalSemaSource.cpp b/clang/lib/Sema/MultiplexExternalSemaSource.cpp
index 252008cda15d..072775642d75 100644
--- a/clang/lib/Sema/MultiplexExternalSemaSource.cpp
+++ b/clang/lib/Sema/MultiplexExternalSemaSource.cpp
@@ -10,7 +10,6 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Sema/MultiplexExternalSemaSource.h"
-#include "clang/AST/DeclContextInternals.h"
#include "clang/Sema/Lookup.h"
using namespace clang;
@@ -269,7 +268,7 @@ void MultiplexExternalSemaSource::ReadExtVectorDecls(
}
void MultiplexExternalSemaSource::ReadDeclsToCheckForDeferredDiags(
- llvm::SmallVector<Decl *, 4> &Decls) {
+ llvm::SmallSetVector<Decl *, 4> &Decls) {
for(size_t i = 0; i < Sources.size(); ++i)
Sources[i]->ReadDeclsToCheckForDeferredDiags(Decls);
}
diff --git a/clang/lib/Sema/OpenCLBuiltins.td b/clang/lib/Sema/OpenCLBuiltins.td
index 745363a6b43f..cd704fe395a9 100644
--- a/clang/lib/Sema/OpenCLBuiltins.td
+++ b/clang/lib/Sema/OpenCLBuiltins.td
@@ -50,9 +50,29 @@ class AbstractExtension<string _Ext> {
// Extension associated to a builtin function.
class FunctionExtension<string _Ext> : AbstractExtension<_Ext>;
+// Extension associated to a type. This enables implicit conditionalization of
+// builtin function overloads containing a type that depends on an extension.
+// During overload resolution, when a builtin function overload contains a type
+// with a TypeExtension, those overloads are skipped when the extension is
+// disabled.
+class TypeExtension<string _Ext> : AbstractExtension<_Ext>;
+
+// TypeExtension definitions.
+def NoTypeExt : TypeExtension<"">;
+def Fp16TypeExt : TypeExtension<"cl_khr_fp16">;
+def Fp64TypeExt : TypeExtension<"cl_khr_fp64">;
+
// FunctionExtension definitions.
def FuncExtNone : FunctionExtension<"">;
def FuncExtKhrSubgroups : FunctionExtension<"cl_khr_subgroups">;
+def FuncExtKhrSubgroupExtendedTypes : FunctionExtension<"cl_khr_subgroup_extended_types">;
+def FuncExtKhrSubgroupNonUniformVote : FunctionExtension<"cl_khr_subgroup_non_uniform_vote">;
+def FuncExtKhrSubgroupBallot : FunctionExtension<"cl_khr_subgroup_ballot">;
+def FuncExtKhrSubgroupNonUniformArithmetic: FunctionExtension<"cl_khr_subgroup_non_uniform_arithmetic">;
+def FuncExtKhrSubgroupShuffle : FunctionExtension<"cl_khr_subgroup_shuffle">;
+def FuncExtKhrSubgroupShuffleRelative : FunctionExtension<"cl_khr_subgroup_shuffle_relative">;
+def FuncExtKhrSubgroupClusteredReduce : FunctionExtension<"cl_khr_subgroup_clustered_reduce">;
+def FuncExtKhrExtendedBitOps : FunctionExtension<"cl_khr_extended_bit_ops">;
def FuncExtKhrGlobalInt32BaseAtomics : FunctionExtension<"cl_khr_global_int32_base_atomics">;
def FuncExtKhrGlobalInt32ExtendedAtomics : FunctionExtension<"cl_khr_global_int32_extended_atomics">;
def FuncExtKhrLocalInt32BaseAtomics : FunctionExtension<"cl_khr_local_int32_base_atomics">;
@@ -63,6 +83,9 @@ def FuncExtKhrMipmapImage : FunctionExtension<"cl_khr_mipmap_imag
def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_image_writes">;
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
+// Not a real extension, but a workaround to add C++ for OpenCL specific builtins.
+def FuncExtOpenCLCxx : FunctionExtension<"__cplusplus">;
+
// Multiple extensions
def FuncExtKhrMipmapWritesAndWrite3d : FunctionExtension<"cl_khr_mipmap_image_writes cl_khr_3d_image_writes">;
@@ -73,10 +96,10 @@ def ArmIntegerDotProductAccumulateInt16 : FunctionExtension<"cl_arm_integ
def ArmIntegerDotProductAccumulateSaturateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_saturate_int8">;
// Qualified Type. These map to ASTContext::QualType.
-class QualType<string _Name, bit _IsAbstract=0> {
- // Name of the field or function in a clang::ASTContext
- // E.g. Name="IntTy" for the int type, and "getIntPtrType()" for an intptr_t
- string Name = _Name;
+class QualType<string _TypeExpr, bit _IsAbstract=0> {
+ // Expression to obtain the QualType inside OCL2Qual.
+ // E.g. TypeExpr="Context.IntTy" for the int type.
+ string TypeExpr = _TypeExpr;
// Some QualTypes in this file represent an abstract type for which there is
// no corresponding AST QualType, e.g. a GenType or an `image2d_t` type
// without access qualifiers.
@@ -95,11 +118,11 @@ class IntList<string _Name, list<int> _List> {
// OpenCL C basic data types (int, float, image2d_t, ...).
// Its child classes can represent concrete types (e.g. VectorType) or
// abstract types (e.g. GenType).
-class Type<string _Name, QualType _QTName> {
+class Type<string _Name, QualType _QTExpr> {
// Name of the Type.
string Name = _Name;
// QualType associated with this type.
- QualType QTName = _QTName;
+ QualType QTExpr = _QTExpr;
// Size of the vector (if applicable).
int VecWidth = 1;
// Is a pointer.
@@ -112,10 +135,12 @@ class Type<string _Name, QualType _QTName> {
string AccessQualifier = "";
// Address space.
string AddrSpace = DefaultAS.Name;
+ // Extension that needs to be enabled to expose a builtin that uses this type.
+ TypeExtension Extension = NoTypeExt;
}
// OpenCL vector types (e.g. int2, int3, int16, float8, ...).
-class VectorType<Type _Ty, int _VecWidth> : Type<_Ty.Name, _Ty.QTName> {
+class VectorType<Type _Ty, int _VecWidth> : Type<_Ty.Name, _Ty.QTExpr> {
let VecWidth = _VecWidth;
let AccessQualifier = "";
// Inherited fields
@@ -123,11 +148,12 @@ class VectorType<Type _Ty, int _VecWidth> : Type<_Ty.Name, _Ty.QTName> {
let IsConst = _Ty.IsConst;
let IsVolatile = _Ty.IsVolatile;
let AddrSpace = _Ty.AddrSpace;
+ let Extension = _Ty.Extension;
}
// OpenCL pointer types (e.g. int*, float*, ...).
class PointerType<Type _Ty, AddressSpace _AS = DefaultAS> :
- Type<_Ty.Name, _Ty.QTName> {
+ Type<_Ty.Name, _Ty.QTExpr> {
let AddrSpace = _AS.Name;
// Inherited fields
let VecWidth = _Ty.VecWidth;
@@ -135,10 +161,11 @@ class PointerType<Type _Ty, AddressSpace _AS = DefaultAS> :
let IsConst = _Ty.IsConst;
let IsVolatile = _Ty.IsVolatile;
let AccessQualifier = _Ty.AccessQualifier;
+ let Extension = _Ty.Extension;
}
// OpenCL const types (e.g. const int).
-class ConstType<Type _Ty> : Type<_Ty.Name, _Ty.QTName> {
+class ConstType<Type _Ty> : Type<_Ty.Name, _Ty.QTExpr> {
let IsConst = 1;
// Inherited fields
let VecWidth = _Ty.VecWidth;
@@ -146,10 +173,11 @@ class ConstType<Type _Ty> : Type<_Ty.Name, _Ty.QTName> {
let IsVolatile = _Ty.IsVolatile;
let AccessQualifier = _Ty.AccessQualifier;
let AddrSpace = _Ty.AddrSpace;
+ let Extension = _Ty.Extension;
}
// OpenCL volatile types (e.g. volatile int).
-class VolatileType<Type _Ty> : Type<_Ty.Name, _Ty.QTName> {
+class VolatileType<Type _Ty> : Type<_Ty.Name, _Ty.QTExpr> {
let IsVolatile = 1;
// Inherited fields
let VecWidth = _Ty.VecWidth;
@@ -157,11 +185,12 @@ class VolatileType<Type _Ty> : Type<_Ty.Name, _Ty.QTName> {
let IsConst = _Ty.IsConst;
let AccessQualifier = _Ty.AccessQualifier;
let AddrSpace = _Ty.AddrSpace;
+ let Extension = _Ty.Extension;
}
// OpenCL image types (e.g. image2d).
class ImageType<Type _Ty, string _AccessQualifier> :
- Type<_Ty.Name, QualType<_Ty.QTName.Name#_AccessQualifier#"Ty", 0>> {
+ Type<_Ty.Name, QualType<_Ty.QTExpr.TypeExpr # _AccessQualifier # "Ty", 0>> {
let VecWidth = 0;
let AccessQualifier = _AccessQualifier;
// Inherited fields
@@ -169,6 +198,17 @@ class ImageType<Type _Ty, string _AccessQualifier> :
let IsConst = _Ty.IsConst;
let IsVolatile = _Ty.IsVolatile;
let AddrSpace = _Ty.AddrSpace;
+ let Extension = _Ty.Extension;
+}
+
+// OpenCL enum type (e.g. memory_scope).
+class EnumType<string _Name> :
+ Type<_Name, QualType<"getOpenCLEnumType(S, \"" # _Name # "\")", 0>> {
+}
+
+// OpenCL typedef type (e.g. cl_mem_fence_flags).
+class TypedefType<string _Name> :
+ Type<_Name, QualType<"getOpenCLTypedefType(S, \"" # _Name # "\")", 0>> {
}
// List of Types.
@@ -250,23 +290,27 @@ class Builtin<string _Name, list<Type> _Signature, list<bit> _Attributes = Attr.
//===----------------------------------------------------------------------===//
// OpenCL v1.0/1.2/2.0 s6.1.1: Built-in Scalar Data Types.
-def Bool : Type<"bool", QualType<"BoolTy">>;
-def Char : Type<"char", QualType<"CharTy">>;
-def UChar : Type<"uchar", QualType<"UnsignedCharTy">>;
-def Short : Type<"short", QualType<"ShortTy">>;
-def UShort : Type<"ushort", QualType<"UnsignedShortTy">>;
-def Int : Type<"int", QualType<"IntTy">>;
-def UInt : Type<"uint", QualType<"UnsignedIntTy">>;
-def Long : Type<"long", QualType<"LongTy">>;
-def ULong : Type<"ulong", QualType<"UnsignedLongTy">>;
-def Float : Type<"float", QualType<"FloatTy">>;
-def Double : Type<"double", QualType<"DoubleTy">>;
-def Half : Type<"half", QualType<"HalfTy">>;
-def Size : Type<"size_t", QualType<"getSizeType()">>;
-def PtrDiff : Type<"ptrdiff_t", QualType<"getPointerDiffType()">>;
-def IntPtr : Type<"intptr_t", QualType<"getIntPtrType()">>;
-def UIntPtr : Type<"uintptr_t", QualType<"getUIntPtrType()">>;
-def Void : Type<"void", QualType<"VoidTy">>;
+def Bool : Type<"bool", QualType<"Context.BoolTy">>;
+def Char : Type<"char", QualType<"Context.CharTy">>;
+def UChar : Type<"uchar", QualType<"Context.UnsignedCharTy">>;
+def Short : Type<"short", QualType<"Context.ShortTy">>;
+def UShort : Type<"ushort", QualType<"Context.UnsignedShortTy">>;
+def Int : Type<"int", QualType<"Context.IntTy">>;
+def UInt : Type<"uint", QualType<"Context.UnsignedIntTy">>;
+def Long : Type<"long", QualType<"Context.LongTy">>;
+def ULong : Type<"ulong", QualType<"Context.UnsignedLongTy">>;
+def Float : Type<"float", QualType<"Context.FloatTy">>;
+let Extension = Fp64TypeExt in {
+ def Double : Type<"double", QualType<"Context.DoubleTy">>;
+}
+let Extension = Fp16TypeExt in {
+ def Half : Type<"half", QualType<"Context.HalfTy">>;
+}
+def Size : Type<"size_t", QualType<"Context.getSizeType()">>;
+def PtrDiff : Type<"ptrdiff_t", QualType<"Context.getPointerDiffType()">>;
+def IntPtr : Type<"intptr_t", QualType<"Context.getIntPtrType()">>;
+def UIntPtr : Type<"uintptr_t", QualType<"Context.getUIntPtrType()">>;
+def Void : Type<"void", QualType<"Context.VoidTy">>;
// OpenCL v1.0/1.2/2.0 s6.1.2: Built-in Vector Data Types.
// Built-in vector data types are created by TableGen's OpenCLBuiltinEmitter.
@@ -274,36 +318,43 @@ def Void : Type<"void", QualType<"VoidTy">>;
// OpenCL v1.0/1.2/2.0 s6.1.3: Other Built-in Data Types.
// The image definitions are "abstract". They should not be used without
// specifying an access qualifier (RO/WO/RW).
-def Image1d : Type<"image1d_t", QualType<"OCLImage1d", 1>>;
-def Image2d : Type<"image2d_t", QualType<"OCLImage2d", 1>>;
-def Image3d : Type<"image3d_t", QualType<"OCLImage3d", 1>>;
-def Image1dArray : Type<"image1d_array_t", QualType<"OCLImage1dArray", 1>>;
-def Image1dBuffer : Type<"image1d_buffer_t", QualType<"OCLImage1dBuffer", 1>>;
-def Image2dArray : Type<"image2d_array_t", QualType<"OCLImage2dArray", 1>>;
-def Image2dDepth : Type<"image2d_depth_t", QualType<"OCLImage2dDepth", 1>>;
-def Image2dArrayDepth : Type<"image2d_array_depth_t", QualType<"OCLImage2dArrayDepth", 1>>;
-def Image2dMsaa : Type<"image2d_msaa_t", QualType<"OCLImage2dMSAA", 1>>;
-def Image2dArrayMsaa : Type<"image2d_array_msaa_t", QualType<"OCLImage2dArrayMSAA", 1>>;
-def Image2dMsaaDepth : Type<"image2d_msaa_depth_t", QualType<"OCLImage2dMSAADepth", 1>>;
-def Image2dArrayMsaaDepth : Type<"image2d_array_msaa_depth_t", QualType<"OCLImage2dArrayMSAADepth", 1>>;
-
-def Sampler : Type<"sampler_t", QualType<"OCLSamplerTy">>;
-def ClkEvent : Type<"clk_event_t", QualType<"OCLClkEventTy">>;
-def Event : Type<"event_t", QualType<"OCLEventTy">>;
-def Queue : Type<"queue_t", QualType<"OCLQueueTy">>;
-def ReserveId : Type<"reserve_id_t", QualType<"OCLReserveIDTy">>;
+def Image1d : Type<"image1d_t", QualType<"Context.OCLImage1d", 1>>;
+def Image2d : Type<"image2d_t", QualType<"Context.OCLImage2d", 1>>;
+def Image3d : Type<"image3d_t", QualType<"Context.OCLImage3d", 1>>;
+def Image1dArray : Type<"image1d_array_t", QualType<"Context.OCLImage1dArray", 1>>;
+def Image1dBuffer : Type<"image1d_buffer_t", QualType<"Context.OCLImage1dBuffer", 1>>;
+def Image2dArray : Type<"image2d_array_t", QualType<"Context.OCLImage2dArray", 1>>;
+def Image2dDepth : Type<"image2d_depth_t", QualType<"Context.OCLImage2dDepth", 1>>;
+def Image2dArrayDepth : Type<"image2d_array_depth_t", QualType<"Context.OCLImage2dArrayDepth", 1>>;
+def Image2dMsaa : Type<"image2d_msaa_t", QualType<"Context.OCLImage2dMSAA", 1>>;
+def Image2dArrayMsaa : Type<"image2d_array_msaa_t", QualType<"Context.OCLImage2dArrayMSAA", 1>>;
+def Image2dMsaaDepth : Type<"image2d_msaa_depth_t", QualType<"Context.OCLImage2dMSAADepth", 1>>;
+def Image2dArrayMsaaDepth : Type<"image2d_array_msaa_depth_t", QualType<"Context.OCLImage2dArrayMSAADepth", 1>>;
+
+def Sampler : Type<"sampler_t", QualType<"Context.OCLSamplerTy">>;
+def ClkEvent : Type<"clk_event_t", QualType<"Context.OCLClkEventTy">>;
+def Event : Type<"event_t", QualType<"Context.OCLEventTy">>;
+def Queue : Type<"queue_t", QualType<"Context.OCLQueueTy">>;
+def ReserveId : Type<"reserve_id_t", QualType<"Context.OCLReserveIDTy">>;
+def MemFenceFlags : TypedefType<"cl_mem_fence_flags">;
+def ClkProfilingInfo : TypedefType<"clk_profiling_info">;
+def NDRange : TypedefType<"ndrange_t">;
// OpenCL v2.0 s6.13.11: Atomic integer and floating-point types.
-def AtomicInt : Type<"atomic_int", QualType<"getAtomicType(Context.IntTy)">>;
-def AtomicUInt : Type<"atomic_uint", QualType<"getAtomicType(Context.UnsignedIntTy)">>;
-def AtomicLong : Type<"atomic_long", QualType<"getAtomicType(Context.LongTy)">>;
-def AtomicULong : Type<"atomic_ulong", QualType<"getAtomicType(Context.UnsignedLongTy)">>;
-def AtomicFloat : Type<"atomic_float", QualType<"getAtomicType(Context.FloatTy)">>;
-def AtomicDouble : Type<"atomic_double", QualType<"getAtomicType(Context.DoubleTy)">>;
-def AtomicIntPtr : Type<"atomic_intptr_t", QualType<"getAtomicType(Context.getIntPtrType())">>;
-def AtomicUIntPtr : Type<"atomic_uintptr_t", QualType<"getAtomicType(Context.getUIntPtrType())">>;
-def AtomicSize : Type<"atomic_size_t", QualType<"getAtomicType(Context.getSizeType())">>;
-def AtomicPtrDiff : Type<"atomic_ptrdiff_t", QualType<"getAtomicType(Context.getPointerDiffType())">>;
+def AtomicInt : Type<"atomic_int", QualType<"Context.getAtomicType(Context.IntTy)">>;
+def AtomicUInt : Type<"atomic_uint", QualType<"Context.getAtomicType(Context.UnsignedIntTy)">>;
+def AtomicLong : Type<"atomic_long", QualType<"Context.getAtomicType(Context.LongTy)">>;
+def AtomicULong : Type<"atomic_ulong", QualType<"Context.getAtomicType(Context.UnsignedLongTy)">>;
+def AtomicFloat : Type<"atomic_float", QualType<"Context.getAtomicType(Context.FloatTy)">>;
+def AtomicDouble : Type<"atomic_double", QualType<"Context.getAtomicType(Context.DoubleTy)">>;
+def AtomicIntPtr : Type<"atomic_intptr_t", QualType<"Context.getAtomicType(Context.getIntPtrType())">>;
+def AtomicUIntPtr : Type<"atomic_uintptr_t", QualType<"Context.getAtomicType(Context.getUIntPtrType())">>;
+def AtomicSize : Type<"atomic_size_t", QualType<"Context.getAtomicType(Context.getSizeType())">>;
+def AtomicPtrDiff : Type<"atomic_ptrdiff_t", QualType<"Context.getAtomicType(Context.getPointerDiffType())">>;
+
+def AtomicFlag : TypedefType<"atomic_flag">;
+def MemoryOrder : EnumType<"memory_order">;
+def MemoryScope : EnumType<"memory_scope">;
//===----------------------------------------------------------------------===//
// Definitions of OpenCL gentype variants
@@ -319,15 +370,10 @@ def AtomicPtrDiff : Type<"atomic_ptrdiff_t", QualType<"getAtomicType(Con
def VecAndScalar: IntList<"VecAndScalar", [1, 2, 3, 4, 8, 16]>;
def VecNoScalar : IntList<"VecNoScalar", [2, 3, 4, 8, 16]>;
def Vec1 : IntList<"Vec1", [1]>;
-def Vec2 : IntList<"Vec2", [2]>;
-def Vec4 : IntList<"Vec4", [4]>;
-def Vec8 : IntList<"Vec8", [8]>;
-def Vec16 : IntList<"Vec16", [16]>;
def Vec1234 : IntList<"Vec1234", [1, 2, 3, 4]>;
// Type lists.
def TLAll : TypeList<[Char, UChar, Short, UShort, Int, UInt, Long, ULong, Float, Double, Half]>;
-def TLAllUnsigned : TypeList<[UChar, UChar, UShort, UShort, UInt, UInt, ULong, ULong, UInt, ULong, UShort]>;
def TLFloat : TypeList<[Float, Double, Half]>;
def TLSignedInts : TypeList<[Char, Short, Int, Long]>;
def TLUnsignedInts : TypeList<[UChar, UShort, UInt, ULong]>;
@@ -344,6 +390,7 @@ def TLAllInts : TypeList<[Char, UChar, Short, UShort, Int, UInt, Long, ULo
// GenType definitions for multiple base types (e.g. all floating point types,
// or all integer types).
// All types
+def AGenType1 : GenericType<"AGenType1", TLAll, Vec1>;
def AGenTypeN : GenericType<"AGenTypeN", TLAll, VecAndScalar>;
def AGenTypeNNoScalar : GenericType<"AGenTypeNNoScalar", TLAll, VecNoScalar>;
// All integer
@@ -360,6 +407,9 @@ def UGenTypeN : GenericType<"UGenTypeN", TLUnsignedInts, VecAndScal
def FGenTypeN : GenericType<"FGenTypeN", TLFloat, VecAndScalar>;
// (u)int, (u)long, and all floats
def IntLongFloatGenType1 : GenericType<"IntLongFloatGenType1", TLIntLongFloats, Vec1>;
+// (u)char and (u)short
+def CharShortGenType1 : GenericType<"CharShortGenType1",
+ TypeList<[Char, UChar, Short, UShort]>, Vec1>;
// GenType definitions for every single base type (e.g. fp32 only).
// Names are like: GenTypeFloatVecAndScalar.
@@ -551,12 +601,13 @@ foreach name = ["half_divide", "half_powr",
foreach name = ["abs"] in {
def : Builtin<name, [AI2UGenTypeN, AIGenTypeN], Attr.Const>;
}
-foreach name = ["clz", "popcount"] in {
- def : Builtin<name, [AIGenTypeN, AIGenTypeN], Attr.Const>;
+def : Builtin<"clz", [AIGenTypeN, AIGenTypeN], Attr.Const>;
+let MinVersion = CL12 in {
+ def : Builtin<"popcount", [AIGenTypeN, AIGenTypeN], Attr.Const>;
}
let MinVersion = CL20 in {
foreach name = ["ctz"] in {
- def : Builtin<name, [AIGenTypeN, AIGenTypeN]>;
+ def : Builtin<name, [AIGenTypeN, AIGenTypeN], Attr.Const>;
}
}
@@ -623,12 +674,18 @@ foreach name = ["step"] in {
}
// --- 3 arguments ---
-foreach name = ["clamp", "mix"] in {
+foreach name = ["clamp"] in {
def : Builtin<name, [FGenTypeN, FGenTypeN, FGenTypeN, FGenTypeN], Attr.Const>;
def : Builtin<name, [GenTypeFloatVecNoScalar, GenTypeFloatVecNoScalar, Float, Float], Attr.Const>;
def : Builtin<name, [GenTypeDoubleVecNoScalar, GenTypeDoubleVecNoScalar, Double, Double], Attr.Const>;
def : Builtin<name, [GenTypeHalfVecNoScalar, GenTypeHalfVecNoScalar, Half, Half], Attr.Const>;
}
+foreach name = ["mix"] in {
+ def : Builtin<name, [FGenTypeN, FGenTypeN, FGenTypeN, FGenTypeN], Attr.Const>;
+ def : Builtin<name, [GenTypeFloatVecNoScalar, GenTypeFloatVecNoScalar, GenTypeFloatVecNoScalar, Float], Attr.Const>;
+ def : Builtin<name, [GenTypeDoubleVecNoScalar, GenTypeDoubleVecNoScalar, GenTypeDoubleVecNoScalar, Double], Attr.Const>;
+ def : Builtin<name, [GenTypeHalfVecNoScalar, GenTypeHalfVecNoScalar, GenTypeHalfVecNoScalar, Half], Attr.Const>;
+}
foreach name = ["smoothstep"] in {
def : Builtin<name, [FGenTypeN, FGenTypeN, FGenTypeN, FGenTypeN], Attr.Const>;
def : Builtin<name, [GenTypeFloatVecNoScalar, Float, Float, GenTypeFloatVecNoScalar], Attr.Const>;
@@ -886,6 +943,29 @@ foreach AS = [ConstantAS] in {
}
}
+// OpenCL v3.0 s6.15.8 - Synchronization Functions.
+def : Builtin<"barrier", [Void, MemFenceFlags], Attr.Convergent>;
+let MinVersion = CL20 in {
+ def : Builtin<"work_group_barrier", [Void, MemFenceFlags], Attr.Convergent>;
+ def : Builtin<"work_group_barrier", [Void, MemFenceFlags, MemoryScope], Attr.Convergent>;
+}
+
+// OpenCL v3.0 s6.15.9 - Legacy Explicit Memory Fence Functions.
+def : Builtin<"mem_fence", [Void, MemFenceFlags]>;
+def : Builtin<"read_mem_fence", [Void, MemFenceFlags]>;
+def : Builtin<"write_mem_fence", [Void, MemFenceFlags]>;
+
+// OpenCL v3.0 s6.15.10 - Address Space Qualifier Functions.
+// to_global, to_local, to_private are declared in Builtins.def.
+
+let MinVersion = CL20 in {
+ // The OpenCL 3.0 specification defines these with a "gentype" argument indicating any builtin
+ // type or user-defined type, which cannot be represented currently. Hence we slightly diverge
+ // by providing only the following overloads with a void pointer.
+ def : Builtin<"get_fence", [MemFenceFlags, PointerType<Void, GenericAS>]>;
+ def : Builtin<"get_fence", [MemFenceFlags, PointerType<ConstType<Void>, GenericAS>]>;
+}
+
//--------------------------------------------------------------------
// OpenCL v1.1 s6.11.10, v1.2 s6.12.10, v2.0 s6.13.10: Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
// OpenCL Extension v2.0 s5.1.7 and s6.1.7: Async Copies from Global to Local Memory, Local to Global Memory, and Prefetch
@@ -986,6 +1066,7 @@ let Extension = FuncExtKhrInt64ExtendedAtomics in {
}
// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
foreach AS = [GlobalAS, LocalAS] in {
+ def : Builtin<"atomic_xchg", [Float, PointerType<VolatileType<Float>, AS>, Float]>;
foreach Type = [Int, UInt] in {
foreach name = ["atomic_add", "atomic_sub", "atomic_xchg",
"atomic_min", "atomic_max", "atomic_and",
@@ -1000,8 +1081,27 @@ foreach AS = [GlobalAS, LocalAS] in {
}
}
}
+
+let Extension = FuncExtOpenCLCxx in {
+ foreach Type = [Int, UInt] in {
+ foreach name = ["atomic_add", "atomic_sub", "atomic_xchg",
+ "atomic_min", "atomic_max", "atomic_and",
+ "atomic_or", "atomic_xor"] in {
+ def : Builtin<name, [Type, PointerType<VolatileType<Type>, GenericAS>, Type]>;
+ }
+ foreach name = ["atomic_inc", "atomic_dec"] in {
+ def : Builtin<name, [Type, PointerType<VolatileType<Type>, GenericAS>]>;
+ }
+ foreach name = ["atomic_cmpxchg"] in {
+ def : Builtin<name, [Type, PointerType<VolatileType<Type>, GenericAS>, Type, Type]>;
+ }
+ }
+}
+
// OpenCL v2.0 s6.13.11 - Atomic Functions.
let MinVersion = CL20 in {
+ def : Builtin<"atomic_work_item_fence", [Void, MemFenceFlags, MemoryOrder, MemoryScope]>;
+
foreach TypePair = [[AtomicInt, Int], [AtomicUInt, UInt],
[AtomicLong, Long], [AtomicULong, ULong],
[AtomicFloat, Float], [AtomicDouble, Double]] in {
@@ -1009,55 +1109,104 @@ let MinVersion = CL20 in {
[Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
def : Builtin<"atomic_store",
[Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_store_explicit",
+ [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder]>;
+ def : Builtin<"atomic_store_explicit",
+ [Void, PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder, MemoryScope]>;
def : Builtin<"atomic_load",
[TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>]>;
+ def : Builtin<"atomic_load_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, MemoryOrder]>;
+ def : Builtin<"atomic_load_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, MemoryOrder, MemoryScope]>;
def : Builtin<"atomic_exchange",
[TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_exchange_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder]>;
+ def : Builtin<"atomic_exchange_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[1], MemoryOrder, MemoryScope]>;
foreach Variant = ["weak", "strong"] in {
def : Builtin<"atomic_compare_exchange_" # Variant,
[Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
PointerType<TypePair[1], GenericAS>, TypePair[1]]>;
+ def : Builtin<"atomic_compare_exchange_" # Variant # "_explicit",
+ [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
+ PointerType<TypePair[1], GenericAS>, TypePair[1], MemoryOrder, MemoryOrder]>;
+ def : Builtin<"atomic_compare_exchange_" # Variant # "_explicit",
+ [Bool, PointerType<VolatileType<TypePair[0]>, GenericAS>,
+ PointerType<TypePair[1], GenericAS>, TypePair[1], MemoryOrder, MemoryOrder, MemoryScope]>;
}
}
foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
[AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
- [AtomicIntPtr, IntPtr, PtrDiff],
[AtomicUIntPtr, UIntPtr, PtrDiff]] in {
foreach ModOp = ["add", "sub"] in {
def : Builtin<"atomic_fetch_" # ModOp,
[TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
+ def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder]>;
+ def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder, MemoryScope]>;
}
}
foreach TypePair = [[AtomicInt, Int, Int], [AtomicUInt, UInt, UInt],
- [AtomicLong, Long, Long], [AtomicULong, ULong, ULong],
- [AtomicIntPtr, IntPtr, IntPtr],
- [AtomicUIntPtr, UIntPtr, UIntPtr]] in {
+ [AtomicLong, Long, Long], [AtomicULong, ULong, ULong]] in {
foreach ModOp = ["or", "xor", "and", "min", "max"] in {
def : Builtin<"atomic_fetch_" # ModOp,
[TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2]]>;
+ def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder]>;
+ def : Builtin<"atomic_fetch_" # ModOp # "_explicit",
+ [TypePair[1], PointerType<VolatileType<TypePair[0]>, GenericAS>, TypePair[2], MemoryOrder, MemoryScope]>;
}
}
+
+ def : Builtin<"atomic_flag_clear",
+ [Void, PointerType<VolatileType<AtomicFlag>, GenericAS>]>;
+ def : Builtin<"atomic_flag_clear_explicit",
+ [Void, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder]>;
+ def : Builtin<"atomic_flag_clear_explicit",
+ [Void, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder, MemoryScope]>;
+
+ def : Builtin<"atomic_flag_test_and_set",
+ [Bool, PointerType<VolatileType<AtomicFlag>, GenericAS>]>;
+ def : Builtin<"atomic_flag_test_and_set_explicit",
+ [Bool, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder]>;
+ def : Builtin<"atomic_flag_test_and_set_explicit",
+ [Bool, PointerType<VolatileType<AtomicFlag>, GenericAS>, MemoryOrder, MemoryScope]>;
}
//--------------------------------------------------------------------
// OpenCL v1.1 s6.11.12, v1.2 s6.12.12, v2.0 s6.13.12 - Miscellaneous Vector Functions
// --- Table 19 ---
-foreach VSize1 = [Vec2, Vec4, Vec8, Vec16] in {
- foreach VSize2 = [Vec2, Vec4, Vec8, Vec16] in {
- def : Builtin<"shuffle", [GenericType<"TLAll" # VSize1.Name, TLAll, VSize1>,
- GenericType<"TLAll" # VSize2.Name, TLAll, VSize2>,
- GenericType<"TLAllUnsigned" # VSize1.Name, TLAllUnsigned, VSize1>],
- Attr.Const>;
+foreach VSize1 = [2, 4, 8, 16] in {
+ foreach VSize2 = [2, 4, 8, 16] in {
+ foreach VecAndMaskType = [[Char, UChar], [UChar, UChar],
+ [Short, UShort], [UShort, UShort],
+ [Int, UInt], [UInt, UInt],
+ [Long, ULong], [ULong, ULong],
+ [Float, UInt], [Double, ULong], [Half, UShort]] in {
+ def : Builtin<"shuffle", [VectorType<VecAndMaskType[0], VSize1>,
+ VectorType<VecAndMaskType[0], VSize2>,
+ VectorType<VecAndMaskType[1], VSize1>],
+ Attr.Const>;
+ }
}
}
-foreach VSize1 = [Vec2, Vec4, Vec8, Vec16] in {
- foreach VSize2 = [Vec2, Vec4, Vec8, Vec16] in {
- def : Builtin<"shuffle2", [GenericType<"TLAll" # VSize1.Name, TLAll, VSize1>,
- GenericType<"TLAll" # VSize2.Name, TLAll, VSize2>,
- GenericType<"TLAll" # VSize2.Name, TLAll, VSize2>,
- GenericType<"TLAllUnsigned" # VSize1.Name, TLAllUnsigned, VSize1>],
- Attr.Const>;
+foreach VSize1 = [2, 4, 8, 16] in {
+ foreach VSize2 = [2, 4, 8, 16] in {
+ foreach VecAndMaskType = [[Char, UChar], [UChar, UChar],
+ [Short, UShort], [UShort, UShort],
+ [Int, UInt], [UInt, UInt],
+ [Long, ULong], [ULong, ULong],
+ [Float, UInt], [Double, ULong], [Half, UShort]] in {
+ def : Builtin<"shuffle2", [VectorType<VecAndMaskType[0], VSize1>,
+ VectorType<VecAndMaskType[0], VSize2>,
+ VectorType<VecAndMaskType[0], VSize2>,
+ VectorType<VecAndMaskType[1], VSize1>],
+ Attr.Const>;
+ }
}
}
@@ -1092,24 +1241,26 @@ foreach coordTy = [Int, Float] in {
}
// --- Table 23: Sampler-less Read Functions ---
-foreach aQual = ["RO", "RW"] in {
- foreach imgTy = [Image2d, Image1dArray] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- }
- foreach imgTy = [Image3d, Image2dArray] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- }
- foreach imgTy = [Image1d, Image1dBuffer] in {
- def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
- def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+let MinVersion = CL12 in {
+ foreach aQual = ["RO", "RW"] in {
+ foreach imgTy = [Image2d, Image1dArray] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ }
+ foreach imgTy = [Image3d, Image2dArray] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ }
+ foreach imgTy = [Image1d, Image1dBuffer] in {
+ def : Builtin<"read_imagef", [VectorType<Float, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ def : Builtin<"read_imagei", [VectorType<Int, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ def : Builtin<"read_imageui", [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ }
+ def : Builtin<"read_imagef", [Float, ImageType<Image2dDepth, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ def : Builtin<"read_imagef", [Float, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>], Attr.Pure>;
}
- def : Builtin<"read_imagef", [Float, ImageType<Image2dDepth, aQual>, VectorType<Int, 2>], Attr.Pure>;
- def : Builtin<"read_imagef", [Float, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>], Attr.Pure>;
}
// --- Table 24: Image Write Functions ---
@@ -1150,21 +1301,21 @@ foreach aQual = ["RO", "WO", "RW"] in {
Image2dArrayDepth] in {
foreach name = ["get_image_width", "get_image_channel_data_type",
"get_image_channel_order"] in {
- def : Builtin<name, [Int, ImageType<imgTy, aQual>]>;
+ def : Builtin<name, [Int, ImageType<imgTy, aQual>], Attr.Const>;
}
}
foreach imgTy = [Image2d, Image3d, Image2dArray, Image2dDepth,
Image2dArrayDepth] in {
- def : Builtin<"get_image_height", [Int, ImageType<imgTy, aQual>]>;
+ def : Builtin<"get_image_height", [Int, ImageType<imgTy, aQual>], Attr.Const>;
}
- def : Builtin<"get_image_depth", [Int, ImageType<Image3d, aQual>]>;
+ def : Builtin<"get_image_depth", [Int, ImageType<Image3d, aQual>], Attr.Const>;
foreach imgTy = [Image2d, Image2dArray, Image2dDepth,
Image2dArrayDepth] in {
- def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>]>;
+ def : Builtin<"get_image_dim", [VectorType<Int, 2>, ImageType<imgTy, aQual>], Attr.Const>;
}
- def : Builtin<"get_image_dim", [VectorType<Int, 4>, ImageType<Image3d, aQual>]>;
+ def : Builtin<"get_image_dim", [VectorType<Int, 4>, ImageType<Image3d, aQual>], Attr.Const>;
foreach imgTy = [Image1dArray, Image2dArray, Image2dArrayDepth] in {
- def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>]>;
+ def : Builtin<"get_image_array_size", [Size, ImageType<imgTy, aQual>], Attr.Const>;
}
}
@@ -1187,16 +1338,18 @@ foreach aQual = ["RO"] in {
}
// OpenCL extension v2.0 s5.1.10: Built-in Image Sampler-less Read Functions
// --- Table 9 ---
-foreach aQual = ["RO", "RW"] in {
- foreach name = ["read_imageh"] in {
- foreach imgTy = [Image2d, Image1dArray] in {
- def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
- }
- foreach imgTy = [Image3d, Image2dArray] in {
- def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
- }
- foreach imgTy = [Image1d, Image1dBuffer] in {
- def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+let MinVersion = CL12 in {
+ foreach aQual = ["RO", "RW"] in {
+ foreach name = ["read_imageh"] in {
+ foreach imgTy = [Image2d, Image1dArray] in {
+ def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, VectorType<Int, 2>], Attr.Pure>;
+ }
+ foreach imgTy = [Image3d, Image2dArray] in {
+ def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, VectorType<Int, 4>], Attr.Pure>;
+ }
+ foreach imgTy = [Image1d, Image1dBuffer] in {
+ def : Builtin<name, [VectorType<Half, 4>, ImageType<imgTy, aQual>, Int], Attr.Pure>;
+ }
}
}
}
@@ -1257,20 +1410,38 @@ def : Builtin<"is_valid_reserve_id", [Bool, ReserveId]>;
// Defined in Builtins.def
// --- Table 33 ---
-def : Builtin<"enqueue_marker",
- [Int, Queue, UInt, PointerType<ConstType<ClkEvent>, GenericAS>, PointerType<ClkEvent, GenericAS>]>;
-
-// --- Table 34 ---
-def : Builtin<"retain_event", [Void, ClkEvent]>;
-def : Builtin<"release_event", [Void, ClkEvent]>;
-def : Builtin<"create_user_event", [ClkEvent]>;
-def : Builtin<"is_valid_event", [Bool, ClkEvent]>;
-def : Builtin<"set_user_event_status", [Void, ClkEvent, Int]>;
-// TODO: capture_event_profiling_info
-
-// --- Table 35 ---
-def : Builtin<"get_default_queue", [Queue]>;
-// TODO: ndrange functions
+let MinVersion = CL20 in {
+ def : Builtin<"enqueue_marker",
+ [Int, Queue, UInt, PointerType<ConstType<ClkEvent>, GenericAS>, PointerType<ClkEvent, GenericAS>]>;
+
+ // --- Table 34 ---
+ def : Builtin<"retain_event", [Void, ClkEvent]>;
+ def : Builtin<"release_event", [Void, ClkEvent]>;
+ def : Builtin<"create_user_event", [ClkEvent]>;
+ def : Builtin<"is_valid_event", [Bool, ClkEvent]>;
+ def : Builtin<"set_user_event_status", [Void, ClkEvent, Int]>;
+ def : Builtin<"capture_event_profiling_info",
+ [Void, ClkEvent, ClkProfilingInfo, PointerType<Void, GlobalAS>]>;
+
+ // --- Table 35 ---
+ def : Builtin<"get_default_queue", [Queue]>;
+
+ def : Builtin<"ndrange_1D", [NDRange, Size]>;
+ def : Builtin<"ndrange_1D", [NDRange, Size, Size]>;
+ def : Builtin<"ndrange_1D", [NDRange, Size, Size, Size]>;
+ def : Builtin<"ndrange_2D", [NDRange, PointerType<ConstType<Size>, PrivateAS>]>;
+ def : Builtin<"ndrange_2D", [NDRange, PointerType<ConstType<Size>, PrivateAS>,
+ PointerType<ConstType<Size>, PrivateAS>]>;
+ def : Builtin<"ndrange_2D", [NDRange, PointerType<ConstType<Size>, PrivateAS>,
+ PointerType<ConstType<Size>, PrivateAS>,
+ PointerType<ConstType<Size>, PrivateAS>]>;
+ def : Builtin<"ndrange_3D", [NDRange, PointerType<ConstType<Size>, PrivateAS>]>;
+ def : Builtin<"ndrange_3D", [NDRange, PointerType<ConstType<Size>, PrivateAS>,
+ PointerType<ConstType<Size>, PrivateAS>]>;
+ def : Builtin<"ndrange_3D", [NDRange, PointerType<ConstType<Size>, PrivateAS>,
+ PointerType<ConstType<Size>, PrivateAS>,
+ PointerType<ConstType<Size>, PrivateAS>]>;
+}
//--------------------------------------------------------------------
@@ -1324,11 +1495,11 @@ let Extension = FuncExtKhrMipmapImage in {
}
foreach name = ["read_imagei"] in {
def : Builtin<name, [VectorType<Int, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, VectorType<Float, 4>, VectorType<Float, 4>], Attr.Pure>;
- def : Builtin<name, [VectorType<Float, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, Float], Attr.Pure>;
+ def : Builtin<name, [VectorType<Int, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, Float], Attr.Pure>;
}
foreach name = ["read_imageui"] in {
def : Builtin<name, [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, VectorType<Float, 4>, VectorType<Float, 4>], Attr.Pure>;
- def : Builtin<name, [VectorType<Float, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, Float], Attr.Pure>;
+ def : Builtin<name, [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, Float], Attr.Pure>;
}
}
foreach imgTy = [Image1dArray] in {
@@ -1361,8 +1532,8 @@ let Extension = FuncExtKhrMipmapImage in {
}
foreach imgTy = [Image2dArrayDepth] in {
foreach name = ["read_imagef"] in {
- def : Builtin<name, [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, Float], Attr.Pure>;
- def : Builtin<name, [VectorType<UInt, 4>, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, VectorType<Float, 2>, VectorType<Float, 2>], Attr.Pure>;
+ def : Builtin<name, [Float, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, Float], Attr.Pure>;
+ def : Builtin<name, [Float, ImageType<imgTy, aQual>, Sampler, VectorType<Float, 4>, VectorType<Float, 2>, VectorType<Float, 2>], Attr.Pure>;
}
}
}
@@ -1464,7 +1635,10 @@ let Extension = FuncExtKhrSubgroups in {
}
// --- Table 28.2.2 ---
-// TODO: sub_group_barrier
+let Extension = FuncExtKhrSubgroups in {
+ def : Builtin<"sub_group_barrier", [Void, MemFenceFlags], Attr.Convergent>;
+ def : Builtin<"sub_group_barrier", [Void, MemFenceFlags, MemoryScope], Attr.Convergent>;
+}
// --- Table 28.2.4 ---
let Extension = FuncExtKhrSubgroups in {
@@ -1482,6 +1656,124 @@ let Extension = FuncExtKhrSubgroups in {
}
}
+// OpenCL Extension v3.0 s38 - Extended Subgroup Functions
+
+// Section 38.4.1 - cl_khr_subgroup_extended_types
+let Extension = FuncExtKhrSubgroupExtendedTypes in {
+ // For sub_group_broadcast, add scalar char, uchar, short, and ushort support,
+ def : Builtin<"sub_group_broadcast", [CharShortGenType1, CharShortGenType1, UInt], Attr.Convergent>;
+ // gentype may additionally be one of the supported built-in vector data types.
+ def : Builtin<"sub_group_broadcast", [AGenTypeNNoScalar, AGenTypeNNoScalar, UInt], Attr.Convergent>;
+
+ foreach name = ["sub_group_reduce_", "sub_group_scan_exclusive_",
+ "sub_group_scan_inclusive_"] in {
+ foreach op = ["add", "min", "max"] in {
+ def : Builtin<name # op, [CharShortGenType1, CharShortGenType1], Attr.Convergent>;
+ }
+ }
+}
+
+// Section 38.5.1 - cl_khr_subgroup_non_uniform_vote
+let Extension = FuncExtKhrSubgroupNonUniformVote in {
+ def : Builtin<"sub_group_elect", [Int]>;
+ def : Builtin<"sub_group_non_uniform_all", [Int, Int]>;
+ def : Builtin<"sub_group_non_uniform_any", [Int, Int]>;
+ def : Builtin<"sub_group_non_uniform_all_equal", [Int, AGenType1]>;
+}
+
+// Section 38.6.1 - cl_khr_subgroup_ballot
+let Extension = FuncExtKhrSubgroupBallot in {
+ def : Builtin<"sub_group_non_uniform_broadcast", [AGenTypeN, AGenTypeN, UInt]>;
+ def : Builtin<"sub_group_broadcast_first", [AGenType1, AGenType1]>;
+ def : Builtin<"sub_group_ballot", [VectorType<UInt, 4>, Int]>;
+ def : Builtin<"sub_group_inverse_ballot", [Int, VectorType<UInt, 4>], Attr.Const>;
+ def : Builtin<"sub_group_ballot_bit_extract", [Int, VectorType<UInt, 4>, UInt], Attr.Const>;
+ def : Builtin<"sub_group_ballot_bit_count", [UInt, VectorType<UInt, 4>], Attr.Const>;
+ def : Builtin<"sub_group_ballot_inclusive_scan", [UInt, VectorType<UInt, 4>]>;
+ def : Builtin<"sub_group_ballot_exclusive_scan", [UInt, VectorType<UInt, 4>]>;
+ def : Builtin<"sub_group_ballot_find_lsb", [UInt, VectorType<UInt, 4>]>;
+ def : Builtin<"sub_group_ballot_find_msb", [UInt, VectorType<UInt, 4>]>;
+
+ foreach op = ["eq", "ge", "gt", "le", "lt"] in {
+ def : Builtin<"get_sub_group_" # op # "_mask", [VectorType<UInt, 4>], Attr.Const>;
+ }
+}
+
+// Section 38.7.1 - cl_khr_subgroup_non_uniform_arithmetic
+let Extension = FuncExtKhrSubgroupNonUniformArithmetic in {
+ foreach name = ["reduce_", "scan_exclusive_", "scan_inclusive_"] in {
+ foreach op = ["add", "min", "max", "mul"] in {
+ def : Builtin<"sub_group_non_uniform_" # name # op, [AGenType1, AGenType1]>;
+ }
+ foreach op = ["and", "or", "xor"] in {
+ def : Builtin<"sub_group_non_uniform_" # name # op, [AIGenType1, AIGenType1]>;
+ }
+ foreach op = ["and", "or", "xor"] in {
+ def : Builtin<"sub_group_non_uniform_" # name # "logical_" # op, [Int, Int]>;
+ }
+ }
+}
+
+// Section 38.8.1 - cl_khr_subgroup_shuffle
+let Extension = FuncExtKhrSubgroupShuffle in {
+ def : Builtin<"sub_group_shuffle", [AGenType1, AGenType1, UInt]>;
+ def : Builtin<"sub_group_shuffle_xor", [AGenType1, AGenType1, UInt]>;
+}
+
+// Section 38.9.1 - cl_khr_subgroup_shuffle_relative
+let Extension = FuncExtKhrSubgroupShuffleRelative in {
+ def : Builtin<"sub_group_shuffle_up", [AGenType1, AGenType1, UInt]>;
+ def : Builtin<"sub_group_shuffle_down", [AGenType1, AGenType1, UInt]>;
+}
+
+// Section 38.10.1 - cl_khr_subgroup_clustered_reduce
+let Extension = FuncExtKhrSubgroupClusteredReduce in {
+ foreach op = ["add", "min", "max", "mul"] in {
+ def : Builtin<"sub_group_clustered_reduce_" # op, [AGenType1, AGenType1, UInt]>;
+ }
+ foreach op = ["and", "or", "xor"] in {
+ def : Builtin<"sub_group_clustered_reduce_" # op, [AIGenType1, AIGenType1, UInt]>;
+ }
+ foreach op = ["and", "or", "xor"] in {
+ def : Builtin<"sub_group_clustered_reduce_logical_" # op, [Int, Int, UInt]>;
+ }
+}
+
+// Section 40.3.1 - cl_khr_extended_bit_ops
+let Extension = FuncExtKhrExtendedBitOps in {
+ def : Builtin<"bitfield_insert", [AIGenTypeN, AIGenTypeN, AIGenTypeN, UInt, UInt], Attr.Const>;
+ def : Builtin<"bitfield_extract_signed", [SGenTypeN, SGenTypeN, UInt, UInt], Attr.Const>;
+ def : Builtin<"bitfield_extract_signed", [SGenTypeN, UGenTypeN, UInt, UInt], Attr.Const>;
+ def : Builtin<"bitfield_extract_unsigned", [UGenTypeN, SGenTypeN, UInt, UInt], Attr.Const>;
+ def : Builtin<"bitfield_extract_unsigned", [UGenTypeN, UGenTypeN, UInt, UInt], Attr.Const>;
+ def : Builtin<"bit_reverse", [AIGenTypeN, AIGenTypeN], Attr.Const>;
+}
+
+// Section 42.3 - cl_khr_integer_dot_product
+let Extension = FunctionExtension<"__opencl_c_integer_dot_product_input_4x8bit"> in {
+ def : Builtin<"dot", [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>], Attr.Const>;
+ def : Builtin<"dot", [Int, VectorType<Char, 4>, VectorType<Char, 4>], Attr.Const>;
+ def : Builtin<"dot", [Int, VectorType<UChar, 4>, VectorType<Char, 4>], Attr.Const>;
+ def : Builtin<"dot", [Int, VectorType<Char, 4>, VectorType<UChar, 4>], Attr.Const>;
+
+ def : Builtin<"dot_acc_sat", [UInt, VectorType<UChar, 4>, VectorType<UChar, 4>, UInt], Attr.Const>;
+ def : Builtin<"dot_acc_sat", [Int, VectorType<Char, 4>, VectorType<Char, 4>, Int], Attr.Const>;
+ def : Builtin<"dot_acc_sat", [Int, VectorType<UChar, 4>, VectorType<Char, 4>, Int], Attr.Const>;
+ def : Builtin<"dot_acc_sat", [Int, VectorType<Char, 4>, VectorType<UChar, 4>, Int], Attr.Const>;
+}
+
+let Extension = FunctionExtension<"__opencl_c_integer_dot_product_input_4x8bit_packed"> in {
+ def : Builtin<"dot_4x8packed_uu_uint", [UInt, UInt, UInt], Attr.Const>;
+ def : Builtin<"dot_4x8packed_ss_int", [Int, UInt, UInt], Attr.Const>;
+ def : Builtin<"dot_4x8packed_us_int", [Int, UInt, UInt], Attr.Const>;
+ def : Builtin<"dot_4x8packed_su_int", [Int, UInt, UInt], Attr.Const>;
+
+ def : Builtin<"dot_acc_sat_4x8packed_uu_uint", [UInt, UInt, UInt, UInt], Attr.Const>;
+ def : Builtin<"dot_acc_sat_4x8packed_ss_int", [Int, UInt, UInt, Int], Attr.Const>;
+ def : Builtin<"dot_acc_sat_4x8packed_us_int", [Int, UInt, UInt, Int], Attr.Const>;
+ def : Builtin<"dot_acc_sat_4x8packed_su_int", [Int, UInt, UInt, Int], Attr.Const>;
+}
+
//--------------------------------------------------------------------
// Arm extensions.
let Extension = ArmIntegerDotProductInt8 in {
diff --git a/clang/lib/Sema/ParsedAttr.cpp b/clang/lib/Sema/ParsedAttr.cpp
index 3ef8498baffd..ed03b0c7f688 100644
--- a/clang/lib/Sema/ParsedAttr.cpp
+++ b/clang/lib/Sema/ParsedAttr.cpp
@@ -159,6 +159,14 @@ bool ParsedAttr::diagnoseAppertainsTo(Sema &S, const Decl *D) const {
return getInfo().diagAppertainsToDecl(S, *this, D);
}
+bool ParsedAttr::diagnoseAppertainsTo(Sema &S, const Stmt *St) const {
+ return getInfo().diagAppertainsToStmt(S, *this, St);
+}
+
+bool ParsedAttr::diagnoseMutualExclusion(Sema &S, const Decl *D) const {
+ return getInfo().diagMutualExclusion(S, *this, D);
+}
+
bool ParsedAttr::appliesToDecl(const Decl *D,
attr::SubjectMatchRule MatchRule) const {
return checkAttributeMatchRuleAppliesTo(D, MatchRule);
@@ -204,3 +212,35 @@ bool ParsedAttr::hasVariadicArg() const {
// whether it's truly variadic or not.
return getInfo().OptArgs == 15;
}
+
+static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
+ // FIXME: Include the type in the argument list.
+ return AL.getNumArgs() + AL.hasParsedType();
+}
+
+template <typename Compare>
+static bool checkAttributeNumArgsImpl(Sema &S, const ParsedAttr &AL,
+ unsigned Num, unsigned Diag,
+ Compare Comp) {
+ if (Comp(getNumAttributeArgs(AL), Num)) {
+ S.Diag(AL.getLoc(), Diag) << AL << Num;
+ return false;
+ }
+ return true;
+}
+
+bool ParsedAttr::checkExactlyNumArgs(Sema &S, unsigned Num) const {
+ return checkAttributeNumArgsImpl(S, *this, Num,
+ diag::err_attribute_wrong_number_arguments,
+ std::not_equal_to<unsigned>());
+}
+bool ParsedAttr::checkAtLeastNumArgs(Sema &S, unsigned Num) const {
+ return checkAttributeNumArgsImpl(S, *this, Num,
+ diag::err_attribute_too_few_arguments,
+ std::less<unsigned>());
+}
+bool ParsedAttr::checkAtMostNumArgs(Sema &S, unsigned Num) const {
+ return checkAttributeNumArgsImpl(S, *this, Num,
+ diag::err_attribute_too_many_arguments,
+ std::greater<unsigned>());
+}
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index 55cb3aee6194..5d3de06e9576 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -14,6 +14,7 @@
#include "UsedDeclVisitor.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
@@ -21,12 +22,14 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Stack.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/CXXFieldCollector.h"
#include "clang/Sema/DelayedDiagnostic.h"
@@ -54,6 +57,26 @@ SourceLocation Sema::getLocForEndOfToken(SourceLocation Loc, unsigned Offset) {
ModuleLoader &Sema::getModuleLoader() const { return PP.getModuleLoader(); }
+DarwinSDKInfo *
+Sema::getDarwinSDKInfoForAvailabilityChecking(SourceLocation Loc,
+ StringRef Platform) {
+ if (CachedDarwinSDKInfo)
+ return CachedDarwinSDKInfo->get();
+ auto SDKInfo = parseDarwinSDKInfo(
+ PP.getFileManager().getVirtualFileSystem(),
+ PP.getHeaderSearchInfo().getHeaderSearchOpts().Sysroot);
+ if (SDKInfo && *SDKInfo) {
+ CachedDarwinSDKInfo = std::make_unique<DarwinSDKInfo>(std::move(**SDKInfo));
+ return CachedDarwinSDKInfo->get();
+ }
+ if (!SDKInfo)
+ llvm::consumeError(SDKInfo.takeError());
+ Diag(Loc, diag::warn_missing_sdksettings_for_availability_checking)
+ << Platform;
+ CachedDarwinSDKInfo = std::unique_ptr<DarwinSDKInfo>();
+ return nullptr;
+}
+
IdentifierInfo *
Sema::InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned int Index) {
@@ -182,6 +205,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this),
ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr),
CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) {
+ assert(pp.TUKind == TUKind);
TUScope = nullptr;
isConstantEvaluatedOverride = false;
@@ -298,7 +322,6 @@ void Sema::Initialize() {
if (getLangOpts().OpenCL) {
getOpenCLOptions().addSupport(
Context.getTargetInfo().getSupportedOpenCLOpts(), getLangOpts());
- getOpenCLOptions().enableSupportedCore(getLangOpts());
addImplicitTypedef("sampler_t", Context.OCLSamplerTy);
addImplicitTypedef("event_t", Context.OCLEventTy);
if (getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) {
@@ -308,25 +331,12 @@ void Sema::Initialize() {
addImplicitTypedef("atomic_int", Context.getAtomicType(Context.IntTy));
addImplicitTypedef("atomic_uint",
Context.getAtomicType(Context.UnsignedIntTy));
- auto AtomicLongT = Context.getAtomicType(Context.LongTy);
- addImplicitTypedef("atomic_long", AtomicLongT);
- auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
- addImplicitTypedef("atomic_ulong", AtomicULongT);
addImplicitTypedef("atomic_float",
Context.getAtomicType(Context.FloatTy));
- auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
- addImplicitTypedef("atomic_double", AtomicDoubleT);
// OpenCLC v2.0, s6.13.11.6 requires that atomic_flag is implemented as
// 32-bit integer and OpenCLC v2.0, s6.1.1 int is always 32-bit wide.
addImplicitTypedef("atomic_flag", Context.getAtomicType(Context.IntTy));
- auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
- addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
- auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
- addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
- auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
- addImplicitTypedef("atomic_size_t", AtomicSizeT);
- auto AtomicPtrDiffT = Context.getAtomicType(Context.getPointerDiffType());
- addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
+
// OpenCL v2.0 s6.13.11.6:
// - The atomic_long and atomic_ulong types are supported if the
@@ -339,31 +349,50 @@ void Sema::Initialize() {
// atomic_intptr_t, atomic_uintptr_t, atomic_size_t and
// atomic_ptrdiff_t are supported if the cl_khr_int64_base_atomics and
// cl_khr_int64_extended_atomics extensions are supported.
- std::vector<QualType> Atomic64BitTypes;
- Atomic64BitTypes.push_back(AtomicLongT);
- Atomic64BitTypes.push_back(AtomicULongT);
- Atomic64BitTypes.push_back(AtomicDoubleT);
- if (Context.getTypeSize(AtomicSizeT) == 64) {
- Atomic64BitTypes.push_back(AtomicSizeT);
- Atomic64BitTypes.push_back(AtomicIntPtrT);
- Atomic64BitTypes.push_back(AtomicUIntPtrT);
- Atomic64BitTypes.push_back(AtomicPtrDiffT);
+
+ auto AddPointerSizeDependentTypes = [&]() {
+ auto AtomicSizeT = Context.getAtomicType(Context.getSizeType());
+ auto AtomicIntPtrT = Context.getAtomicType(Context.getIntPtrType());
+ auto AtomicUIntPtrT = Context.getAtomicType(Context.getUIntPtrType());
+ auto AtomicPtrDiffT =
+ Context.getAtomicType(Context.getPointerDiffType());
+ addImplicitTypedef("atomic_size_t", AtomicSizeT);
+ addImplicitTypedef("atomic_intptr_t", AtomicIntPtrT);
+ addImplicitTypedef("atomic_uintptr_t", AtomicUIntPtrT);
+ addImplicitTypedef("atomic_ptrdiff_t", AtomicPtrDiffT);
+ };
+
+ if (Context.getTypeSize(Context.getSizeType()) == 32) {
+ AddPointerSizeDependentTypes();
}
- for (auto &I : Atomic64BitTypes)
- setOpenCLExtensionForType(I,
- "cl_khr_int64_base_atomics cl_khr_int64_extended_atomics");
- setOpenCLExtensionForType(AtomicDoubleT, "cl_khr_fp64");
+ std::vector<QualType> Atomic64BitTypes;
+ if (getOpenCLOptions().isSupported("cl_khr_int64_base_atomics",
+ getLangOpts()) &&
+ getOpenCLOptions().isSupported("cl_khr_int64_extended_atomics",
+ getLangOpts())) {
+ if (getOpenCLOptions().isSupported("cl_khr_fp64", getLangOpts())) {
+ auto AtomicDoubleT = Context.getAtomicType(Context.DoubleTy);
+ addImplicitTypedef("atomic_double", AtomicDoubleT);
+ Atomic64BitTypes.push_back(AtomicDoubleT);
+ }
+ auto AtomicLongT = Context.getAtomicType(Context.LongTy);
+ auto AtomicULongT = Context.getAtomicType(Context.UnsignedLongTy);
+ addImplicitTypedef("atomic_long", AtomicLongT);
+ addImplicitTypedef("atomic_ulong", AtomicULongT);
+
+
+ if (Context.getTypeSize(Context.getSizeType()) == 64) {
+ AddPointerSizeDependentTypes();
+ }
+ }
}
- setOpenCLExtensionForType(Context.DoubleTy, "cl_khr_fp64");
-#define GENERIC_IMAGE_TYPE_EXT(Type, Id, Ext) \
- setOpenCLExtensionForType(Context.Id, Ext);
-#include "clang/Basic/OpenCLImageTypes.def"
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- addImplicitTypedef(#ExtType, Context.Id##Ty); \
- setOpenCLExtensionForType(Context.Id##Ty, #Ext);
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ if (getOpenCLOptions().isSupported(#Ext, getLangOpts())) { \
+ addImplicitTypedef(#ExtType, Context.Id##Ty); \
+ }
#include "clang/Basic/OpenCLExtensionTypes.def"
}
@@ -385,6 +414,12 @@ void Sema::Initialize() {
#include "clang/Basic/PPCTypes.def"
}
+ if (Context.getTargetInfo().hasRISCVVTypes()) {
+#define RVV_TYPE(Name, Id, SingletonId) \
+ addImplicitTypedef(Name, Context.SingletonId);
+#include "clang/Basic/RISCVVTypes.def"
+ }
+
if (Context.getTargetInfo().hasBuiltinMSVaList()) {
DeclarationName MSVaList = &Context.Idents.get("__builtin_ms_va_list");
if (IdResolver.begin(MSVaList) == IdResolver.end())
@@ -537,6 +572,13 @@ void Sema::diagnoseZeroToNullptrConversion(CastKind Kind, const Expr* E) {
if (E->IgnoreParenImpCasts()->getType()->isNullPtrType())
return;
+ // Don't diagnose the conversion from a 0 literal to a null pointer argument
+ // in a synthesized call to operator<=>.
+ if (!CodeSynthesisContexts.empty() &&
+ CodeSynthesisContexts.back().Kind ==
+ CodeSynthesisContext::RewritingOperatorAsSpaceship)
+ return;
+
// If it is a macro from system header, and if the macro name is not "NULL",
// do not warn.
SourceLocation MaybeMacroLoc = E->getBeginLoc();
@@ -557,13 +599,14 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
const CXXCastPath *BasePath,
CheckedConversionKind CCK) {
#ifndef NDEBUG
- if (VK == VK_RValue && !E->isRValue()) {
+ if (VK == VK_PRValue && !E->isPRValue()) {
switch (Kind) {
default:
- llvm_unreachable(("can't implicitly cast lvalue to rvalue with this cast "
- "kind: " +
- std::string(CastExpr::getCastKindName(Kind)))
- .c_str());
+ llvm_unreachable(
+ ("can't implicitly cast glvalue to prvalue with this cast "
+ "kind: " +
+ std::string(CastExpr::getCastKindName(Kind)))
+ .c_str());
case CK_Dependent:
case CK_LValueToRValue:
case CK_ArrayToPointerDecay:
@@ -573,8 +616,8 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
break;
}
}
- assert((VK == VK_RValue || Kind == CK_Dependent || !E->isRValue()) &&
- "can't cast rvalue to lvalue");
+ assert((VK == VK_PRValue || Kind == CK_Dependent || !E->isPRValue()) &&
+ "can't cast prvalue to glvalue");
#endif
diagnoseNullableToNonnullConversion(Ty, E->getType(), E->getBeginLoc());
@@ -586,16 +629,36 @@ ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
if (ExprTy == TypeTy)
return E;
- // C++1z [conv.array]: The temporary materialization conversion is applied.
- // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
- if (Kind == CK_ArrayToPointerDecay && getLangOpts().CPlusPlus &&
- E->getValueKind() == VK_RValue) {
- // The temporary is an lvalue in C++98 and an xvalue otherwise.
- ExprResult Materialized = CreateMaterializeTemporaryExpr(
- E->getType(), E, !getLangOpts().CPlusPlus11);
- if (Materialized.isInvalid())
- return ExprError();
- E = Materialized.get();
+ if (Kind == CK_ArrayToPointerDecay) {
+ // C++1z [conv.array]: The temporary materialization conversion is applied.
+ // We also use this to fuel C++ DR1213, which applies to C++11 onwards.
+ if (getLangOpts().CPlusPlus && E->isPRValue()) {
+ // The temporary is an lvalue in C++98 and an xvalue otherwise.
+ ExprResult Materialized = CreateMaterializeTemporaryExpr(
+ E->getType(), E, !getLangOpts().CPlusPlus11);
+ if (Materialized.isInvalid())
+ return ExprError();
+ E = Materialized.get();
+ }
+ // C17 6.7.1p6 footnote 124: The implementation can treat any register
+ // declaration simply as an auto declaration. However, whether or not
+ // addressable storage is actually used, the address of any part of an
+ // object declared with storage-class specifier register cannot be
+ // computed, either explicitly(by use of the unary & operator as discussed
+ // in 6.5.3.2) or implicitly(by converting an array name to a pointer as
+ // discussed in 6.3.2.1).Thus, the only operator that can be applied to an
+ // array declared with storage-class specifier register is sizeof.
+ if (VK == VK_PRValue && !getLangOpts().CPlusPlus && !E->isPRValue()) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (VD->getStorageClass() == SC_Register) {
+ Diag(E->getExprLoc(), diag::err_typecheck_address_of)
+ << /*register variable*/ 3 << E->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+ }
}
if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
@@ -792,8 +855,21 @@ static void checkUndefinedButUsed(Sema &S) {
// FIXME: We can promote this to an error. The function or variable can't
// be defined anywhere else, so the program must necessarily violate the
// one definition rule.
- S.Diag(VD->getLocation(), diag::warn_undefined_internal)
- << isa<VarDecl>(VD) << VD;
+ bool IsImplicitBase = false;
+ if (const auto *BaseD = dyn_cast<FunctionDecl>(VD)) {
+ auto *DVAttr = BaseD->getAttr<OMPDeclareVariantAttr>();
+ if (DVAttr && !DVAttr->getTraitInfo().isExtensionActive(
+ llvm::omp::TraitProperty::
+ implementation_extension_disable_implicit_base)) {
+ const auto *Func = cast<FunctionDecl>(
+ cast<DeclRefExpr>(DVAttr->getVariantFuncRef())->getDecl());
+ IsImplicitBase = BaseD->isImplicit() &&
+ Func->getIdentifier()->isMangledOpenMPVariantName();
+ }
+ }
+ if (!S.getLangOpts().OpenMP || !IsImplicitBase)
+ S.Diag(VD->getLocation(), diag::warn_undefined_internal)
+ << isa<VarDecl>(VD) << VD;
} else if (auto *FD = dyn_cast<FunctionDecl>(VD)) {
(void)FD;
assert(FD->getMostRecentDecl()->isInlined() &&
@@ -1548,6 +1624,8 @@ public:
DeferredDiagnosticsEmitter(Sema &S)
: Inherited(S), ShouldEmitRootNode(false), InOMPDeviceContext(0) {}
+ bool shouldVisitDiscardedStmt() const { return false; }
+
void VisitOMPTargetDirective(OMPTargetDirective *Node) {
++InOMPDeviceContext;
Inherited::VisitOMPTargetDirective(Node);
@@ -1733,11 +1811,12 @@ Sema::SemaDiagnosticBuilder::~SemaDiagnosticBuilder() {
}
}
-Sema::SemaDiagnosticBuilder Sema::targetDiag(SourceLocation Loc,
- unsigned DiagID) {
+Sema::SemaDiagnosticBuilder
+Sema::targetDiag(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD) {
+ FD = FD ? FD : getCurFunctionDecl();
if (LangOpts.OpenMP)
- return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID)
- : diagIfOpenMPHostCode(Loc, DiagID);
+ return LangOpts.OpenMPIsDevice ? diagIfOpenMPDeviceCode(Loc, DiagID, FD)
+ : diagIfOpenMPHostCode(Loc, DiagID, FD);
if (getLangOpts().CUDA)
return getLangOpts().CUDAIsDevice ? CUDADiagIfDeviceCode(Loc, DiagID)
: CUDADiagIfHostCode(Loc, DiagID);
@@ -1746,7 +1825,7 @@ Sema::SemaDiagnosticBuilder Sema::targetDiag(SourceLocation Loc,
return SYCLDiagIfDeviceCode(Loc, DiagID);
return SemaDiagnosticBuilder(SemaDiagnosticBuilder::K_Immediate, Loc, DiagID,
- getCurFunctionDecl(), *this);
+ FD, *this);
}
Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
@@ -1754,7 +1833,7 @@ Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
bool IsError = Diags.getDiagnosticIDs()->isDefaultMappingAsError(DiagID);
bool ShouldDefer = getLangOpts().CUDA && LangOpts.GPUDeferDiag &&
DiagnosticIDs::isDeferrable(DiagID) &&
- (DeferHint || !IsError);
+ (DeferHint || DeferDiags || !IsError);
auto SetIsLastErrorImmediate = [&](bool Flag) {
if (IsError)
IsLastErrorImmediate = Flag;
@@ -1765,15 +1844,14 @@ Sema::SemaDiagnosticBuilder Sema::Diag(SourceLocation Loc, unsigned DiagID,
DiagID, getCurFunctionDecl(), *this);
}
- SemaDiagnosticBuilder DB =
- getLangOpts().CUDAIsDevice
- ? CUDADiagIfDeviceCode(Loc, DiagID)
- : CUDADiagIfHostCode(Loc, DiagID);
+ SemaDiagnosticBuilder DB = getLangOpts().CUDAIsDevice
+ ? CUDADiagIfDeviceCode(Loc, DiagID)
+ : CUDADiagIfHostCode(Loc, DiagID);
SetIsLastErrorImmediate(DB.isImmediate());
return DB;
}
-void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
+void Sema::checkDeviceDecl(ValueDecl *D, SourceLocation Loc) {
if (isUnevaluatedContext())
return;
@@ -1791,13 +1869,17 @@ void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
return;
}
+ // Try to associate errors with the lexical context, if that is a function, or
+ // the value declaration otherwise.
+ FunctionDecl *FD =
+ isa<FunctionDecl>(C) ? cast<FunctionDecl>(C) : dyn_cast<FunctionDecl>(D);
auto CheckType = [&](QualType Ty) {
if (Ty->isDependentType())
return;
if (Ty->isExtIntType()) {
if (!Context.getTargetInfo().hasExtIntType()) {
- targetDiag(Loc, diag::err_device_unsupported_type)
+ targetDiag(Loc, diag::err_device_unsupported_type, FD)
<< D << false /*show bit size*/ << 0 /*bitsize*/
<< Ty << Context.getTargetInfo().getTriple().str();
}
@@ -1810,11 +1892,12 @@ void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
!Context.getTargetInfo().hasFloat128Type()) ||
(Ty->isIntegerType() && Context.getTypeSize(Ty) == 128 &&
!Context.getTargetInfo().hasInt128Type())) {
- targetDiag(Loc, diag::err_device_unsupported_type)
+ if (targetDiag(Loc, diag::err_device_unsupported_type, FD)
<< D << true /*show bit size*/
<< static_cast<unsigned>(Context.getTypeSize(Ty)) << Ty
- << Context.getTargetInfo().getTriple().str();
- targetDiag(D->getLocation(), diag::note_defined_here) << D;
+ << Context.getTargetInfo().getTriple().str())
+ D->setInvalidDecl();
+ targetDiag(D->getLocation(), diag::note_defined_here, FD) << D;
}
};
@@ -1826,6 +1909,8 @@ void Sema::checkDeviceDecl(const ValueDecl *D, SourceLocation Loc) {
CheckType(ParamTy);
CheckType(FPTy->getReturnType());
}
+ if (const auto *FNPTy = dyn_cast<FunctionNoProtoType>(Ty))
+ CheckType(FNPTy->getReturnType());
}
/// Looks through the macro-expansion chain for the given
@@ -1913,6 +1998,9 @@ void Sema::RecordParsingTemplateParameterDepth(unsigned Depth) {
// Check that the type of the VarDecl has an accessible copy constructor and
// resolve its destructor's exception specification.
+// This also performs initialization of block variables when they are moved
+// to the heap. It uses the same rules as applicable for implicit moves
+// according to the C++ standard in effect ([class.copy.elision]p3).
static void checkEscapingByref(VarDecl *VD, Sema &S) {
QualType T = VD->getType();
EnterExpressionEvaluationContext scope(
@@ -1920,9 +2008,18 @@ static void checkEscapingByref(VarDecl *VD, Sema &S) {
SourceLocation Loc = VD->getLocation();
Expr *VarRef =
new (S.Context) DeclRefExpr(S.Context, VD, false, T, VK_LValue, Loc);
- ExprResult Result = S.PerformMoveOrCopyInitialization(
- InitializedEntity::InitializeBlock(Loc, T, false), VD, VD->getType(),
- VarRef, /*AllowNRVO=*/true);
+ ExprResult Result;
+ auto IE = InitializedEntity::InitializeBlock(Loc, T, false);
+ if (S.getLangOpts().CPlusPlus2b) {
+ auto *E = ImplicitCastExpr::Create(S.Context, T, CK_NoOp, VarRef, nullptr,
+ VK_XValue, FPOptionsOverride());
+ Result = S.PerformCopyInitialization(IE, SourceLocation(), E);
+ } else {
+ Result = S.PerformMoveOrCopyInitialization(
+ IE, Sema::NamedReturnInfo{VD, Sema::NamedReturnInfo::MoveEligible},
+ VarRef);
+ }
+
if (!Result.isInvalid()) {
Result = S.MaybeCreateExprWithCleanups(Result);
Expr *Init = Result.getAs<Expr>();
@@ -2047,6 +2144,11 @@ void Sema::setFunctionHasIndirectGoto() {
FunctionScopes.back()->setHasIndirectGoto();
}
+void Sema::setFunctionHasMustTail() {
+ if (!FunctionScopes.empty())
+ FunctionScopes.back()->setHasMustTail();
+}
+
BlockScopeInfo *Sema::getCurBlock() {
if (FunctionScopes.empty())
return nullptr;
@@ -2287,13 +2389,11 @@ bool Sema::tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
/// ill-formed expression.
static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
const SourceLocation FinalNoteLoc) {
- int ShownOverloads = 0;
- int SuppressedOverloads = 0;
+ unsigned ShownOverloads = 0;
+ unsigned SuppressedOverloads = 0;
for (UnresolvedSetImpl::iterator It = Overloads.begin(),
DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
- // FIXME: Magic number for max shown overloads stolen from
- // OverloadCandidateSet::NoteCandidates.
- if (ShownOverloads >= 4 && S.Diags.getShowOverloads() == Ovl_Best) {
+ if (ShownOverloads >= S.Diags.getNumOverloadCandidatesToShow()) {
++SuppressedOverloads;
continue;
}
@@ -2309,6 +2409,8 @@ static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
++ShownOverloads;
}
+ S.Diags.overloadCandidatesShown(ShownOverloads);
+
if (SuppressedOverloads)
S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
<< SuppressedOverloads;
@@ -2433,114 +2535,3 @@ const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
return DeleteExprs;
}
-
-void Sema::setOpenCLExtensionForType(QualType T, llvm::StringRef ExtStr) {
- if (ExtStr.empty())
- return;
- llvm::SmallVector<StringRef, 1> Exts;
- ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false);
- auto CanT = T.getCanonicalType().getTypePtr();
- for (auto &I : Exts)
- OpenCLTypeExtMap[CanT].insert(I.str());
-}
-
-void Sema::setOpenCLExtensionForDecl(Decl *FD, StringRef ExtStr) {
- llvm::SmallVector<StringRef, 1> Exts;
- ExtStr.split(Exts, " ", /* limit */ -1, /* keep empty */ false);
- if (Exts.empty())
- return;
- for (auto &I : Exts)
- OpenCLDeclExtMap[FD].insert(I.str());
-}
-
-void Sema::setCurrentOpenCLExtensionForType(QualType T) {
- if (CurrOpenCLExtension.empty())
- return;
- setOpenCLExtensionForType(T, CurrOpenCLExtension);
-}
-
-void Sema::setCurrentOpenCLExtensionForDecl(Decl *D) {
- if (CurrOpenCLExtension.empty())
- return;
- setOpenCLExtensionForDecl(D, CurrOpenCLExtension);
-}
-
-std::string Sema::getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD) {
- if (!OpenCLDeclExtMap.empty())
- return getOpenCLExtensionsFromExtMap(FD, OpenCLDeclExtMap);
-
- return "";
-}
-
-std::string Sema::getOpenCLExtensionsFromTypeExtMap(FunctionType *FT) {
- if (!OpenCLTypeExtMap.empty())
- return getOpenCLExtensionsFromExtMap(FT, OpenCLTypeExtMap);
-
- return "";
-}
-
-template <typename T, typename MapT>
-std::string Sema::getOpenCLExtensionsFromExtMap(T *FDT, MapT &Map) {
- auto Loc = Map.find(FDT);
- return llvm::join(Loc->second, " ");
-}
-
-bool Sema::isOpenCLDisabledDecl(Decl *FD) {
- auto Loc = OpenCLDeclExtMap.find(FD);
- if (Loc == OpenCLDeclExtMap.end())
- return false;
- for (auto &I : Loc->second) {
- if (!getOpenCLOptions().isEnabled(I))
- return true;
- }
- return false;
-}
-
-template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
-bool Sema::checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc,
- DiagInfoT DiagInfo, MapT &Map,
- unsigned Selector,
- SourceRange SrcRange) {
- auto Loc = Map.find(D);
- if (Loc == Map.end())
- return false;
- bool Disabled = false;
- for (auto &I : Loc->second) {
- if (I != CurrOpenCLExtension && !getOpenCLOptions().isEnabled(I)) {
- Diag(DiagLoc, diag::err_opencl_requires_extension) << Selector << DiagInfo
- << I << SrcRange;
- Disabled = true;
- }
- }
- return Disabled;
-}
-
-bool Sema::checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType QT) {
- // Check extensions for declared types.
- Decl *Decl = nullptr;
- if (auto TypedefT = dyn_cast<TypedefType>(QT.getTypePtr()))
- Decl = TypedefT->getDecl();
- if (auto TagT = dyn_cast<TagType>(QT.getCanonicalType().getTypePtr()))
- Decl = TagT->getDecl();
- auto Loc = DS.getTypeSpecTypeLoc();
-
- // Check extensions for vector types.
- // e.g. double4 is not allowed when cl_khr_fp64 is absent.
- if (QT->isExtVectorType()) {
- auto TypePtr = QT->castAs<ExtVectorType>()->getElementType().getTypePtr();
- return checkOpenCLDisabledTypeOrDecl(TypePtr, Loc, QT, OpenCLTypeExtMap);
- }
-
- if (checkOpenCLDisabledTypeOrDecl(Decl, Loc, QT, OpenCLDeclExtMap))
- return true;
-
- // Check extensions for builtin types.
- return checkOpenCLDisabledTypeOrDecl(QT.getCanonicalType().getTypePtr(), Loc,
- QT, OpenCLTypeExtMap);
-}
-
-bool Sema::checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E) {
- IdentifierInfo *FnName = D.getIdentifier();
- return checkOpenCLDisabledTypeOrDecl(&D, E.getBeginLoc(), FnName,
- OpenCLDeclExtMap, 1, D.getSourceRange());
-}
diff --git a/clang/lib/Sema/SemaAccess.cpp b/clang/lib/Sema/SemaAccess.cpp
index be30445d143c..580305c1110b 100644
--- a/clang/lib/Sema/SemaAccess.cpp
+++ b/clang/lib/Sema/SemaAccess.cpp
@@ -84,6 +84,20 @@ struct EffectiveContext {
: Inner(DC),
Dependent(DC->isDependentContext()) {
+ // An implicit deduction guide is semantically in the context enclosing the
+ // class template, but for access purposes behaves like the constructor
+ // from which it was produced.
+ if (auto *DGD = dyn_cast<CXXDeductionGuideDecl>(DC)) {
+ if (DGD->isImplicit()) {
+ DC = DGD->getCorrespondingConstructor();
+ if (!DC) {
+ // The copy deduction candidate doesn't have a corresponding
+ // constructor.
+ DC = cast<DeclContext>(DGD->getDeducedTemplate()->getTemplatedDecl());
+ }
+ }
+ }
+
// C++11 [class.access.nest]p1:
// A nested class is a member and as such has the same access
// rights as any other member.
@@ -1294,17 +1308,18 @@ static bool IsMicrosoftUsingDeclarationAccessBug(Sema& S,
SourceLocation AccessLoc,
AccessTarget &Entity) {
if (UsingShadowDecl *Shadow =
- dyn_cast<UsingShadowDecl>(Entity.getTargetDecl())) {
- const NamedDecl *OrigDecl = Entity.getTargetDecl()->getUnderlyingDecl();
- if (Entity.getTargetDecl()->getAccess() == AS_private &&
- (OrigDecl->getAccess() == AS_public ||
- OrigDecl->getAccess() == AS_protected)) {
- S.Diag(AccessLoc, diag::ext_ms_using_declaration_inaccessible)
- << Shadow->getUsingDecl()->getQualifiedNameAsString()
- << OrigDecl->getQualifiedNameAsString();
- return true;
+ dyn_cast<UsingShadowDecl>(Entity.getTargetDecl()))
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(Shadow->getIntroducer())) {
+ const NamedDecl *OrigDecl = Entity.getTargetDecl()->getUnderlyingDecl();
+ if (Entity.getTargetDecl()->getAccess() == AS_private &&
+ (OrigDecl->getAccess() == AS_public ||
+ OrigDecl->getAccess() == AS_protected)) {
+ S.Diag(AccessLoc, diag::ext_ms_using_declaration_inaccessible)
+ << UD->getQualifiedNameAsString()
+ << OrigDecl->getQualifiedNameAsString();
+ return true;
+ }
}
- }
return false;
}
diff --git a/clang/lib/Sema/SemaAttr.cpp b/clang/lib/Sema/SemaAttr.cpp
index 6e441a0ded0d..fe8f02f02368 100644
--- a/clang/lib/Sema/SemaAttr.cpp
+++ b/clang/lib/Sema/SemaAttr.cpp
@@ -269,8 +269,10 @@ void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
AlignPackStack.Act(PragmaLoc, Action, StringRef(), Info);
}
-void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionAction Action,
- PragmaClangSectionKind SecKind, StringRef SecName) {
+void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc,
+ PragmaClangSectionAction Action,
+ PragmaClangSectionKind SecKind,
+ StringRef SecName) {
PragmaClangSection *CSec;
int SectionFlags = ASTContext::PSF_Read;
switch (SecKind) {
@@ -301,6 +303,13 @@ void Sema::ActOnPragmaClangSection(SourceLocation PragmaLoc, PragmaClangSectionA
return;
}
+ if (llvm::Error E = isValidSectionSpecifier(SecName)) {
+ Diag(PragmaLoc, diag::err_pragma_section_invalid_for_target)
+ << toString(std::move(E));
+ CSec->Valid = false;
+ return;
+ }
+
if (UnifySection(SecName, SectionFlags, PragmaLoc))
return;
@@ -867,12 +876,33 @@ void Sema::ActOnPragmaAttributeAttribute(
}
Rules.clear();
} else {
- for (const auto &Rule : StrictSubjectMatchRuleSet) {
- if (Rules.erase(Rule.first)) {
+ // Each rule in Rules must be a strict subset of the attribute's
+ // SubjectMatch rules. I.e. we're allowed to use
+ // `apply_to=variables(is_global)` on an attribute with SubjectList<[Var]>,
+ // but should not allow `apply_to=variables` on an attribute which has
+ // `SubjectList<[GlobalVar]>`.
+ for (const auto &StrictRule : StrictSubjectMatchRuleSet) {
+ // First, check for exact match.
+ if (Rules.erase(StrictRule.first)) {
// Add the rule to the set of attribute receivers only if it's supported
// in the current language mode.
- if (Rule.second)
- SubjectMatchRules.push_back(Rule.first);
+ if (StrictRule.second)
+ SubjectMatchRules.push_back(StrictRule.first);
+ }
+ }
+ // Check remaining rules for subset matches.
+ auto RulesToCheck = Rules;
+ for (const auto &Rule : RulesToCheck) {
+ attr::SubjectMatchRule MatchRule = attr::SubjectMatchRule(Rule.first);
+ if (auto ParentRule = getParentAttrMatcherRule(MatchRule)) {
+ if (llvm::any_of(StrictSubjectMatchRuleSet,
+ [ParentRule](const auto &StrictRule) {
+ return StrictRule.first == *ParentRule &&
+ StrictRule.second; // IsEnabled
+ })) {
+ SubjectMatchRules.push_back(MatchRule);
+ Rules.erase(MatchRule);
+ }
}
}
}
@@ -1180,3 +1210,55 @@ void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) {
if (Stack->empty())
FreeVisContext();
}
+
+template <typename Ty>
+static bool checkCommonAttributeFeatures(Sema& S, const Ty *Node,
+ const ParsedAttr& A) {
+ // Several attributes carry different semantics than the parsing requires, so
+ // those are opted out of the common argument checks.
+ //
+ // We also bail on unknown and ignored attributes because those are handled
+ // as part of the target-specific handling logic.
+ if (A.getKind() == ParsedAttr::UnknownAttribute)
+ return false;
+ // Check whether the attribute requires specific language extensions to be
+ // enabled.
+ if (!A.diagnoseLangOpts(S))
+ return true;
+ // Check whether the attribute appertains to the given subject.
+ if (!A.diagnoseAppertainsTo(S, Node))
+ return true;
+ // Check whether the attribute is mutually exclusive with other attributes
+ // that have already been applied to the declaration.
+ if (!A.diagnoseMutualExclusion(S, Node))
+ return true;
+ // Check whether the attribute exists in the target architecture.
+ if (S.CheckAttrTarget(A))
+ return true;
+
+ if (A.hasCustomParsing())
+ return false;
+
+ if (A.getMinArgs() == A.getMaxArgs()) {
+ // If there are no optional arguments, then checking for the argument count
+ // is trivial.
+ if (!A.checkExactlyNumArgs(S, A.getMinArgs()))
+ return true;
+ } else {
+ // There are optional arguments, so checking is slightly more involved.
+ if (A.getMinArgs() && !A.checkAtLeastNumArgs(S, A.getMinArgs()))
+ return true;
+ else if (!A.hasVariadicArg() && A.getMaxArgs() &&
+ !A.checkAtMostNumArgs(S, A.getMaxArgs()))
+ return true;
+ }
+
+ return false;
+}
+
+bool Sema::checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A) {
+ return ::checkCommonAttributeFeatures(*this, D, A);
+}
+bool Sema::checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A) {
+ return ::checkCommonAttributeFeatures(*this, S, A);
+}
diff --git a/clang/lib/Sema/SemaAvailability.cpp b/clang/lib/Sema/SemaAvailability.cpp
index 74c4b9e16f74..bb704b1066cf 100644
--- a/clang/lib/Sema/SemaAvailability.cpp
+++ b/clang/lib/Sema/SemaAvailability.cpp
@@ -666,13 +666,6 @@ public:
SemaRef.Context.getTargetInfo().getPlatformMinVersion());
}
- bool TraverseDecl(Decl *D) {
- // Avoid visiting nested functions to prevent duplicate warnings.
- if (!D || isa<FunctionDecl>(D))
- return true;
- return Base::TraverseDecl(D);
- }
-
bool TraverseStmt(Stmt *S) {
if (!S)
return true;
@@ -686,17 +679,11 @@ public:
bool TraverseIfStmt(IfStmt *If);
- bool TraverseLambdaExpr(LambdaExpr *E) { return true; }
-
// for 'case X:' statements, don't bother looking at the 'X'; it can't lead
// to any useful diagnostics.
bool TraverseCaseStmt(CaseStmt *CS) { return TraverseStmt(CS->getSubStmt()); }
- bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) {
- if (PRE->isClassReceiver())
- DiagnoseDeclAvailability(PRE->getClassReceiver(), PRE->getReceiverLocation());
- return true;
- }
+ bool VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *PRE) { return true; }
bool VisitObjCMessageExpr(ObjCMessageExpr *Msg) {
if (ObjCMethodDecl *D = Msg->getMethodDecl()) {
@@ -919,6 +906,17 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
DiagnoseUnguardedAvailability(*this, D).IssueDiagnostics(Body);
}
+FunctionScopeInfo *Sema::getCurFunctionAvailabilityContext() {
+ if (FunctionScopes.empty())
+ return nullptr;
+
+ // Conservatively search the entire current function scope context for
+ // availability violations. This ensures we always correctly analyze nested
+ // classes, blocks, lambdas, etc. that may or may not be inside if(@available)
+ // checks themselves.
+ return FunctionScopes.front();
+}
+
void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
@@ -941,11 +939,8 @@ void Sema::DiagnoseAvailabilityOfDecl(NamedDecl *D,
// We need to know the @available context in the current function to
// diagnose this use, let DiagnoseUnguardedAvailabilityViolations do that
// when we're done parsing the current function.
- if (getCurFunctionOrMethodDecl()) {
- getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
- return;
- } else if (getCurBlock() || getCurLambda()) {
- getCurFunction()->HasPotentialAvailabilityViolations = true;
+ if (FunctionScopeInfo *Context = getCurFunctionAvailabilityContext()) {
+ Context->HasPotentialAvailabilityViolations = true;
return;
}
}
diff --git a/clang/lib/Sema/SemaCUDA.cpp b/clang/lib/Sema/SemaCUDA.cpp
index ee91eb4c5deb..75364c10c154 100644
--- a/clang/lib/Sema/SemaCUDA.cpp
+++ b/clang/lib/Sema/SemaCUDA.cpp
@@ -26,6 +26,14 @@
#include "llvm/ADT/SmallVector.h"
using namespace clang;
+template <typename AttrT> static bool hasExplicitAttr(const VarDecl *D) {
+ if (!D)
+ return false;
+ if (auto *A = D->getAttr<AttrT>())
+ return !A->isImplicit();
+ return false;
+}
+
void Sema::PushForceCUDAHostDevice() {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
ForceCUDAHostDeviceDepth++;
@@ -133,6 +141,38 @@ Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
return CFT_Host;
}
+/// IdentifyTarget - Determine the CUDA compilation target for this variable.
+Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
+ if (Var->hasAttr<HIPManagedAttr>())
+ return CVT_Unified;
+ if (Var->isConstexpr() && !hasExplicitAttr<CUDAConstantAttr>(Var))
+ return CVT_Both;
+ if (Var->getType().isConstQualified() && Var->hasAttr<CUDAConstantAttr>() &&
+ !hasExplicitAttr<CUDAConstantAttr>(Var))
+ return CVT_Both;
+ if (Var->hasAttr<CUDADeviceAttr>() || Var->hasAttr<CUDAConstantAttr>() ||
+ Var->hasAttr<CUDASharedAttr>() ||
+ Var->getType()->isCUDADeviceBuiltinSurfaceType() ||
+ Var->getType()->isCUDADeviceBuiltinTextureType())
+ return CVT_Device;
+ // Function-scope static variables without explicit device or constant
+ // attributes are emitted
+ // - on both sides in host device functions
+ // - on device side in device or global functions
+ if (auto *FD = dyn_cast<FunctionDecl>(Var->getDeclContext())) {
+ switch (IdentifyCUDATarget(FD)) {
+ case CFT_HostDevice:
+ return CVT_Both;
+ case CFT_Device:
+ case CFT_Global:
+ return CVT_Device;
+ default:
+ return CVT_Host;
+ }
+ }
+ return CVT_Host;
+}
+
// * CUDA Call preference table
//
// F - from,
@@ -512,44 +552,78 @@ bool Sema::isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *DD) {
return true;
}
+namespace {
+enum CUDAInitializerCheckKind {
+ CICK_DeviceOrConstant, // Check initializer for device/constant variable
+ CICK_Shared, // Check initializer for shared variable
+};
+
+bool IsDependentVar(VarDecl *VD) {
+ if (VD->getType()->isDependentType())
+ return true;
+ if (const auto *Init = VD->getInit())
+ return Init->isValueDependent();
+ return false;
+}
+
+// Check whether a variable has an allowed initializer for a CUDA device side
+// variable with global storage. \p VD may be a host variable to be checked for
+// potential promotion to device side variable.
+//
+// CUDA/HIP allows only empty constructors as initializers for global
+// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
+// __shared__ variables whether they are local or not (they all are implicitly
+// static in CUDA). One exception is that CUDA allows constant initializers
+// for __constant__ and __device__ variables.
+bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
+ CUDAInitializerCheckKind CheckKind) {
+ assert(!VD->isInvalidDecl() && VD->hasGlobalStorage());
+ assert(!IsDependentVar(VD) && "do not check dependent var");
+ const Expr *Init = VD->getInit();
+ auto IsEmptyInit = [&](const Expr *Init) {
+ if (!Init)
+ return true;
+ if (const auto *CE = dyn_cast<CXXConstructExpr>(Init)) {
+ return S.isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
+ }
+ return false;
+ };
+ auto IsConstantInit = [&](const Expr *Init) {
+ assert(Init);
+ return Init->isConstantInitializer(S.Context,
+ VD->getType()->isReferenceType());
+ };
+ auto HasEmptyDtor = [&](VarDecl *VD) {
+ if (const auto *RD = VD->getType()->getAsCXXRecordDecl())
+ return S.isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
+ return true;
+ };
+ if (CheckKind == CICK_Shared)
+ return IsEmptyInit(Init) && HasEmptyDtor(VD);
+ return S.LangOpts.GPUAllowDeviceInit ||
+ ((IsEmptyInit(Init) || IsConstantInit(Init)) && HasEmptyDtor(VD));
+}
+} // namespace
+
void Sema::checkAllowedCUDAInitializer(VarDecl *VD) {
- if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage())
+ // Do not check dependent variables since the ctor/dtor/initializer are not
+ // determined. Do it after instantiation.
+ if (VD->isInvalidDecl() || !VD->hasInit() || !VD->hasGlobalStorage() ||
+ IsDependentVar(VD))
return;
const Expr *Init = VD->getInit();
- if (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>() ||
- VD->hasAttr<CUDASharedAttr>()) {
- if (LangOpts.GPUAllowDeviceInit)
+ bool IsSharedVar = VD->hasAttr<CUDASharedAttr>();
+ bool IsDeviceOrConstantVar =
+ !IsSharedVar &&
+ (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>());
+ if (IsDeviceOrConstantVar || IsSharedVar) {
+ if (HasAllowedCUDADeviceStaticInitializer(
+ *this, VD, IsSharedVar ? CICK_Shared : CICK_DeviceOrConstant))
return;
- bool AllowedInit = false;
- if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(Init))
- AllowedInit =
- isEmptyCudaConstructor(VD->getLocation(), CE->getConstructor());
- // We'll allow constant initializers even if it's a non-empty
- // constructor according to CUDA rules. This deviates from NVCC,
- // but allows us to handle things like constexpr constructors.
- if (!AllowedInit &&
- (VD->hasAttr<CUDADeviceAttr>() || VD->hasAttr<CUDAConstantAttr>())) {
- auto *Init = VD->getInit();
- AllowedInit =
- ((VD->getType()->isDependentType() || Init->isValueDependent()) &&
- VD->isConstexpr()) ||
- Init->isConstantInitializer(Context,
- VD->getType()->isReferenceType());
- }
-
- // Also make sure that destructor, if there is one, is empty.
- if (AllowedInit)
- if (CXXRecordDecl *RD = VD->getType()->getAsCXXRecordDecl())
- AllowedInit =
- isEmptyCudaDestructor(VD->getLocation(), RD->getDestructor());
-
- if (!AllowedInit) {
- Diag(VD->getLocation(), VD->hasAttr<CUDASharedAttr>()
- ? diag::err_shared_var_init
- : diag::err_dynamic_var_init)
- << Init->getSourceRange();
- VD->setInvalidDecl();
- }
+ Diag(VD->getLocation(),
+ IsSharedVar ? diag::err_shared_var_init : diag::err_dynamic_var_init)
+ << Init->getSourceRange();
+ VD->setInvalidDecl();
} else {
// This is a host-side global variable. Check that the initializer is
// callable from the host side.
@@ -632,9 +706,19 @@ void Sema::maybeAddCUDAHostDeviceAttrs(FunctionDecl *NewD,
NewD->addAttr(CUDADeviceAttr::CreateImplicit(Context));
}
+// TODO: `__constant__` memory may be a limited resource for certain targets.
+// A safeguard may be needed at the end of compilation pipeline if
+// `__constant__` memory usage goes beyond limit.
void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
- if (getLangOpts().CUDAIsDevice && VD->isConstexpr() &&
- (VD->isFileVarDecl() || VD->isStaticDataMember())) {
+  // Do not promote dependent variables since the ctor/dtor/initializer are
+  // not determined. Do it after instantiation.
+  if (getLangOpts().CUDAIsDevice && !VD->hasAttr<CUDAConstantAttr>() &&
+      !VD->hasAttr<CUDASharedAttr>() &&
+ (VD->isFileVarDecl() || VD->isStaticDataMember()) &&
+ !IsDependentVar(VD) &&
+ (VD->isConstexpr() || (VD->getType().isConstQualified() &&
+ HasAllowedCUDADeviceStaticInitializer(
+ *this, VD, CICK_DeviceOrConstant)))) {
VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
}
}
diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp
index a4421d2b68af..1c8f6329bd67 100644
--- a/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -227,12 +227,19 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
return true;
}
- // Fixed enum types are complete, but they aren't valid as scopes
- // until we see a definition, so awkwardly pull out this special
- // case.
- auto *EnumD = dyn_cast<EnumDecl>(tag);
- if (!EnumD)
- return false;
+ if (auto *EnumD = dyn_cast<EnumDecl>(tag))
+ // Fixed enum types and scoped enum instantiations are complete, but they
+ // aren't valid as scopes until we see or instantiate their definition.
+ return RequireCompleteEnumDecl(EnumD, loc, &SS);
+
+ return false;
+}
+
+/// Require that the EnumDecl is completed with its enumerators defined or
+/// instantiated. SS, if provided, is the nested-name-specifier that was
+/// parsed; it is used for diagnostics and is invalidated on failure.
+bool Sema::RequireCompleteEnumDecl(EnumDecl *EnumD, SourceLocation L,
+ CXXScopeSpec *SS) {
if (EnumD->isCompleteDefinition()) {
// If we know about the definition but it is not visible, complain.
NamedDecl *SuggestedDef = nullptr;
@@ -241,8 +248,8 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = !isSFINAEContext();
- diagnoseMissingImport(loc, SuggestedDef, MissingImportKind::Definition,
- /*Recover*/TreatAsComplete);
+ diagnoseMissingImport(L, SuggestedDef, MissingImportKind::Definition,
+ /*Recover*/ TreatAsComplete);
return !TreatAsComplete;
}
return false;
@@ -253,19 +260,26 @@ bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
if (EnumDecl *Pattern = EnumD->getInstantiatedFromMemberEnum()) {
MemberSpecializationInfo *MSI = EnumD->getMemberSpecializationInfo();
if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) {
- if (InstantiateEnum(loc, EnumD, Pattern,
+ if (InstantiateEnum(L, EnumD, Pattern,
getTemplateInstantiationArgs(EnumD),
TSK_ImplicitInstantiation)) {
- SS.SetInvalid(SS.getRange());
+ if (SS)
+ SS->SetInvalid(SS->getRange());
return true;
}
return false;
}
}
- Diag(loc, diag::err_incomplete_nested_name_spec)
- << type << SS.getRange();
- SS.SetInvalid(SS.getRange());
+ if (SS) {
+ Diag(L, diag::err_incomplete_nested_name_spec)
+ << QualType(EnumD->getTypeForDecl(), 0) << SS->getRange();
+ SS->SetInvalid(SS->getRange());
+ } else {
+ Diag(L, diag::err_incomplete_enum) << QualType(EnumD->getTypeForDecl(), 0);
+ Diag(EnumD->getLocation(), diag::note_declared_at);
+ }
+
return true;
}
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index 671820afd485..cac43075f860 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
@@ -23,6 +23,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/SmallVector.h"
#include <set>
using namespace clang;
@@ -60,6 +61,15 @@ namespace {
ValueKind(Expr::getValueKindForType(destType)),
Kind(CK_Dependent), IsARCUnbridgedCast(false) {
+ // C++ [expr.type]/8.2.2:
+    //   If a prvalue initially has the type cv T, where T is a
+ // cv-unqualified non-class, non-array type, the type of the
+ // expression is adjusted to T prior to any further analysis.
+ if (!S.Context.getLangOpts().ObjC && !DestType->isRecordType() &&
+ !DestType->isArrayType()) {
+ DestType = DestType.getUnqualifiedType();
+ }
+
if (const BuiltinType *placeholder =
src.get()->getType()->getAsPlaceholderType()) {
PlaceholderKind = placeholder->getKind();
@@ -746,7 +756,7 @@ static TryCastResult getCastAwayConstnessCastKind(CastAwayConstnessKind CACK,
void CastOperation::CheckDynamicCast() {
CheckNoDerefRAII NoderefCheck(*this);
- if (ValueKind == VK_RValue)
+ if (ValueKind == VK_PRValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
@@ -814,7 +824,7 @@ void CastOperation::CheckDynamicCast() {
} else {
// If we're dynamic_casting from a prvalue to an rvalue reference, we need
// to materialize the prvalue before we bind the reference to it.
- if (SrcExpr.get()->isRValue())
+ if (SrcExpr.get()->isPRValue())
SrcExpr = Self.CreateMaterializeTemporaryExpr(
SrcType, SrcExpr.get(), /*IsLValueReference*/ false);
SrcPointee = SrcType;
@@ -913,7 +923,7 @@ void CastOperation::CheckDynamicCast() {
void CastOperation::CheckConstCast() {
CheckNoDerefRAII NoderefCheck(*this);
- if (ValueKind == VK_RValue)
+ if (ValueKind == VK_PRValue)
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else if (isPlaceholder())
SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.get());
@@ -1035,13 +1045,97 @@ static void DiagnoseReinterpretUpDownCast(Sema &Self, const Expr *SrcExpr,
<< FixItHint::CreateReplacement(BeginLoc, "static_cast");
}
+static bool argTypeIsABIEquivalent(QualType SrcType, QualType DestType,
+ ASTContext &Context) {
+ if (SrcType->isPointerType() && DestType->isPointerType())
+ return true;
+
+ // Allow integral type mismatch if their size are equal.
+ if (SrcType->isIntegralType(Context) && DestType->isIntegralType(Context))
+ if (Context.getTypeInfoInChars(SrcType).Width ==
+ Context.getTypeInfoInChars(DestType).Width)
+ return true;
+
+ return Context.hasSameUnqualifiedType(SrcType, DestType);
+}
+
+static bool checkCastFunctionType(Sema &Self, const ExprResult &SrcExpr,
+ QualType DestType) {
+ if (Self.Diags.isIgnored(diag::warn_cast_function_type,
+ SrcExpr.get()->getExprLoc()))
+ return true;
+
+ QualType SrcType = SrcExpr.get()->getType();
+ const FunctionType *SrcFTy = nullptr;
+ const FunctionType *DstFTy = nullptr;
+ if (((SrcType->isBlockPointerType() || SrcType->isFunctionPointerType()) &&
+ DestType->isFunctionPointerType()) ||
+ (SrcType->isMemberFunctionPointerType() &&
+ DestType->isMemberFunctionPointerType())) {
+ SrcFTy = SrcType->getPointeeType()->castAs<FunctionType>();
+ DstFTy = DestType->getPointeeType()->castAs<FunctionType>();
+ } else if (SrcType->isFunctionType() && DestType->isFunctionReferenceType()) {
+ SrcFTy = SrcType->castAs<FunctionType>();
+ DstFTy = DestType.getNonReferenceType()->castAs<FunctionType>();
+ } else {
+ return true;
+ }
+ assert(SrcFTy && DstFTy);
+
+ auto IsVoidVoid = [](const FunctionType *T) {
+ if (!T->getReturnType()->isVoidType())
+ return false;
+ if (const auto *PT = T->getAs<FunctionProtoType>())
+ return !PT->isVariadic() && PT->getNumParams() == 0;
+ return false;
+ };
+
+ // Skip if either function type is void(*)(void)
+ if (IsVoidVoid(SrcFTy) || IsVoidVoid(DstFTy))
+ return true;
+
+ // Check return type.
+ if (!argTypeIsABIEquivalent(SrcFTy->getReturnType(), DstFTy->getReturnType(),
+ Self.Context))
+ return false;
+
+ // Check if either has unspecified number of parameters
+ if (SrcFTy->isFunctionNoProtoType() || DstFTy->isFunctionNoProtoType())
+ return true;
+
+ // Check parameter types.
+
+ const auto *SrcFPTy = cast<FunctionProtoType>(SrcFTy);
+ const auto *DstFPTy = cast<FunctionProtoType>(DstFTy);
+
+ // In a cast involving function types with a variable argument list only the
+ // types of initial arguments that are provided are considered.
+ unsigned NumParams = SrcFPTy->getNumParams();
+ unsigned DstNumParams = DstFPTy->getNumParams();
+ if (NumParams > DstNumParams) {
+ if (!DstFPTy->isVariadic())
+ return false;
+ NumParams = DstNumParams;
+ } else if (NumParams < DstNumParams) {
+ if (!SrcFPTy->isVariadic())
+ return false;
+ }
+
+ for (unsigned i = 0; i < NumParams; ++i)
+ if (!argTypeIsABIEquivalent(SrcFPTy->getParamType(i),
+ DstFPTy->getParamType(i), Self.Context))
+ return false;
+
+ return true;
+}
+
/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
/// valid.
/// Refer to C++ 5.2.10 for details. reinterpret_cast is typically used in code
/// like this:
/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
void CastOperation::CheckReinterpretCast() {
- if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload))
+ if (ValueKind == VK_PRValue && !isPlaceholder(BuiltinType::Overload))
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
else
checkNonOverloadPlaceholders();
@@ -1072,6 +1166,10 @@ void CastOperation::CheckReinterpretCast() {
if (Self.getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
checkObjCConversion(Sema::CCK_OtherCast);
DiagnoseReinterpretUpDownCast(Self, SrcExpr.get(), DestType, OpRange);
+
+ if (!checkCastFunctionType(Self, SrcExpr, DestType))
+ Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
+ << SrcExpr.get()->getType() << DestType << OpRange;
} else {
SrcExpr = ExprError();
}
@@ -1109,7 +1207,7 @@ void CastOperation::CheckStaticCast() {
return;
}
- if (ValueKind == VK_RValue && !DestType->isRecordType() &&
+ if (ValueKind == VK_PRValue && !DestType->isRecordType() &&
!isPlaceholder(BuiltinType::Overload)) {
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
@@ -1356,6 +1454,14 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
DestPointer->getPointeeType()->getAs<RecordType>())
msg = diag::err_bad_cxx_cast_unrelated_class;
+ if (SrcType->isMatrixType() && DestType->isMatrixType()) {
+ if (Self.CheckMatrixCast(OpRange, DestType, SrcType, Kind)) {
+ SrcExpr = ExprError();
+ return TC_Failed;
+ }
+ return TC_Success;
+ }
+
// We tried everything. Everything! Nothing works! :-(
return TC_NotApplicable;
}
@@ -1800,7 +1906,7 @@ static TryCastResult TryConstCast(Sema &Self, ExprResult &SrcExpr,
return TC_NotApplicable;
}
- if (isa<RValueReferenceType>(DestTypeTmp) && SrcExpr.get()->isRValue()) {
+ if (isa<RValueReferenceType>(DestTypeTmp) && SrcExpr.get()->isPRValue()) {
if (!SrcType->isRecordType()) {
// Cannot const_cast non-class prvalue to rvalue reference type. But if
// this is C-style, static_cast can do this.
@@ -2072,7 +2178,8 @@ static bool fixOverloadedReinterpretCastExpr(Sema &Self, QualType DestType,
// like it?
if (Self.ResolveAndFixSingleFunctionTemplateSpecialization(
Result,
- Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr
+ Expr::getValueKindForType(DestType) ==
+ VK_PRValue // Convert Fun to Ptr
) &&
Result.isUsable())
return true;
@@ -2239,6 +2346,16 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
return TC_Success;
}
+ if (Self.LangOpts.OpenCL && !CStyle) {
+ if (DestType->isExtVectorType() || SrcType->isExtVectorType()) {
+ // FIXME: Allow for reinterpret cast between 3 and 4 element vectors
+ if (Self.areVectorTypesSameSize(SrcType, DestType)) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+ }
+ }
+
// Otherwise, pick a reasonable diagnostic.
if (!destIsVector)
msg = diag::err_bad_cxx_cast_vector_to_scalar_different_size;
@@ -2507,6 +2624,19 @@ void CastOperation::checkAddressSpaceCast(QualType SrcType, QualType DestType) {
}
}
+bool Sema::ShouldSplatAltivecScalarInCast(const VectorType *VecTy) {
+ bool SrcCompatXL = this->getLangOpts().getAltivecSrcCompat() ==
+ LangOptions::AltivecSrcCompatKind::XL;
+ VectorType::VectorKind VKind = VecTy->getVectorKind();
+
+ if ((VKind == VectorType::AltiVecVector) ||
+ (SrcCompatXL && ((VKind == VectorType::AltiVecBool) ||
+ (VKind == VectorType::AltiVecPixel)))) {
+ return true;
+ }
+ return false;
+}
+
void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
bool ListInitialization) {
assert(Self.getLangOpts().CPlusPlus);
@@ -2552,7 +2682,7 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
return;
}
- if (ValueKind == VK_RValue && !DestType->isRecordType() &&
+ if (ValueKind == VK_PRValue && !DestType->isRecordType() &&
!isPlaceholder(BuiltinType::Overload)) {
SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.get());
if (SrcExpr.isInvalid())
@@ -2561,9 +2691,9 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
// AltiVec vector initialization with a single literal.
if (const VectorType *vecTy = DestType->getAs<VectorType>())
- if (vecTy->getVectorKind() == VectorType::AltiVecVector
- && (SrcExpr.get()->getType()->isIntegerType()
- || SrcExpr.get()->getType()->isFloatingType())) {
+ if (Self.ShouldSplatAltivecScalarInCast(vecTy) &&
+ (SrcExpr.get()->getType()->isIntegerType() ||
+ SrcExpr.get()->getType()->isFloatingType())) {
Kind = CK_VectorSplat;
SrcExpr = Self.prepareVectorSplat(DestType, SrcExpr.get());
return;
@@ -2645,6 +2775,11 @@ void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
if (isValidCast(tcr)) {
if (Kind == CK_BitCast)
checkCastAlign();
+
+ if (!checkCastFunctionType(Self, SrcExpr, DestType))
+ Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
+ << SrcExpr.get()->getType() << DestType << OpRange;
+
} else {
SrcExpr = ExprError();
}
@@ -2765,7 +2900,8 @@ void CastOperation::CheckCStyleCast() {
return;
}
- if (!DestType->isScalarType() && !DestType->isVectorType()) {
+ if (!DestType->isScalarType() && !DestType->isVectorType() &&
+ !DestType->isMatrixType()) {
const RecordType *DestRecordTy = DestType->getAs<RecordType>();
if (DestRecordTy && Self.Context.hasSameUnqualifiedType(DestType, SrcType)){
@@ -2803,7 +2939,7 @@ void CastOperation::CheckCStyleCast() {
}
Self.Diag(OpRange.getBegin(),
diag::err_opencl_cast_non_zero_to_event_t)
- << CastInt.toString(10) << SrcExpr.get()->getSourceRange();
+ << toString(CastInt, 10) << SrcExpr.get()->getSourceRange();
SrcExpr = ExprError();
return;
}
@@ -2816,10 +2952,11 @@ void CastOperation::CheckCStyleCast() {
return;
}
- // The type we're casting to is known to be a scalar or vector.
+ // The type we're casting to is known to be a scalar, a vector, or a matrix.
- // Require the operand to be a scalar or vector.
- if (!SrcType->isScalarType() && !SrcType->isVectorType()) {
+ // Require the operand to be a scalar, a vector, or a matrix.
+ if (!SrcType->isScalarType() && !SrcType->isVectorType() &&
+ !SrcType->isMatrixType()) {
Self.Diag(SrcExpr.get()->getExprLoc(),
diag::err_typecheck_expect_scalar_operand)
<< SrcType << SrcExpr.get()->getSourceRange();
@@ -2832,9 +2969,15 @@ void CastOperation::CheckCStyleCast() {
return;
}
+ if (DestType->getAs<MatrixType>() || SrcType->getAs<MatrixType>()) {
+ if (Self.CheckMatrixCast(OpRange, DestType, SrcType, Kind))
+ SrcExpr = ExprError();
+ return;
+ }
+
if (const VectorType *DestVecTy = DestType->getAs<VectorType>()) {
- if (DestVecTy->getVectorKind() == VectorType::AltiVecVector &&
- (SrcType->isIntegerType() || SrcType->isFloatingType())) {
+ if (Self.ShouldSplatAltivecScalarInCast(DestVecTy) &&
+ (SrcType->isIntegerType() || SrcType->isFloatingType())) {
Kind = CK_VectorSplat;
SrcExpr = Self.prepareVectorSplat(DestType, SrcExpr.get());
} else if (Self.CheckVectorCast(OpRange, DestType, SrcType, Kind)) {
@@ -2916,8 +3059,8 @@ void CastOperation::CheckCStyleCast() {
}
}
- if (Self.getLangOpts().OpenCL &&
- !Self.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
+ if (Self.getLangOpts().OpenCL && !Self.getOpenCLOptions().isAvailableOption(
+ "cl_khr_fp16", Self.getLangOpts())) {
if (DestType->isHalfType()) {
Self.Diag(SrcExpr.get()->getBeginLoc(), diag::err_opencl_cast_to_half)
<< DestType << SrcExpr.get()->getSourceRange();
@@ -2957,6 +3100,10 @@ void CastOperation::CheckCStyleCast() {
}
}
+ if (!checkCastFunctionType(Self, SrcExpr, DestType))
+ Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
+ << SrcType << DestType << OpRange;
+
DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
DiagnoseCallingConvCast(Self, SrcExpr, DestType, OpRange);
DiagnoseBadFunctionCast(Self, SrcExpr, DestType);
@@ -2979,7 +3126,7 @@ void CastOperation::CheckBuiltinBitCast() {
return;
}
- if (SrcExpr.get()->isRValue())
+ if (SrcExpr.get()->isPRValue())
SrcExpr = Self.CreateMaterializeTemporaryExpr(SrcType, SrcExpr.get(),
/*IsLValueReference=*/false);
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 2d3d36f4adad..de75c10417e7 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -91,6 +91,7 @@
#include <algorithm>
#include <bitset>
#include <cassert>
+#include <cctype>
#include <cstddef>
#include <cstdint>
#include <functional>
@@ -252,7 +253,7 @@ static bool SemaBuiltinAlignment(Sema &S, CallExpr *TheCall, unsigned ID) {
}
if (llvm::APSInt::compareValues(AlignValue, MaxValue) > 0) {
S.Diag(AlignOp->getExprLoc(), diag::err_alignment_too_big)
- << MaxValue.toString(10);
+ << toString(MaxValue, 10);
return true;
}
if (!AlignValue.isPowerOf2()) {
@@ -771,8 +772,8 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
PDiag(DiagID)
- << FunctionName << ObjectSize.toString(/*Radix=*/10)
- << UsedSize.getValue().toString(/*Radix=*/10));
+ << FunctionName << toString(ObjectSize, /*Radix=*/10)
+ << toString(UsedSize.getValue(), /*Radix=*/10));
}
static bool SemaBuiltinSEHScopeCheck(Sema &SemaRef, CallExpr *TheCall,
@@ -837,7 +838,7 @@ static bool checkOpenCLBlockArgs(Sema &S, Expr *BlockArg) {
}
static bool checkOpenCLSubgroupExt(Sema &S, CallExpr *Call) {
- if (!S.getOpenCLOptions().isEnabled("cl_khr_subgroups")) {
+ if (!S.getOpenCLOptions().isSupported("cl_khr_subgroups", S.getLangOpts())) {
S.Diag(Call->getBeginLoc(), diag::err_opencl_requires_extension)
<< 1 << Call->getDirectCallee() << "cl_khr_subgroups";
return true;
@@ -1429,6 +1430,9 @@ bool Sema::CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return CheckPPCBuiltinFunctionCall(TI, BuiltinID, TheCall);
case llvm::Triple::amdgcn:
return CheckAMDGCNBuiltinFunctionCall(BuiltinID, TheCall);
+ case llvm::Triple::riscv32:
+ case llvm::Triple::riscv64:
+ return CheckRISCVBuiltinFunctionCall(TI, BuiltinID, TheCall);
}
}
@@ -1550,6 +1554,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
Diag(TheCall->getBeginLoc(), diag::warn_alloca)
<< TheCall->getDirectCallee();
break;
+ case Builtin::BI__arithmetic_fence:
+ if (SemaBuiltinArithmeticFence(TheCall))
+ return ExprError();
+ break;
case Builtin::BI__assume:
case Builtin::BI__builtin_assume:
if (SemaBuiltinAssume(TheCall))
@@ -1962,6 +1970,26 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
case Builtin::BI__builtin_matrix_column_major_store:
return SemaBuiltinMatrixColumnMajorStore(TheCall, TheCallResult);
+
+ case Builtin::BI__builtin_get_device_side_mangled_name: {
+ auto Check = [](CallExpr *TheCall) {
+ if (TheCall->getNumArgs() != 1)
+ return false;
+ auto *DRE = dyn_cast<DeclRefExpr>(TheCall->getArg(0)->IgnoreImpCasts());
+ if (!DRE)
+ return false;
+ auto *D = DRE->getDecl();
+ if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D))
+ return false;
+ return D->hasAttr<CUDAGlobalAttr>() || D->hasAttr<CUDADeviceAttr>() ||
+ D->hasAttr<CUDAConstantAttr>() || D->hasAttr<HIPManagedAttr>();
+ };
+ if (!Check(TheCall)) {
+ Diag(TheCall->getBeginLoc(),
+ diag::err_hip_invalid_args_builtin_mangled_name);
+ return ExprError();
+ }
+ }
}
// Since the target specific builtins for each arch overlap, only check those
@@ -2623,7 +2651,10 @@ static bool isValidBPFPreserveEnumValueArg(Expr *Arg) {
return false;
const auto *CE = dyn_cast<CStyleCastExpr>(UO->getSubExpr());
- if (!CE || CE->getCastKind() != CK_IntegralToPointer)
+ if (!CE)
+ return false;
+ if (CE->getCastKind() != CK_IntegralToPointer &&
+ CE->getCastKind() != CK_NullToPointer)
return false;
// The integer must be from an EnumConstantDecl.
@@ -3228,34 +3259,81 @@ static QualType DecodePPCMMATypeFromStr(ASTContext &Context, const char *&Str,
}
}
+static bool isPPC_64Builtin(unsigned BuiltinID) {
+ // These builtins only work on PPC 64bit targets.
+ switch (BuiltinID) {
+ case PPC::BI__builtin_divde:
+ case PPC::BI__builtin_divdeu:
+ case PPC::BI__builtin_bpermd:
+ case PPC::BI__builtin_ppc_ldarx:
+ case PPC::BI__builtin_ppc_stdcx:
+ case PPC::BI__builtin_ppc_tdw:
+ case PPC::BI__builtin_ppc_trapd:
+ case PPC::BI__builtin_ppc_cmpeqb:
+ case PPC::BI__builtin_ppc_setb:
+ case PPC::BI__builtin_ppc_mulhd:
+ case PPC::BI__builtin_ppc_mulhdu:
+ case PPC::BI__builtin_ppc_maddhd:
+ case PPC::BI__builtin_ppc_maddhdu:
+ case PPC::BI__builtin_ppc_maddld:
+ case PPC::BI__builtin_ppc_load8r:
+ case PPC::BI__builtin_ppc_store8r:
+ case PPC::BI__builtin_ppc_insert_exp:
+ case PPC::BI__builtin_ppc_extract_sig:
+ return true;
+ }
+ return false;
+}
+
+static bool SemaFeatureCheck(Sema &S, CallExpr *TheCall,
+ StringRef FeatureToCheck, unsigned DiagID,
+ StringRef DiagArg = "") {
+ if (S.Context.getTargetInfo().hasFeature(FeatureToCheck))
+ return false;
+
+ if (DiagArg.empty())
+ S.Diag(TheCall->getBeginLoc(), DiagID) << TheCall->getSourceRange();
+ else
+ S.Diag(TheCall->getBeginLoc(), DiagID)
+ << DiagArg << TheCall->getSourceRange();
+
+ return true;
+}
+
+/// Returns true if the argument consists of one contiguous run of 1s with any
+/// number of 0s on either side. The 1s are allowed to wrap from LSB to MSB, so
+/// 0x000FFF0, 0x0000FFFF, 0xFF0000FF, 0x0 are all runs. 0x0F0F0000 is not,
+/// since all 1s are not contiguous.
+bool Sema::SemaValueIsRunOfOnes(CallExpr *TheCall, unsigned ArgNum) {
+ llvm::APSInt Result;
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ // Check contiguous run of 1s, 0xFF0000FF is also a run of 1s.
+ if (Result.isShiftedMask() || (~Result).isShiftedMask())
+ return false;
+
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_argument_not_contiguous_bit_field)
+ << ArgNum << Arg->getSourceRange();
+}
+
bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall) {
unsigned i = 0, l = 0, u = 0;
- bool Is64BitBltin = BuiltinID == PPC::BI__builtin_divde ||
- BuiltinID == PPC::BI__builtin_divdeu ||
- BuiltinID == PPC::BI__builtin_bpermd;
bool IsTarget64Bit = TI.getTypeWidth(TI.getIntPtrType()) == 64;
- bool IsBltinExtDiv = BuiltinID == PPC::BI__builtin_divwe ||
- BuiltinID == PPC::BI__builtin_divweu ||
- BuiltinID == PPC::BI__builtin_divde ||
- BuiltinID == PPC::BI__builtin_divdeu;
+ llvm::APSInt Result;
- if (Is64BitBltin && !IsTarget64Bit)
+ if (isPPC_64Builtin(BuiltinID) && !IsTarget64Bit)
return Diag(TheCall->getBeginLoc(), diag::err_64_bit_builtin_32_bit_tgt)
<< TheCall->getSourceRange();
- if ((IsBltinExtDiv && !TI.hasFeature("extdiv")) ||
- (BuiltinID == PPC::BI__builtin_bpermd && !TI.hasFeature("bpermd")))
- return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
- << TheCall->getSourceRange();
-
- auto SemaVSXCheck = [&](CallExpr *TheCall) -> bool {
- if (!TI.hasFeature("vsx"))
- return Diag(TheCall->getBeginLoc(), diag::err_ppc_builtin_only_on_pwr7)
- << TheCall->getSourceRange();
- return false;
- };
-
switch (BuiltinID) {
default: return false;
case PPC::BI__builtin_altivec_crypto_vshasigmaw:
@@ -3281,11 +3359,22 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_vsx_xxpermdi:
case PPC::BI__builtin_vsx_xxsldwi:
return SemaBuiltinVSX(TheCall);
+ case PPC::BI__builtin_divwe:
+ case PPC::BI__builtin_divweu:
+ case PPC::BI__builtin_divde:
+ case PPC::BI__builtin_divdeu:
+ return SemaFeatureCheck(*this, TheCall, "extdiv",
+ diag::err_ppc_builtin_only_on_arch, "7");
+ case PPC::BI__builtin_bpermd:
+ return SemaFeatureCheck(*this, TheCall, "bpermd",
+ diag::err_ppc_builtin_only_on_arch, "7");
case PPC::BI__builtin_unpack_vector_int128:
- return SemaVSXCheck(TheCall) ||
+ return SemaFeatureCheck(*this, TheCall, "vsx",
+ diag::err_ppc_builtin_only_on_arch, "7") ||
SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
case PPC::BI__builtin_pack_vector_int128:
- return SemaVSXCheck(TheCall);
+ return SemaFeatureCheck(*this, TheCall, "vsx",
+ diag::err_ppc_builtin_only_on_arch, "7");
case PPC::BI__builtin_altivec_vgnb:
return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
case PPC::BI__builtin_altivec_vec_replace_elt:
@@ -3304,7 +3393,59 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
return SemaBuiltinConstantArgRange(TheCall, 2, 0, 7);
case PPC::BI__builtin_vsx_xxpermx:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 7);
-#define CUSTOM_BUILTIN(Name, Types, Acc) \
+ case PPC::BI__builtin_ppc_tw:
+ case PPC::BI__builtin_ppc_tdw:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 1, 31);
+ case PPC::BI__builtin_ppc_cmpeqb:
+ case PPC::BI__builtin_ppc_setb:
+ case PPC::BI__builtin_ppc_maddhd:
+ case PPC::BI__builtin_ppc_maddhdu:
+ case PPC::BI__builtin_ppc_maddld:
+ return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
+ diag::err_ppc_builtin_only_on_arch, "9");
+ case PPC::BI__builtin_ppc_cmprb:
+ return SemaFeatureCheck(*this, TheCall, "isa-v30-instructions",
+ diag::err_ppc_builtin_only_on_arch, "9") ||
+ SemaBuiltinConstantArgRange(TheCall, 0, 0, 1);
+ // For __rlwnm, __rlwimi and __rldimi, the last parameter mask must
+ // be a constant that represents a contiguous bit field.
+ case PPC::BI__builtin_ppc_rlwnm:
+ return SemaBuiltinConstantArg(TheCall, 1, Result) ||
+ SemaValueIsRunOfOnes(TheCall, 2);
+ case PPC::BI__builtin_ppc_rlwimi:
+ case PPC::BI__builtin_ppc_rldimi:
+ return SemaBuiltinConstantArg(TheCall, 2, Result) ||
+ SemaValueIsRunOfOnes(TheCall, 3);
+ case PPC::BI__builtin_ppc_extract_exp:
+ case PPC::BI__builtin_ppc_extract_sig:
+ case PPC::BI__builtin_ppc_insert_exp:
+ return SemaFeatureCheck(*this, TheCall, "power9-vector",
+ diag::err_ppc_builtin_only_on_arch, "9");
+ case PPC::BI__builtin_ppc_mtfsb0:
+ case PPC::BI__builtin_ppc_mtfsb1:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
+ case PPC::BI__builtin_ppc_mtfsf:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 255);
+ case PPC::BI__builtin_ppc_mtfsfi:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 7) ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 0, 15);
+ case PPC::BI__builtin_ppc_alignx:
+ return SemaBuiltinConstantArgPower2(TheCall, 0);
+ case PPC::BI__builtin_ppc_rdlam:
+ return SemaValueIsRunOfOnes(TheCall, 2);
+ case PPC::BI__builtin_ppc_icbt:
+ case PPC::BI__builtin_ppc_sthcx:
+ case PPC::BI__builtin_ppc_stbcx:
+ case PPC::BI__builtin_ppc_lharx:
+ case PPC::BI__builtin_ppc_lbarx:
+ return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
+ diag::err_ppc_builtin_only_on_arch, "8");
+ case PPC::BI__builtin_vsx_ldrmb:
+ case PPC::BI__builtin_vsx_strmb:
+ return SemaFeatureCheck(*this, TheCall, "isa-v207-instructions",
+ diag::err_ppc_builtin_only_on_arch, "8") ||
+ SemaBuiltinConstantArgRange(TheCall, 1, 1, 16);
+#define CUSTOM_BUILTIN(Name, Intr, Types, Acc) \
case PPC::BI__builtin_##Name: \
return SemaBuiltinPPCMMACall(TheCall, Types);
#include "clang/Basic/BuiltinsPPC.def"
@@ -3356,20 +3497,27 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
if (!ArgExpr->EvaluateAsInt(ArgResult, Context))
return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
<< ArgExpr->getType();
- int ord = ArgResult.Val.getInt().getZExtValue();
+ auto Ord = ArgResult.Val.getInt().getZExtValue();
// Check valididty of memory ordering as per C11 / C++11's memody model.
- switch (static_cast<llvm::AtomicOrderingCABI>(ord)) {
+ // Only fence needs check. Atomic dec/inc allow all memory orders.
+ if (!llvm::isValidAtomicOrderingCABI(Ord))
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << ArgExpr->getSourceRange();
+ switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
+ case llvm::AtomicOrderingCABI::relaxed:
+ case llvm::AtomicOrderingCABI::consume:
+ if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
+ return Diag(ArgExpr->getBeginLoc(),
+ diag::warn_atomic_op_has_invalid_memory_order)
+ << ArgExpr->getSourceRange();
+ break;
case llvm::AtomicOrderingCABI::acquire:
case llvm::AtomicOrderingCABI::release:
case llvm::AtomicOrderingCABI::acq_rel:
case llvm::AtomicOrderingCABI::seq_cst:
break;
- default: {
- return Diag(ArgExpr->getBeginLoc(),
- diag::warn_atomic_op_has_invalid_memory_order)
- << ArgExpr->getSourceRange();
- }
}
Arg = TheCall->getArg(ScopeIndex);
@@ -3383,6 +3531,198 @@ bool Sema::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
return false;
}
+bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
+ llvm::APSInt Result;
+
+ // We can't check the value of a dependent argument.
+ Expr *Arg = TheCall->getArg(ArgNum);
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ return false;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, ArgNum, Result))
+ return true;
+
+ int64_t Val = Result.getSExtValue();
+ if ((Val >= 0 && Val <= 3) || (Val >= 5 && Val <= 7))
+ return false;
+
+ return Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_invalid_lmul)
+ << Arg->getSourceRange();
+}
+
+bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
+ unsigned BuiltinID,
+ CallExpr *TheCall) {
+ // CodeGenFunction can also detect this, but this gives a better error
+ // message.
+ bool FeatureMissing = false;
+ SmallVector<StringRef> ReqFeatures;
+ StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
+ Features.split(ReqFeatures, ',');
+
+ // Check that each feature required by the builtin is enabled for the target.
+ for (StringRef F : ReqFeatures) {
+ if (TI.hasFeature(F))
+ continue;
+
+ // If the feature is 64bit, alter the string so it will print better in
+ // the diagnostic.
+ if (F == "64bit")
+ F = "RV64";
+
+ // Convert features like "zbr" and "experimental-zbr" to "Zbr".
+ F.consume_front("experimental-");
+ std::string FeatureStr = F.str();
+ FeatureStr[0] = std::toupper(FeatureStr[0]);
+
+ // Error message
+ FeatureMissing = true;
+ Diag(TheCall->getBeginLoc(), diag::err_riscv_builtin_requires_extension)
+ << TheCall->getSourceRange() << StringRef(FeatureStr);
+ }
+
+ if (FeatureMissing)
+ return true;
+
+ switch (BuiltinID) {
+ case RISCV::BI__builtin_rvv_vsetvli:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3) ||
+ CheckRISCVLMUL(TheCall, 2);
+ case RISCV::BI__builtin_rvv_vsetvlimax:
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
+ CheckRISCVLMUL(TheCall, 1);
+ case RISCV::BI__builtin_rvv_vget_v_i8m2_i8m1:
+ case RISCV::BI__builtin_rvv_vget_v_i16m2_i16m1:
+ case RISCV::BI__builtin_rvv_vget_v_i32m2_i32m1:
+ case RISCV::BI__builtin_rvv_vget_v_i64m2_i64m1:
+ case RISCV::BI__builtin_rvv_vget_v_f32m2_f32m1:
+ case RISCV::BI__builtin_rvv_vget_v_f64m2_f64m1:
+ case RISCV::BI__builtin_rvv_vget_v_u8m2_u8m1:
+ case RISCV::BI__builtin_rvv_vget_v_u16m2_u16m1:
+ case RISCV::BI__builtin_rvv_vget_v_u32m2_u32m1:
+ case RISCV::BI__builtin_rvv_vget_v_u64m2_u64m1:
+ case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m2:
+ case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m2:
+ case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m2:
+ case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m2:
+ case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m2:
+ case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m2:
+ case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m2:
+ case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m2:
+ case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m2:
+ case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m2:
+ case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m4:
+ case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m4:
+ case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m4:
+ case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m4:
+ case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m4:
+ case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m4:
+ case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m4:
+ case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m4:
+ case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m4:
+ case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m4:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case RISCV::BI__builtin_rvv_vget_v_i8m4_i8m1:
+ case RISCV::BI__builtin_rvv_vget_v_i16m4_i16m1:
+ case RISCV::BI__builtin_rvv_vget_v_i32m4_i32m1:
+ case RISCV::BI__builtin_rvv_vget_v_i64m4_i64m1:
+ case RISCV::BI__builtin_rvv_vget_v_f32m4_f32m1:
+ case RISCV::BI__builtin_rvv_vget_v_f64m4_f64m1:
+ case RISCV::BI__builtin_rvv_vget_v_u8m4_u8m1:
+ case RISCV::BI__builtin_rvv_vget_v_u16m4_u16m1:
+ case RISCV::BI__builtin_rvv_vget_v_u32m4_u32m1:
+ case RISCV::BI__builtin_rvv_vget_v_u64m4_u64m1:
+ case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m2:
+ case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m2:
+ case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m2:
+ case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m2:
+ case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m2:
+ case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m2:
+ case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m2:
+ case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m2:
+ case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m2:
+ case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m2:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case RISCV::BI__builtin_rvv_vget_v_i8m8_i8m1:
+ case RISCV::BI__builtin_rvv_vget_v_i16m8_i16m1:
+ case RISCV::BI__builtin_rvv_vget_v_i32m8_i32m1:
+ case RISCV::BI__builtin_rvv_vget_v_i64m8_i64m1:
+ case RISCV::BI__builtin_rvv_vget_v_f32m8_f32m1:
+ case RISCV::BI__builtin_rvv_vget_v_f64m8_f64m1:
+ case RISCV::BI__builtin_rvv_vget_v_u8m8_u8m1:
+ case RISCV::BI__builtin_rvv_vget_v_u16m8_u16m1:
+ case RISCV::BI__builtin_rvv_vget_v_u32m8_u32m1:
+ case RISCV::BI__builtin_rvv_vget_v_u64m8_u64m1:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m2:
+ case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m2:
+ case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m2:
+ case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m2:
+ case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m2:
+ case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m2:
+ case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m2:
+ case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m2:
+ case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m2:
+ case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m2:
+ case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m4:
+ case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m4:
+ case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m4:
+ case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m4:
+ case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m4:
+ case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m4:
+ case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m4:
+ case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m4:
+ case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m4:
+ case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m4:
+ case RISCV::BI__builtin_rvv_vset_v_i8m4_i8m8:
+ case RISCV::BI__builtin_rvv_vset_v_i16m4_i16m8:
+ case RISCV::BI__builtin_rvv_vset_v_i32m4_i32m8:
+ case RISCV::BI__builtin_rvv_vset_v_i64m4_i64m8:
+ case RISCV::BI__builtin_rvv_vset_v_f32m4_f32m8:
+ case RISCV::BI__builtin_rvv_vset_v_f64m4_f64m8:
+ case RISCV::BI__builtin_rvv_vset_v_u8m4_u8m8:
+ case RISCV::BI__builtin_rvv_vset_v_u16m4_u16m8:
+ case RISCV::BI__builtin_rvv_vset_v_u32m4_u32m8:
+ case RISCV::BI__builtin_rvv_vset_v_u64m4_u64m8:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 1);
+ case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m4:
+ case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m4:
+ case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m4:
+ case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m4:
+ case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m4:
+ case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m4:
+ case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m4:
+ case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m4:
+ case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m4:
+ case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m4:
+ case RISCV::BI__builtin_rvv_vset_v_i8m2_i8m8:
+ case RISCV::BI__builtin_rvv_vset_v_i16m2_i16m8:
+ case RISCV::BI__builtin_rvv_vset_v_i32m2_i32m8:
+ case RISCV::BI__builtin_rvv_vset_v_i64m2_i64m8:
+ case RISCV::BI__builtin_rvv_vset_v_f32m2_f32m8:
+ case RISCV::BI__builtin_rvv_vset_v_f64m2_f64m8:
+ case RISCV::BI__builtin_rvv_vset_v_u8m2_u8m8:
+ case RISCV::BI__builtin_rvv_vset_v_u16m2_u16m8:
+ case RISCV::BI__builtin_rvv_vset_v_u32m2_u32m8:
+ case RISCV::BI__builtin_rvv_vset_v_u64m2_u64m8:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 3);
+ case RISCV::BI__builtin_rvv_vset_v_i8m1_i8m8:
+ case RISCV::BI__builtin_rvv_vset_v_i16m1_i16m8:
+ case RISCV::BI__builtin_rvv_vset_v_i32m1_i32m8:
+ case RISCV::BI__builtin_rvv_vset_v_i64m1_i64m8:
+ case RISCV::BI__builtin_rvv_vset_v_f32m1_f32m8:
+ case RISCV::BI__builtin_rvv_vset_v_f64m1_f64m8:
+ case RISCV::BI__builtin_rvv_vset_v_u8m1_u8m8:
+ case RISCV::BI__builtin_rvv_vset_v_u16m1_u16m8:
+ case RISCV::BI__builtin_rvv_vset_v_u32m1_u32m8:
+ case RISCV::BI__builtin_rvv_vset_v_u64m1_u64m8:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 7);
+ }
+
+ return false;
+}
+
bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
CallExpr *TheCall) {
if (BuiltinID == SystemZ::BI__builtin_tabort) {
@@ -3443,6 +3783,11 @@ bool Sema::CheckSystemZBuiltinFunctionCall(unsigned BuiltinID,
case SystemZ::BI__builtin_s390_vfmaxdb: i = 2; l = 0; u = 15; break;
case SystemZ::BI__builtin_s390_vsld: i = 2; l = 0; u = 7; break;
case SystemZ::BI__builtin_s390_vsrd: i = 2; l = 0; u = 7; break;
+ case SystemZ::BI__builtin_s390_vclfnhs:
+ case SystemZ::BI__builtin_s390_vclfnls:
+ case SystemZ::BI__builtin_s390_vcfn:
+ case SystemZ::BI__builtin_s390_vcnf: i = 1; l = 0; u = 15; break;
+ case SystemZ::BI__builtin_s390_vcrnfs: i = 2; l = 0; u = 15; break;
}
return SemaBuiltinConstantArgRange(TheCall, i, l, u);
}
@@ -4448,6 +4793,45 @@ static void CheckNonNullArguments(Sema &S,
}
}
+/// Warn if a pointer or reference argument passed to a function points to an
+/// object that is less aligned than the parameter. This can happen when
+/// creating a typedef with a lower alignment than the original type and then
+/// calling functions defined in terms of the original type.
+void Sema::CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
+ StringRef ParamName, QualType ArgTy,
+ QualType ParamTy) {
+
+ // If a function accepts a pointer or reference type
+ if (!ParamTy->isPointerType() && !ParamTy->isReferenceType())
+ return;
+
+ // If the parameter is a pointer type, get the pointee type for the
+ // argument too. If the parameter is a reference type, don't try to get
+ // the pointee type for the argument.
+ if (ParamTy->isPointerType())
+ ArgTy = ArgTy->getPointeeType();
+
+ // Remove reference or pointer
+ ParamTy = ParamTy->getPointeeType();
+
+ // Find expected alignment, and the actual alignment of the passed object.
+ // getTypeAlignInChars requires complete types
+ if (ArgTy.isNull() || ParamTy->isIncompleteType() ||
+ ArgTy->isIncompleteType() || ParamTy->isUndeducedType() ||
+ ArgTy->isUndeducedType())
+ return;
+
+ CharUnits ParamAlign = Context.getTypeAlignInChars(ParamTy);
+ CharUnits ArgAlign = Context.getTypeAlignInChars(ArgTy);
+
+ // If the argument is less aligned than the parameter, there is a
+ // potential alignment issue.
+ if (ArgAlign < ParamAlign)
+ Diag(Loc, diag::warn_param_mismatched_alignment)
+ << (int)ArgAlign.getQuantity() << (int)ParamAlign.getQuantity()
+ << ParamName << FDecl;
+}
+
/// Handles the checks for format strings, non-POD arguments to vararg
/// functions, NULL arguments passed to non-NULL parameters, and diagnose_if
/// attributes.
@@ -4502,6 +4886,31 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
}
}
+ // Check that passed arguments match the alignment of original arguments.
+ // Try to get the missing prototype from the declaration.
+ if (!Proto && FDecl) {
+ const auto *FT = FDecl->getFunctionType();
+ if (isa_and_nonnull<FunctionProtoType>(FT))
+ Proto = cast<FunctionProtoType>(FDecl->getFunctionType());
+ }
+ if (Proto) {
+ // For variadic functions, we may have more args than parameters.
+ // For some K&R functions, we may have fewer args than parameters.
+ const auto N = std::min<unsigned>(Proto->getNumParams(), Args.size());
+ for (unsigned ArgIdx = 0; ArgIdx < N; ++ArgIdx) {
+ // Args[ArgIdx] can be null in malformed code.
+ if (const Expr *Arg = Args[ArgIdx]) {
+ if (Arg->containsErrors())
+ continue;
+
+ QualType ParamTy = Proto->getParamType(ArgIdx);
+ QualType ArgTy = Arg->getType();
+ CheckArgAlignment(Arg->getExprLoc(), FDecl, std::to_string(ArgIdx + 1),
+ ArgTy, ParamTy);
+ }
+ }
+ }
+
if (FDecl && FDecl->hasAttr<AllocAlignAttr>()) {
auto *AA = FDecl->getAttr<AllocAlignAttr>();
const Expr *Arg = Args[AA->getParamIndex().getASTIndex()];
@@ -4526,12 +4935,17 @@ void Sema::checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
/// CheckConstructorCall - Check a constructor call for correctness and safety
/// properties not enforced by the C type system.
-void Sema::CheckConstructorCall(FunctionDecl *FDecl,
+void Sema::CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc) {
VariadicCallType CallType =
- Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
+ Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
+
+ auto *Ctor = cast<CXXConstructorDecl>(FDecl);
+ CheckArgAlignment(Loc, FDecl, "'this'", Context.getPointerType(ThisType),
+ Context.getPointerType(Ctor->getThisObjectType()));
+
checkCall(FDecl, Proto, /*ThisArg=*/nullptr, Args, /*IsMemberFunction=*/true,
Loc, SourceRange(), CallType);
}
@@ -4561,6 +4975,22 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
ImplicitThis =
cast<CXXMemberCallExpr>(TheCall)->getImplicitObjectArgument();
+ if (ImplicitThis) {
+ // ImplicitThis may or may not be a pointer, depending on whether . or -> is
+ // used.
+ QualType ThisType = ImplicitThis->getType();
+ if (!ThisType->isPointerType()) {
+ assert(!ThisType->isReferenceType());
+ ThisType = Context.getPointerType(ThisType);
+ }
+
+ QualType ThisTypeFromDecl =
+ Context.getPointerType(cast<CXXMethodDecl>(FDecl)->getThisObjectType());
+
+ CheckArgAlignment(TheCall->getRParenLoc(), FDecl, "'this'", ThisType,
+ ThisTypeFromDecl);
+ }
+
checkCall(FDecl, Proto, ImplicitThis, llvm::makeArrayRef(Args, NumArgs),
IsMemberFunction, TheCall->getRParenLoc(),
TheCall->getCallee()->getSourceRange(), CallType);
@@ -4794,7 +5224,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__atomic_add_fetch:
case AtomicExpr::AO__atomic_sub_fetch:
IsAddSub = true;
- LLVM_FALLTHROUGH;
+ Form = Arithmetic;
+ break;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
@@ -4809,6 +5240,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__atomic_or_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_nand_fetch:
+ Form = Arithmetic;
+ break;
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_min:
@@ -4901,10 +5334,24 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
// For an arithmetic operation, the implied arithmetic must be well-formed.
if (Form == Arithmetic) {
- // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
- if (IsAddSub && !ValType->isIntegerType()
- && !ValType->isPointerType()) {
- Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ // gcc does not enforce these rules for GNU atomics, but we do so for
+ // sanity.
+ auto IsAllowedValueType = [&](QualType ValType) {
+ if (ValType->isIntegerType())
+ return true;
+ if (ValType->isPointerType())
+ return true;
+ if (!ValType->isFloatingType())
+ return false;
+ // LLVM Parser does not allow atomicrmw with x86_fp80 type.
+ if (ValType->isSpecificBuiltinType(BuiltinType::LongDouble) &&
+ &Context.getTargetInfo().getLongDoubleFormat() ==
+ &llvm::APFloat::x87DoubleExtended())
+ return false;
+ return true;
+ };
+ if (IsAddSub && !IsAllowedValueType(ValType)) {
+ Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_atomic_int_ptr_or_fp)
<< IsC11 << Ptr->getType() << Ptr->getSourceRange();
return ExprError();
}
@@ -5031,7 +5478,9 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
// passed by address. For the rest, GNU uses by-address and C11 uses
// by-value.
assert(Form != Load);
- if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
+ if (Form == Arithmetic && ValType->isPointerType())
+ Ty = Context.getPointerDiffType();
+ else if (Form == Init || Form == Arithmetic)
Ty = ValType;
else if (Form == Copy || Form == Xchg) {
if (IsPassedByAddress) {
@@ -5040,9 +5489,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
ExprRange.getBegin());
}
Ty = ByValType;
- } else if (Form == Arithmetic)
- Ty = Context.getPointerDiffType();
- else {
+ } else {
Expr *ValArg = APIOrderedArgs[i];
// The value pointer is always dereferenced, a nullptr is undefined.
CheckNonNullArgument(*this, ValArg, ExprRange.getBegin());
@@ -6165,7 +6612,7 @@ ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
ExprResult Sema::SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc) {
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
QualType DstTy = TInfo->getType();
QualType SrcTy = E->getType();
@@ -6211,6 +6658,29 @@ bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
return false;
}
+/// SemaBuiltinArithmeticFence - Handle __arithmetic_fence.
+bool Sema::SemaBuiltinArithmeticFence(CallExpr *TheCall) {
+ if (!Context.getTargetInfo().checkArithmeticFenceSupported())
+ return Diag(TheCall->getBeginLoc(), diag::err_builtin_target_unsupported)
+ << SourceRange(TheCall->getBeginLoc(), TheCall->getEndLoc());
+ if (checkArgCount(*this, TheCall, 1))
+ return true;
+ Expr *Arg = TheCall->getArg(0);
+ if (Arg->isInstantiationDependent())
+ return false;
+
+ QualType ArgTy = Arg->getType();
+ if (!ArgTy->hasFloatingRepresentation())
+ return Diag(TheCall->getEndLoc(), diag::err_typecheck_expect_flt_or_vector)
+ << ArgTy;
+ if (Arg->isLValue()) {
+ ExprResult FirstArg = DefaultLvalueConversion(Arg);
+ TheCall->setArg(0, FirstArg.get());
+ }
+ TheCall->setType(TheCall->getArg(0)->getType());
+ return false;
+}
+
/// SemaBuiltinAssume - Handle __assume (MS Extension).
// __assume does not evaluate its arguments, and should warn if its argument
// has side effects.
@@ -6418,13 +6888,13 @@ bool Sema::SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum,
if (Result.getSExtValue() < Low || Result.getSExtValue() > High) {
if (RangeIsError)
return Diag(TheCall->getBeginLoc(), diag::err_argument_invalid_range)
- << Result.toString(10) << Low << High << Arg->getSourceRange();
+ << toString(Result, 10) << Low << High << Arg->getSourceRange();
else
// Defer the warning until we know if the code will be emitted so that
// dead code can ignore this.
DiagRuntimeBehavior(TheCall->getBeginLoc(), TheCall,
PDiag(diag::warn_argument_invalid_range)
- << Result.toString(10) << Low << High
+ << toString(Result, 10) << Low << High
<< Arg->getSourceRange());
}
@@ -6759,18 +7229,18 @@ bool Sema::SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
bool ValidString = true;
if (IsARMBuiltin) {
- ValidString &= Fields[0].startswith_lower("cp") ||
- Fields[0].startswith_lower("p");
+ ValidString &= Fields[0].startswith_insensitive("cp") ||
+ Fields[0].startswith_insensitive("p");
if (ValidString)
- Fields[0] =
- Fields[0].drop_front(Fields[0].startswith_lower("cp") ? 2 : 1);
+ Fields[0] = Fields[0].drop_front(
+ Fields[0].startswith_insensitive("cp") ? 2 : 1);
- ValidString &= Fields[2].startswith_lower("c");
+ ValidString &= Fields[2].startswith_insensitive("c");
if (ValidString)
Fields[2] = Fields[2].drop_front(1);
if (FiveFields) {
- ValidString &= Fields[3].startswith_lower("c");
+ ValidString &= Fields[3].startswith_insensitive("c");
if (ValidString)
Fields[3] = Fields[3].drop_front(1);
}
@@ -8630,8 +9100,11 @@ CheckPrintfHandler::checkFormatExpr(const analyze_printf::PrintfSpecifier &FS,
} else if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E)) {
// Special case for 'a', which has type 'int' in C.
// Note, however, that we do /not/ want to treat multibyte constants like
- // 'MooV' as characters! This form is deprecated but still exists.
- if (ExprTy == S.Context.IntTy)
+ // 'MooV' as characters! This form is deprecated but still exists. In
+ // addition, don't treat expressions as of type 'char' if one byte length
+ // modifier is provided.
+ if (ExprTy == S.Context.IntTy &&
+ FS.getLengthModifier().getKind() != LengthModifier::AsChar)
if (llvm::isUIntN(S.Context.getCharWidth(), CL->getValue()))
ExprTy = S.Context.CharTy;
}
@@ -10250,64 +10723,116 @@ void Sema::CheckStrncatArguments(const CallExpr *CE,
namespace {
void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
- const UnaryOperator *UnaryExpr,
- const VarDecl *Var) {
- StorageClass Class = Var->getStorageClass();
- if (Class == StorageClass::SC_Extern ||
- Class == StorageClass::SC_PrivateExtern ||
- Var->getType()->isReferenceType())
- return;
-
- S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
- << CalleeName << Var;
-}
-
-void CheckFreeArgumentsOnLvalue(Sema &S, const std::string &CalleeName,
const UnaryOperator *UnaryExpr, const Decl *D) {
- if (const auto *Field = dyn_cast<FieldDecl>(D))
+ if (isa<FieldDecl, FunctionDecl, VarDecl>(D)) {
S.Diag(UnaryExpr->getBeginLoc(), diag::warn_free_nonheap_object)
- << CalleeName << Field;
+ << CalleeName << 0 /*object: */ << cast<NamedDecl>(D);
+ return;
+ }
}
void CheckFreeArgumentsAddressof(Sema &S, const std::string &CalleeName,
const UnaryOperator *UnaryExpr) {
- if (UnaryExpr->getOpcode() != UnaryOperator::Opcode::UO_AddrOf)
- return;
-
- if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr()))
- if (const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl()))
- return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, Var);
+ if (const auto *Lvalue = dyn_cast<DeclRefExpr>(UnaryExpr->getSubExpr())) {
+ const Decl *D = Lvalue->getDecl();
+ if (isa<DeclaratorDecl>(D))
+ if (!dyn_cast<DeclaratorDecl>(D)->getType()->isReferenceType())
+ return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr, D);
+ }
if (const auto *Lvalue = dyn_cast<MemberExpr>(UnaryExpr->getSubExpr()))
return CheckFreeArgumentsOnLvalue(S, CalleeName, UnaryExpr,
Lvalue->getMemberDecl());
}
-void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
- const DeclRefExpr *Lvalue) {
- if (!Lvalue->getType()->isArrayType())
+void CheckFreeArgumentsPlus(Sema &S, const std::string &CalleeName,
+ const UnaryOperator *UnaryExpr) {
+ const auto *Lambda = dyn_cast<LambdaExpr>(
+ UnaryExpr->getSubExpr()->IgnoreImplicitAsWritten()->IgnoreParens());
+ if (!Lambda)
return;
+ S.Diag(Lambda->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << 2 /*object: lambda expression*/;
+}
+
+void CheckFreeArgumentsStackArray(Sema &S, const std::string &CalleeName,
+ const DeclRefExpr *Lvalue) {
const auto *Var = dyn_cast<VarDecl>(Lvalue->getDecl());
if (Var == nullptr)
return;
S.Diag(Lvalue->getBeginLoc(), diag::warn_free_nonheap_object)
- << CalleeName << Var;
+ << CalleeName << 0 /*object: */ << Var;
+}
+
+void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
+ const CastExpr *Cast) {
+ SmallString<128> SizeString;
+ llvm::raw_svector_ostream OS(SizeString);
+
+ clang::CastKind Kind = Cast->getCastKind();
+ if (Kind == clang::CK_BitCast &&
+ !Cast->getSubExpr()->getType()->isFunctionPointerType())
+ return;
+ if (Kind == clang::CK_IntegralToPointer &&
+ !isa<IntegerLiteral>(
+ Cast->getSubExpr()->IgnoreParenImpCasts()->IgnoreParens()))
+ return;
+
+ switch (Cast->getCastKind()) {
+ case clang::CK_BitCast:
+ case clang::CK_IntegralToPointer:
+ case clang::CK_FunctionToPointerDecay:
+ OS << '\'';
+ Cast->printPretty(OS, nullptr, S.getPrintingPolicy());
+ OS << '\'';
+ break;
+ default:
+ return;
+ }
+
+ S.Diag(Cast->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << 0 /*object: */ << OS.str();
}
} // namespace
/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
- const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
const std::string CalleeName =
dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
- if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
- return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
+ { // Prefer something that doesn't involve a cast to make things simpler.
+ const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
+ if (const auto *UnaryExpr = dyn_cast<UnaryOperator>(Arg))
+ switch (UnaryExpr->getOpcode()) {
+ case UnaryOperator::Opcode::UO_AddrOf:
+ return CheckFreeArgumentsAddressof(*this, CalleeName, UnaryExpr);
+ case UnaryOperator::Opcode::UO_Plus:
+ return CheckFreeArgumentsPlus(*this, CalleeName, UnaryExpr);
+ default:
+ break;
+ }
+
+ if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
+ if (Lvalue->getType()->isArrayType())
+ return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
+
+ if (const auto *Label = dyn_cast<AddrLabelExpr>(Arg)) {
+ Diag(Label->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << 0 /*object: */ << Label->getLabel()->getIdentifier();
+ return;
+ }
- if (const auto *Lvalue = dyn_cast<DeclRefExpr>(Arg))
- return CheckFreeArgumentsStackArray(*this, CalleeName, Lvalue);
+ if (isa<BlockExpr>(Arg)) {
+ Diag(Arg->getBeginLoc(), diag::warn_free_nonheap_object)
+ << CalleeName << 1 /*object: block*/;
+ return;
+ }
+ }
+ // Maybe the cast was important, check after the other cases.
+ if (const auto *Cast = dyn_cast<CastExpr>(E->getArg(0)))
+ return CheckFreeArgumentsCast(*this, CalleeName, Cast);
}
void
@@ -11225,11 +11750,14 @@ static bool CheckTautologicalComparison(Sema &S, BinaryOperator *E,
<< OtherIsBooleanDespiteType << *Result
<< E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange());
} else {
- unsigned Diag = (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
- ? (HasEnumType(OriginalOther)
- ? diag::warn_unsigned_enum_always_true_comparison
- : diag::warn_unsigned_always_true_comparison)
- : diag::warn_tautological_constant_compare;
+ bool IsCharTy = OtherT.withoutLocalFastQualifiers() == S.Context.CharTy;
+ unsigned Diag =
+ (isKnownToHaveUnsignedValue(OriginalOther) && Value == 0)
+ ? (HasEnumType(OriginalOther)
+ ? diag::warn_unsigned_enum_always_true_comparison
+ : IsCharTy ? diag::warn_unsigned_char_always_true_comparison
+ : diag::warn_unsigned_always_true_comparison)
+ : diag::warn_tautological_constant_compare;
S.Diag(E->getOperatorLoc(), Diag)
<< RhsConstant << OtherT << E->getOpcodeStr() << OS.str() << *Result
@@ -11484,8 +12012,8 @@ static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
if (FieldWidth == 1 && Value == 1)
return false;
- std::string PrettyValue = Value.toString(10);
- std::string PrettyTrunc = TruncatedValue.toString(10);
+ std::string PrettyValue = toString(Value, 10);
+ std::string PrettyTrunc = toString(TruncatedValue, 10);
S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
<< PrettyValue << PrettyTrunc << OriginalInit->getType()
@@ -11721,7 +12249,7 @@ static std::string PrettyPrintInRange(const llvm::APSInt &Value,
llvm::APSInt ValueInRange = Value;
ValueInRange.setIsSigned(!Range.NonNegative);
ValueInRange = ValueInRange.trunc(Range.Width);
- return ValueInRange.toString(10);
+ return toString(ValueInRange, 10);
}
static bool IsImplicitBoolFloatConversion(Sema &S, Expr *Ex, bool ToBool) {
@@ -12037,7 +12565,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
adornObjCBoolConversionDiagWithTernaryFixit(
S, E,
S.Diag(CC, diag::warn_impcast_constant_value_to_objc_bool)
- << Result.Val.getInt().toString(10));
+ << toString(Result.Val.getInt(), 10));
}
return;
}
@@ -12052,6 +12580,13 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Strip vector types.
if (isa<VectorType>(Source)) {
+ if (Target->isVLSTBuiltinType() &&
+ (S.Context.areCompatibleSveTypes(QualType(Target, 0),
+ QualType(Source, 0)) ||
+ S.Context.areLaxCompatibleSveTypes(QualType(Target, 0),
+ QualType(Source, 0))))
+ return;
+
if (!isa<VectorType>(Target)) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -12212,7 +12747,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (Overflowed) {
S.DiagRuntimeBehavior(E->getExprLoc(), E,
S.PDiag(diag::warn_impcast_fixed_point_range)
- << Value.toString(/*Radix=*/10) << T
+ << toString(Value, /*Radix=*/10) << T
<< E->getSourceRange()
<< clang::SourceRange(CC));
return;
@@ -12252,7 +12787,8 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
llvm::APFloat::rmNearestTiesToEven);
if (ConversionStatus != llvm::APFloat::opOK) {
- std::string PrettySourceValue = SourceInt->toString(10);
+ SmallString<32> PrettySourceValue;
+ SourceInt->toString(PrettySourceValue, 10);
SmallString<32> PrettyTargetValue;
TargetFloatValue.toString(PrettyTargetValue, TargetPrecision);
@@ -12311,7 +12847,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
if (S.SourceMgr.isInSystemMacro(CC))
return;
- std::string PrettySourceValue = Value.toString(10);
+ std::string PrettySourceValue = toString(Value, 10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
S.DiagRuntimeBehavior(
@@ -12357,7 +12893,7 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
!S.SourceMgr.isInSystemMacro(CC)) {
llvm::APSInt Value = Result.Val.getInt();
if (isSameWidthConstantConversion(S, E, T, CC)) {
- std::string PrettySourceValue = Value.toString(10);
+ std::string PrettySourceValue = toString(Value, 10);
std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
S.DiagRuntimeBehavior(
@@ -14043,7 +14579,8 @@ static getBaseAlignmentAndOffsetFromLValue(const Expr *E, ASTContext &Ctx) {
case Stmt::MemberExprClass: {
auto *ME = cast<MemberExpr>(E);
auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
- if (!FD || FD->getType()->isReferenceType())
+ if (!FD || FD->getType()->isReferenceType() ||
+ FD->getParent()->isInvalidDecl())
break;
Optional<std::pair<CharUnits, CharUnits>> P;
if (ME->isArrow())
@@ -14267,11 +14804,11 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ConstantArrayType *ArrayTy =
Context.getAsConstantArrayType(BaseExpr->getType());
- if (!ArrayTy)
- return;
-
- const Type *BaseType = ArrayTy->getElementType().getTypePtr();
- if (EffectiveType->isDependentType() || BaseType->isDependentType())
+ const Type *BaseType =
+ ArrayTy == nullptr ? nullptr : ArrayTy->getElementType().getTypePtr();
+ bool IsUnboundedArray = (BaseType == nullptr);
+ if (EffectiveType->isDependentType() ||
+ (!IsUnboundedArray && BaseType->isDependentType()))
return;
Expr::EvalResult Result;
@@ -14279,8 +14816,10 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
return;
llvm::APSInt index = Result.Val.getInt();
- if (IndexNegated)
+ if (IndexNegated) {
+ index.setIsUnsigned(false);
index = -index;
+ }
const NamedDecl *ND = nullptr;
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
@@ -14288,6 +14827,74 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
ND = ME->getMemberDecl();
+ if (IsUnboundedArray) {
+ if (index.isUnsigned() || !index.isNegative()) {
+ const auto &ASTC = getASTContext();
+ unsigned AddrBits =
+ ASTC.getTargetInfo().getPointerWidth(ASTC.getTargetAddressSpace(
+ EffectiveType->getCanonicalTypeInternal()));
+ if (index.getBitWidth() < AddrBits)
+ index = index.zext(AddrBits);
+ Optional<CharUnits> ElemCharUnits =
+ ASTC.getTypeSizeInCharsIfKnown(EffectiveType);
+ // PR50741 - If EffectiveType has unknown size (e.g., if it's a void
+ // pointer) bounds-checking isn't meaningful.
+ if (!ElemCharUnits)
+ return;
+ llvm::APInt ElemBytes(index.getBitWidth(), ElemCharUnits->getQuantity());
+ // If index has more active bits than address space, we already know
+ // we have a bounds violation to warn about. Otherwise, compute
+ // address of (index + 1)th element, and warn about bounds violation
+ // only if that address exceeds address space.
+ if (index.getActiveBits() <= AddrBits) {
+ bool Overflow;
+ llvm::APInt Product(index);
+ Product += 1;
+ Product = Product.umul_ov(ElemBytes, Overflow);
+ if (!Overflow && Product.getActiveBits() <= AddrBits)
+ return;
+ }
+
+ // Need to compute max possible elements in address space, since that
+ // is included in diag message.
+ llvm::APInt MaxElems = llvm::APInt::getMaxValue(AddrBits);
+ MaxElems = MaxElems.zext(std::max(AddrBits + 1, ElemBytes.getBitWidth()));
+ MaxElems += 1;
+ ElemBytes = ElemBytes.zextOrTrunc(MaxElems.getBitWidth());
+ MaxElems = MaxElems.udiv(ElemBytes);
+
+ unsigned DiagID =
+ ASE ? diag::warn_array_index_exceeds_max_addressable_bounds
+ : diag::warn_ptr_arith_exceeds_max_addressable_bounds;
+
+ // Diag message shows element size in bits and in "bytes" (platform-
+ // dependent CharUnits)
+ DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
+ PDiag(DiagID)
+ << toString(index, 10, true) << AddrBits
+ << (unsigned)ASTC.toBits(*ElemCharUnits)
+ << toString(ElemBytes, 10, false)
+ << toString(MaxElems, 10, false)
+ << (unsigned)MaxElems.getLimitedValue(~0U)
+ << IndexExpr->getSourceRange());
+
+ if (!ND) {
+ // Try harder to find a NamedDecl to point at in the note.
+ while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ BaseExpr = ASE->getBase()->IgnoreParenCasts();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = DRE->getDecl();
+ if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = ME->getMemberDecl();
+ }
+
+ if (ND)
+ DiagRuntimeBehavior(ND->getBeginLoc(), BaseExpr,
+ PDiag(diag::note_array_declared_here) << ND);
+ }
+ return;
+ }
+
if (index.isUnsigned() || !index.isNegative()) {
// It is possible that the type of the base expression after
// IgnoreParenCasts is incomplete, even though the type of the base
@@ -14350,13 +14957,12 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
}
}
- unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
- if (ASE)
- DiagID = diag::warn_array_index_exceeds_bounds;
+ unsigned DiagID = ASE ? diag::warn_array_index_exceeds_bounds
+ : diag::warn_ptr_arith_exceeds_bounds;
DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
- PDiag(DiagID) << index.toString(10, true)
- << size.toString(10, true)
+ PDiag(DiagID) << toString(index, 10, true)
+ << toString(size, 10, true)
<< (unsigned)size.getLimitedValue(~0U)
<< IndexExpr->getSourceRange());
} else {
@@ -14367,18 +14973,17 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
}
DiagRuntimeBehavior(BaseExpr->getBeginLoc(), BaseExpr,
- PDiag(DiagID) << index.toString(10, true)
+ PDiag(DiagID) << toString(index, 10, true)
<< IndexExpr->getSourceRange());
}
if (!ND) {
// Try harder to find a NamedDecl to point at in the note.
- while (const ArraySubscriptExpr *ASE =
- dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BaseExpr))
BaseExpr = ASE->getBase()->IgnoreParenCasts();
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
ND = DRE->getDecl();
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ if (const auto *ME = dyn_cast<MemberExpr>(BaseExpr))
ND = ME->getMemberDecl();
}
diff --git a/clang/lib/Sema/SemaCodeComplete.cpp b/clang/lib/Sema/SemaCodeComplete.cpp
index c2785fd60fc2..e03b671ae61e 100644
--- a/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/clang/lib/Sema/SemaCodeComplete.cpp
@@ -381,6 +381,8 @@ public:
} // namespace
void PreferredTypeBuilder::enterReturn(Sema &S, SourceLocation Tok) {
+ if (!Enabled)
+ return;
if (isa<BlockDecl>(S.CurContext)) {
if (sema::BlockScopeInfo *BSI = S.getCurBlock()) {
ComputeType = nullptr;
@@ -399,14 +401,30 @@ void PreferredTypeBuilder::enterReturn(Sema &S, SourceLocation Tok) {
}
void PreferredTypeBuilder::enterVariableInit(SourceLocation Tok, Decl *D) {
+ if (!Enabled)
+ return;
auto *VD = llvm::dyn_cast_or_null<ValueDecl>(D);
ComputeType = nullptr;
Type = VD ? VD->getType() : QualType();
ExpectedLoc = Tok;
}
+static QualType getDesignatedType(QualType BaseType, const Designation &Desig);
+
+void PreferredTypeBuilder::enterDesignatedInitializer(SourceLocation Tok,
+ QualType BaseType,
+ const Designation &D) {
+ if (!Enabled)
+ return;
+ ComputeType = nullptr;
+ Type = getDesignatedType(BaseType, D);
+ ExpectedLoc = Tok;
+}
+
void PreferredTypeBuilder::enterFunctionArgument(
SourceLocation Tok, llvm::function_ref<QualType()> ComputeType) {
+ if (!Enabled)
+ return;
this->ComputeType = ComputeType;
Type = QualType();
ExpectedLoc = Tok;
@@ -414,6 +432,8 @@ void PreferredTypeBuilder::enterFunctionArgument(
void PreferredTypeBuilder::enterParenExpr(SourceLocation Tok,
SourceLocation LParLoc) {
+ if (!Enabled)
+ return;
// expected type for parenthesized expression does not change.
if (ExpectedLoc == LParLoc)
ExpectedLoc = Tok;
@@ -531,6 +551,8 @@ static QualType getPreferredTypeOfUnaryArg(Sema &S, QualType ContextType,
void PreferredTypeBuilder::enterBinary(Sema &S, SourceLocation Tok, Expr *LHS,
tok::TokenKind Op) {
+ if (!Enabled)
+ return;
ComputeType = nullptr;
Type = getPreferredTypeOfBinaryRHS(S, LHS, Op);
ExpectedLoc = Tok;
@@ -538,7 +560,7 @@ void PreferredTypeBuilder::enterBinary(Sema &S, SourceLocation Tok, Expr *LHS,
void PreferredTypeBuilder::enterMemAccess(Sema &S, SourceLocation Tok,
Expr *Base) {
- if (!Base)
+ if (!Enabled || !Base)
return;
// Do we have expected type for Base?
if (ExpectedLoc != Base->getBeginLoc())
@@ -551,6 +573,8 @@ void PreferredTypeBuilder::enterMemAccess(Sema &S, SourceLocation Tok,
void PreferredTypeBuilder::enterUnary(Sema &S, SourceLocation Tok,
tok::TokenKind OpKind,
SourceLocation OpLoc) {
+ if (!Enabled)
+ return;
ComputeType = nullptr;
Type = getPreferredTypeOfUnaryArg(S, this->get(OpLoc), OpKind);
ExpectedLoc = Tok;
@@ -558,6 +582,8 @@ void PreferredTypeBuilder::enterUnary(Sema &S, SourceLocation Tok,
void PreferredTypeBuilder::enterSubscript(Sema &S, SourceLocation Tok,
Expr *LHS) {
+ if (!Enabled)
+ return;
ComputeType = nullptr;
Type = S.getASTContext().IntTy;
ExpectedLoc = Tok;
@@ -565,12 +591,16 @@ void PreferredTypeBuilder::enterSubscript(Sema &S, SourceLocation Tok,
void PreferredTypeBuilder::enterTypeCast(SourceLocation Tok,
QualType CastType) {
+ if (!Enabled)
+ return;
ComputeType = nullptr;
Type = !CastType.isNull() ? CastType.getCanonicalType() : QualType();
ExpectedLoc = Tok;
}
void PreferredTypeBuilder::enterCondition(Sema &S, SourceLocation Tok) {
+ if (!Enabled)
+ return;
ComputeType = nullptr;
Type = S.getASTContext().BoolTy;
ExpectedLoc = Tok;
@@ -709,18 +739,17 @@ getRequiredQualification(ASTContext &Context, const DeclContext *CurContext,
// Filter out names reserved for the implementation if they come from a
// system header.
static bool shouldIgnoreDueToReservedName(const NamedDecl *ND, Sema &SemaRef) {
- const IdentifierInfo *Id = ND->getIdentifier();
- if (!Id)
- return false;
-
+ ReservedIdentifierStatus Status = ND->isReserved(SemaRef.getLangOpts());
// Ignore reserved names for compiler provided decls.
- if (Id->isReservedName() && ND->getLocation().isInvalid())
+ if ((Status != ReservedIdentifierStatus::NotReserved) &&
+ (Status != ReservedIdentifierStatus::StartsWithUnderscoreAtGlobalScope) &&
+ ND->getLocation().isInvalid())
return true;
// For system headers ignore only double-underscore names.
// This allows for system headers providing private symbols with a single
// underscore.
- if (Id->isReservedName(/*doubleUnderscoreOnly=*/true) &&
+ if (Status == ReservedIdentifierStatus::StartsWithDoubleUnderscore &&
SemaRef.SourceMgr.isInSystemHeader(
SemaRef.SourceMgr.getSpellingLoc(ND->getLocation())))
return true;
@@ -3886,6 +3915,9 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
case Decl::UnresolvedUsingTypename:
return CXCursor_UsingDeclaration;
+ case Decl::UsingEnum:
+ return CXCursor_EnumDecl;
+
case Decl::ObjCPropertyImpl:
switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
case ObjCPropertyImplDecl::Dynamic:
@@ -4784,8 +4816,16 @@ static void AddRecordMembersCompletionResults(
// in case of specializations. Since we might not have a decl for the
// instantiation/specialization yet, e.g. dependent code.
static RecordDecl *getAsRecordDecl(const QualType BaseType) {
- if (auto *RD = BaseType->getAsRecordDecl())
+ if (auto *RD = BaseType->getAsRecordDecl()) {
+ if (const auto *CTSD =
+ llvm::dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ // Template might not be instantiated yet, fall back to primary template
+ // in such cases.
+ if (CTSD->getTemplateSpecializationKind() == TSK_Undeclared)
+ RD = CTSD->getSpecializedTemplate()->getTemplatedDecl();
+ }
return RD;
+ }
if (const auto *TST = BaseType->getAs<TemplateSpecializationType>()) {
if (const auto *TD = dyn_cast_or_null<ClassTemplateDecl>(
@@ -5158,6 +5198,88 @@ private:
llvm::DenseMap<const IdentifierInfo *, Member> Results;
};
+
+// Returns a type for E that yields acceptable member completions.
+// In particular, when E->getType() is DependentTy, try to guess a likely type.
+// We accept some lossiness (like dropping parameters).
+// We only try to handle common expressions on the LHS of MemberExpr.
+QualType getApproximateType(const Expr *E) {
+ QualType Unresolved = E->getType();
+ if (Unresolved.isNull() ||
+ !Unresolved->isSpecificBuiltinType(BuiltinType::Dependent))
+ return Unresolved;
+ E = E->IgnoreParens();
+ // A call: approximate-resolve callee to a function type, get its return type
+ if (const CallExpr *CE = llvm::dyn_cast<CallExpr>(E)) {
+ QualType Callee = getApproximateType(CE->getCallee());
+ if (Callee.isNull() ||
+ Callee->isSpecificPlaceholderType(BuiltinType::BoundMember))
+ Callee = Expr::findBoundMemberType(CE->getCallee());
+ if (Callee.isNull())
+ return Unresolved;
+
+ if (const auto *FnTypePtr = Callee->getAs<PointerType>()) {
+ Callee = FnTypePtr->getPointeeType();
+ } else if (const auto *BPT = Callee->getAs<BlockPointerType>()) {
+ Callee = BPT->getPointeeType();
+ }
+ if (const FunctionType *FnType = Callee->getAs<FunctionType>())
+ return FnType->getReturnType().getNonReferenceType();
+
+ // Unresolved call: try to guess the return type.
+ if (const auto *OE = llvm::dyn_cast<OverloadExpr>(CE->getCallee())) {
+ // If all candidates have the same approximate return type, use it.
+ // Discard references and const to allow more to be "the same".
+ // (In particular, if there's one candidate + ADL, resolve it).
+ const Type *Common = nullptr;
+ for (const auto *D : OE->decls()) {
+ QualType ReturnType;
+ if (const auto *FD = llvm::dyn_cast<FunctionDecl>(D))
+ ReturnType = FD->getReturnType();
+ else if (const auto *FTD = llvm::dyn_cast<FunctionTemplateDecl>(D))
+ ReturnType = FTD->getTemplatedDecl()->getReturnType();
+ if (ReturnType.isNull())
+ continue;
+ const Type *Candidate =
+ ReturnType.getNonReferenceType().getCanonicalType().getTypePtr();
+ if (Common && Common != Candidate)
+ return Unresolved; // Multiple candidates.
+ Common = Candidate;
+ }
+ if (Common != nullptr)
+ return QualType(Common, 0);
+ }
+ }
+ // A dependent member: approximate-resolve the base, then lookup.
+ if (const auto *CDSME = llvm::dyn_cast<CXXDependentScopeMemberExpr>(E)) {
+ QualType Base = CDSME->isImplicitAccess()
+ ? CDSME->getBaseType()
+ : getApproximateType(CDSME->getBase());
+ if (CDSME->isArrow() && !Base.isNull())
+ Base = Base->getPointeeType(); // could handle unique_ptr etc here?
+ RecordDecl *RD = Base.isNull() ? nullptr : getAsRecordDecl(Base);
+ if (RD && RD->isCompleteDefinition()) {
+ for (const auto *Member : RD->lookup(CDSME->getMember()))
+ if (const ValueDecl *VD = llvm::dyn_cast<ValueDecl>(Member))
+ return VD->getType().getNonReferenceType();
+ }
+ }
+ return Unresolved;
+}
+
+// If \p Base is ParenListExpr, assume a chain of comma operators and pick the
+// last expr. We expect other ParenListExprs to be resolved to e.g. constructor
+// calls before here. (So the ParenListExpr should be nonempty, but check just
+// in case)
+Expr *unwrapParenList(Expr *Base) {
+ if (auto *PLE = llvm::dyn_cast_or_null<ParenListExpr>(Base)) {
+ if (PLE->getNumExprs() == 0)
+ return nullptr;
+ Base = PLE->getExpr(PLE->getNumExprs() - 1);
+ }
+ return Base;
+}
+
} // namespace
void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
@@ -5165,13 +5287,15 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType) {
+ Base = unwrapParenList(Base);
+ OtherOpBase = unwrapParenList(OtherOpBase);
if (!Base || !CodeCompleter)
return;
ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
if (ConvertedBase.isInvalid())
return;
- QualType ConvertedBaseType = ConvertedBase.get()->getType();
+ QualType ConvertedBaseType = getApproximateType(ConvertedBase.get());
enum CodeCompletionContext::Kind contextKind;
@@ -5207,7 +5331,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
return false;
Base = ConvertedBase.get();
- QualType BaseType = Base->getType();
+ QualType BaseType = getApproximateType(Base);
if (BaseType.isNull())
return false;
ExprValueKind BaseKind = Base->getValueKind();
@@ -5589,20 +5713,22 @@ ProduceSignatureHelp(Sema &SemaRef, Scope *S,
unsigned CurrentArg, SourceLocation OpenParLoc) {
if (Candidates.empty())
return QualType();
- SemaRef.CodeCompleter->ProcessOverloadCandidates(
- SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc);
+ if (SemaRef.getPreprocessor().isCodeCompletionReached())
+ SemaRef.CodeCompleter->ProcessOverloadCandidates(
+ SemaRef, CurrentArg, Candidates.data(), Candidates.size(), OpenParLoc);
return getParamType(SemaRef, Candidates, CurrentArg);
}
QualType Sema::ProduceCallSignatureHelp(Scope *S, Expr *Fn,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc) {
- if (!CodeCompleter)
+ Fn = unwrapParenList(Fn);
+ if (!CodeCompleter || !Fn)
return QualType();
// FIXME: Provide support for variadic template functions.
// Ignore type-dependent call expressions entirely.
- if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args))
+ if (Fn->isTypeDependent() || anyNullArguments(Args))
return QualType();
// In presence of dependent args we surface all possible signatures using the
// non-dependent args in the prefix. Afterwards we do a post filtering to make
@@ -5754,25 +5880,39 @@ QualType Sema::ProduceCtorInitMemberSignatureHelp(
return QualType();
}
-void Sema::CodeCompleteDesignator(const QualType BaseType,
+static QualType getDesignatedType(QualType BaseType, const Designation &Desig) {
+ for (unsigned I = 0; I < Desig.getNumDesignators(); ++I) {
+ if (BaseType.isNull())
+ break;
+ QualType NextType;
+ const auto &D = Desig.getDesignator(I);
+ if (D.isArrayDesignator() || D.isArrayRangeDesignator()) {
+ if (BaseType->isArrayType())
+ NextType = BaseType->getAsArrayTypeUnsafe()->getElementType();
+ } else {
+ assert(D.isFieldDesignator());
+ auto *RD = getAsRecordDecl(BaseType);
+ if (RD && RD->isCompleteDefinition()) {
+ for (const auto *Member : RD->lookup(D.getField()))
+ if (const FieldDecl *FD = llvm::dyn_cast<FieldDecl>(Member)) {
+ NextType = FD->getType();
+ break;
+ }
+ }
+ }
+ BaseType = NextType;
+ }
+ return BaseType;
+}
+
+void Sema::CodeCompleteDesignator(QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D) {
+ BaseType = getDesignatedType(BaseType, D);
if (BaseType.isNull())
return;
- // FIXME: Handle nested designations, e.g. : .x.^
- if (!D.empty())
- return;
-
const auto *RD = getAsRecordDecl(BaseType);
- if (!RD)
- return;
- if (const auto *CTSD = llvm::dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
- // Template might not be instantiated yet, fall back to primary template in
- // such cases.
- if (CTSD->getTemplateSpecializationKind() == TSK_Undeclared)
- RD = CTSD->getSpecializedTemplate()->getTemplatedDecl();
- }
- if (RD->fields().empty())
+ if (!RD || RD->fields().empty())
return;
CodeCompletionContext CCC(CodeCompletionContext::CCC_DotMemberAccess,
@@ -9061,6 +9201,18 @@ void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
Builder.AddPlaceholderChunk("condition");
Results.AddResult(Builder.TakeString());
+ // #elifdef <macro>
+ Builder.AddTypedTextChunk("elifdef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #elifndef <macro>
+ Builder.AddTypedTextChunk("elifndef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
// #else
Builder.AddTypedTextChunk("else");
Results.AddResult(Builder.TakeString());
@@ -9339,10 +9491,10 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
// Only files that really look like headers. (Except in system dirs).
if (!IsSystem) {
// Header extensions from Types.def, which we can't depend on here.
- if (!(Filename.endswith_lower(".h") ||
- Filename.endswith_lower(".hh") ||
- Filename.endswith_lower(".hpp") ||
- Filename.endswith_lower(".inc")))
+ if (!(Filename.endswith_insensitive(".h") ||
+ Filename.endswith_insensitive(".hh") ||
+ Filename.endswith_insensitive(".hpp") ||
+ Filename.endswith_insensitive(".inc")))
break;
}
AddCompletion(Filename, /*IsDirectory=*/false);
diff --git a/clang/lib/Sema/SemaConcept.cpp b/clang/lib/Sema/SemaConcept.cpp
index 1ff7b1cdd515..f2c70d0a56ef 100644
--- a/clang/lib/Sema/SemaConcept.cpp
+++ b/clang/lib/Sema/SemaConcept.cpp
@@ -25,6 +25,8 @@
#include "clang/Basic/OperatorPrecedence.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/StringExtras.h"
+
using namespace clang;
using namespace sema;
@@ -41,9 +43,12 @@ public:
LHS = BO->getLHS();
RHS = BO->getRHS();
} else if (auto *OO = dyn_cast<CXXOperatorCallExpr>(E)) {
- Op = OO->getOperator();
- LHS = OO->getArg(0);
- RHS = OO->getArg(1);
+ // If OO is not || or && it might not have exactly 2 arguments.
+ if (OO->getNumArgs() == 2) {
+ Op = OO->getOperator();
+ LHS = OO->getArg(0);
+ RHS = OO->getArg(1);
+ }
}
}
@@ -172,9 +177,11 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
SmallVector<PartialDiagnosticAt, 2> EvaluationDiags;
Expr::EvalResult EvalResult;
EvalResult.Diag = &EvaluationDiags;
- if (!SubstitutedAtomicExpr.get()->EvaluateAsRValue(EvalResult, S.Context)) {
- // C++2a [temp.constr.atomic]p1
- // ...E shall be a constant expression of type bool.
+ if (!SubstitutedAtomicExpr.get()->EvaluateAsConstantExpr(EvalResult,
+ S.Context) ||
+ !EvaluationDiags.empty()) {
+ // C++2a [temp.constr.atomic]p1
+ // ...E shall be a constant expression of type bool.
S.Diag(SubstitutedAtomicExpr.get()->getBeginLoc(),
diag::err_non_constant_constraint_expression)
<< SubstitutedAtomicExpr.get()->getSourceRange();
@@ -183,6 +190,8 @@ calculateConstraintSatisfaction(Sema &S, const Expr *ConstraintExpr,
return true;
}
+ assert(EvalResult.Val.isInt() &&
+ "evaluating bool expression didn't produce int");
Satisfaction.IsSatisfied = EvalResult.Val.getInt().getBoolValue();
if (!Satisfaction.IsSatisfied)
Satisfaction.Details.emplace_back(ConstraintExpr,
@@ -214,6 +223,13 @@ static bool calculateConstraintSatisfaction(
Sema::SFINAETrap Trap(S);
SubstitutedExpression = S.SubstExpr(const_cast<Expr *>(AtomicExpr),
MLTAL);
+ // Substitution might have stripped off a contextual conversion to
+ // bool if this is the operand of an '&&' or '||'. For example, we
+ // might lose an lvalue-to-rvalue conversion here. If so, put it back
+ // before we try to evaluate.
+ if (!SubstitutedExpression.isInvalid())
+ SubstitutedExpression =
+ S.PerformContextuallyConvertToBool(SubstitutedExpression.get());
if (SubstitutedExpression.isInvalid() || Trap.hasErrorOccurred()) {
// C++2a [temp.constr.atomic]p1
// ...If substitution results in an invalid type or expression, the
@@ -439,18 +455,19 @@ static void diagnoseUnsatisfiedRequirement(Sema &S,
case concepts::ExprRequirement::SS_ConstraintsNotSatisfied: {
ConceptSpecializationExpr *ConstraintExpr =
Req->getReturnTypeRequirementSubstitutedConstraintExpr();
- if (ConstraintExpr->getTemplateArgsAsWritten()->NumTemplateArgs == 1)
+ if (ConstraintExpr->getTemplateArgsAsWritten()->NumTemplateArgs == 1) {
// A simple case - expr type is the type being constrained and the concept
// was not provided arguments.
- S.Diag(ConstraintExpr->getBeginLoc(),
+ Expr *e = Req->getExpr();
+ S.Diag(e->getBeginLoc(),
diag::note_expr_requirement_constraints_not_satisfied_simple)
- << (int)First << S.BuildDecltypeType(Req->getExpr(),
- Req->getExpr()->getBeginLoc())
+ << (int)First << S.getDecltypeForParenthesizedExpr(e)
<< ConstraintExpr->getNamedConcept();
- else
+ } else {
S.Diag(ConstraintExpr->getBeginLoc(),
diag::note_expr_requirement_constraints_not_satisfied)
<< (int)First << ConstraintExpr;
+ }
S.DiagnoseUnsatisfiedConstraint(ConstraintExpr->getSatisfaction());
break;
}
@@ -522,9 +539,9 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
diagnoseWellFormedUnsatisfiedConstraintExpr(S, BO->getRHS(),
/*First=*/false);
return;
- case BO_LAnd:
- bool LHSSatisfied;
- BO->getLHS()->EvaluateAsBooleanCondition(LHSSatisfied, S.Context);
+ case BO_LAnd: {
+ bool LHSSatisfied =
+ BO->getLHS()->EvaluateKnownConstInt(S.Context).getBoolValue();
if (LHSSatisfied) {
// LHS is true, so RHS must be false.
diagnoseWellFormedUnsatisfiedConstraintExpr(S, BO->getRHS(), First);
@@ -534,12 +551,13 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
diagnoseWellFormedUnsatisfiedConstraintExpr(S, BO->getLHS(), First);
// RHS might also be false
- bool RHSSatisfied;
- BO->getRHS()->EvaluateAsBooleanCondition(RHSSatisfied, S.Context);
+ bool RHSSatisfied =
+ BO->getRHS()->EvaluateKnownConstInt(S.Context).getBoolValue();
if (!RHSSatisfied)
diagnoseWellFormedUnsatisfiedConstraintExpr(S, BO->getRHS(),
/*First=*/false);
return;
+ }
case BO_GE:
case BO_LE:
case BO_GT:
@@ -550,15 +568,19 @@ static void diagnoseWellFormedUnsatisfiedConstraintExpr(Sema &S,
BO->getRHS()->getType()->isIntegerType()) {
Expr::EvalResult SimplifiedLHS;
Expr::EvalResult SimplifiedRHS;
- BO->getLHS()->EvaluateAsInt(SimplifiedLHS, S.Context);
- BO->getRHS()->EvaluateAsInt(SimplifiedRHS, S.Context);
+ BO->getLHS()->EvaluateAsInt(SimplifiedLHS, S.Context,
+ Expr::SE_NoSideEffects,
+ /*InConstantContext=*/true);
+ BO->getRHS()->EvaluateAsInt(SimplifiedRHS, S.Context,
+ Expr::SE_NoSideEffects,
+ /*InConstantContext=*/true);
if (!SimplifiedLHS.Diag && ! SimplifiedRHS.Diag) {
S.Diag(SubstExpr->getBeginLoc(),
diag::note_atomic_constraint_evaluated_to_false_elaborated)
<< (int)First << SubstExpr
- << SimplifiedLHS.Val.getInt().toString(10)
+ << toString(SimplifiedLHS.Val.getInt(), 10)
<< BinaryOperator::getOpcodeStr(BO->getOpcode())
- << SimplifiedRHS.Val.getInt().toString(10);
+ << toString(SimplifiedRHS.Val.getInt(), 10);
return;
}
}
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index 7a48bfa429e9..94c728093e7c 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -291,26 +291,6 @@ static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, Scope *S,
cast<UnresolvedLookupExpr>(R.get()));
}
-static Expr *buildBuiltinCall(Sema &S, SourceLocation Loc, Builtin::ID Id,
- MultiExprArg CallArgs) {
- StringRef Name = S.Context.BuiltinInfo.getName(Id);
- LookupResult R(S, &S.Context.Idents.get(Name), Loc, Sema::LookupOrdinaryName);
- S.LookupName(R, S.TUScope, /*AllowBuiltinCreation=*/true);
-
- auto *BuiltInDecl = R.getAsSingle<FunctionDecl>();
- assert(BuiltInDecl && "failed to find builtin declaration");
-
- ExprResult DeclRef =
- S.BuildDeclRefExpr(BuiltInDecl, BuiltInDecl->getType(), VK_LValue, Loc);
- assert(DeclRef.isUsable() && "Builtin reference cannot fail");
-
- ExprResult Call =
- S.BuildCallExpr(/*Scope=*/nullptr, DeclRef.get(), Loc, CallArgs, Loc);
-
- assert(!Call.isInvalid() && "Call to builtin cannot fail!");
- return Call.get();
-}
-
static ExprResult buildCoroutineHandle(Sema &S, QualType PromiseType,
SourceLocation Loc) {
QualType CoroHandleType = lookupCoroutineHandleType(S, PromiseType, Loc);
@@ -327,7 +307,7 @@ static ExprResult buildCoroutineHandle(Sema &S, QualType PromiseType,
}
Expr *FramePtr =
- buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_frame, {});
+ S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_frame, {});
CXXScopeSpec SS;
ExprResult FromAddr =
@@ -404,8 +384,8 @@ static Expr *maybeTailCall(Sema &S, QualType RetType, Expr *E,
// the resume call and return instruction, which would interfere with the
// musttail call contract.
JustAddress = S.MaybeCreateExprWithCleanups(JustAddress);
- return buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_resume,
- JustAddress);
+ return S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_resume,
+ JustAddress);
}
/// Build calls to await_ready, await_suspend, and await_resume for a co_await
@@ -897,7 +877,7 @@ ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *E,
// If the expression is a temporary, materialize it as an lvalue so that we
// can use it multiple times.
- if (E->getValueKind() == VK_RValue)
+ if (E->isPRValue())
E = CreateMaterializeTemporaryExpr(E->getType(), E, true);
// The location of the `co_await` token cannot be used when constructing
@@ -957,7 +937,7 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
// If the expression is a temporary, materialize it as an lvalue so that we
// can use it multiple times.
- if (E->getValueKind() == VK_RValue)
+ if (E->isPRValue())
E = CreateMaterializeTemporaryExpr(E->getType(), E, true);
// Build the await_ready, await_suspend, await_resume calls.
@@ -994,25 +974,10 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
E = R.get();
}
- // Move the return value if we can
- if (E) {
- auto NRVOCandidate = this->getCopyElisionCandidate(E->getType(), E, CES_AsIfByStdMove);
- if (NRVOCandidate) {
- InitializedEntity Entity =
- InitializedEntity::InitializeResult(Loc, E->getType(), NRVOCandidate);
- ExprResult MoveResult = this->PerformMoveOrCopyInitialization(
- Entity, NRVOCandidate, E->getType(), E);
- if (MoveResult.get())
- E = MoveResult.get();
- }
- }
-
- // FIXME: If the operand is a reference to a variable that's about to go out
- // of scope, we should treat the operand as an xvalue for this overload
- // resolution.
VarDecl *Promise = FSI->CoroutinePromise;
ExprResult PC;
if (E && (isa<InitListExpr>(E) || !E->getType()->isVoidType())) {
+ getNamedReturnInfo(E, SimplerImplicitMoveMode::ForceOn);
PC = buildPromiseCall(*this, Promise, Loc, "return_value", E);
} else {
E = MakeFullDiscardedValueExpr(E).get();
@@ -1372,10 +1337,10 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
return false;
Expr *FramePtr =
- buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_frame, {});
+ S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_frame, {});
Expr *FrameSize =
- buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_size, {});
+ S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_size, {});
// Make new call.
@@ -1404,7 +1369,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
return false;
Expr *CoroFree =
- buildBuiltinCall(S, Loc, Builtin::BI__builtin_coro_free, {FramePtr});
+ S.BuildBuiltinCallExpr(Loc, Builtin::BI__builtin_coro_free, {FramePtr});
SmallVector<Expr *, 2> DeleteArgs{CoroFree};
@@ -1569,7 +1534,7 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
// Trigger a nice error message.
InitializedEntity Entity =
InitializedEntity::InitializeResult(Loc, FnRetType, false);
- S.PerformMoveOrCopyInitialization(Entity, nullptr, FnRetType, ReturnValue);
+ S.PerformCopyInitialization(Entity, SourceLocation(), ReturnValue);
noteMemberDeclaredHere(S, ReturnValue, Fn);
return false;
}
@@ -1585,8 +1550,8 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
return false;
InitializedEntity Entity = InitializedEntity::InitializeVariable(GroDecl);
- ExprResult Res = S.PerformMoveOrCopyInitialization(Entity, nullptr, GroType,
- this->ReturnValue);
+ ExprResult Res =
+ S.PerformCopyInitialization(Entity, SourceLocation(), ReturnValue);
if (Res.isInvalid())
return false;
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 3ee0c43097d7..205f58000302 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -434,10 +434,14 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
// Look to see if we have a type anywhere in the list of results.
for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end();
Res != ResEnd; ++Res) {
- if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res) ||
- (AllowDeducedTemplate && getAsTypeTemplateDecl(*Res))) {
- if (!IIDecl || (*Res)->getLocation() < IIDecl->getLocation())
- IIDecl = *Res;
+ NamedDecl *RealRes = (*Res)->getUnderlyingDecl();
+ if (isa<TypeDecl, ObjCInterfaceDecl, UnresolvedUsingIfExistsDecl>(
+ RealRes) ||
+ (AllowDeducedTemplate && getAsTypeTemplateDecl(RealRes))) {
+ if (!IIDecl ||
+ // Make the selection of the recovery decl deterministic.
+ RealRes->getLocation() < IIDecl->getLocation())
+ IIDecl = RealRes;
}
}
@@ -486,6 +490,10 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
(void)DiagnoseUseOfDecl(IDecl, NameLoc);
if (!HasTrailingDot)
T = Context.getObjCInterfaceType(IDecl);
+ } else if (auto *UD = dyn_cast<UnresolvedUsingIfExistsDecl>(IIDecl)) {
+ (void)DiagnoseUseOfDecl(UD, NameLoc);
+ // Recover with 'int'
+ T = Context.IntTy;
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl))
T = Context.getDeducedTemplateSpecializationType(TemplateName(TD),
@@ -502,7 +510,7 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
// constructor or destructor name (in such a case, the scope specifier
// will be attached to the enclosing Expr or Decl node).
if (SS && SS->isNotEmpty() && !IsCtorOrDtorName &&
- !isa<ObjCInterfaceDecl>(IIDecl)) {
+ !isa<ObjCInterfaceDecl, UnresolvedUsingIfExistsDecl>(IIDecl)) {
if (WantNontrivialTypeSourceInfo) {
// Construct a type with type-source information.
TypeLocBuilder Builder;
@@ -1161,6 +1169,11 @@ Corrected:
return NameClassification::Concept(
TemplateName(cast<TemplateDecl>(FirstDecl)));
+ if (auto *EmptyD = dyn_cast<UnresolvedUsingIfExistsDecl>(FirstDecl)) {
+ (void)DiagnoseUseOfDecl(EmptyD, NameLoc);
+ return NameClassification::Error();
+ }
+
// We can have a type template here if we're classifying a template argument.
if (isa<TemplateDecl>(FirstDecl) && !isa<FunctionTemplateDecl>(FirstDecl) &&
!isa<VarTemplateDecl>(FirstDecl))
@@ -1516,6 +1529,7 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
} else {
IdResolver.AddDecl(D);
}
+ warnOnReservedIdentifier(D);
}
bool Sema::isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S,
@@ -1905,6 +1919,41 @@ void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
Diag(D->getLocation(), DiagID) << D << Hint;
}
+void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD) {
+ // If it's not referenced, it can't be set.
+ if (!VD->isReferenced() || !VD->getDeclName() || VD->hasAttr<UnusedAttr>())
+ return;
+
+ const auto *Ty = VD->getType().getTypePtr()->getBaseElementTypeUnsafe();
+
+ if (Ty->isReferenceType() || Ty->isDependentType())
+ return;
+
+ if (const TagType *TT = Ty->getAs<TagType>()) {
+ const TagDecl *Tag = TT->getDecl();
+ if (Tag->hasAttr<UnusedAttr>())
+ return;
+ // In C++, don't warn for record types that don't have WarnUnusedAttr, to
+ // mimic gcc's behavior.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
+ if (!RD->hasAttr<WarnUnusedAttr>())
+ return;
+ }
+ }
+
+ auto iter = RefsMinusAssignments.find(VD);
+ if (iter == RefsMinusAssignments.end())
+ return;
+
+ assert(iter->getSecond() >= 0 &&
+ "Found a negative number of references to a VarDecl");
+ if (iter->getSecond() != 0)
+ return;
+ unsigned DiagID = isa<ParmVarDecl>(VD) ? diag::warn_unused_but_set_parameter
+ : diag::warn_unused_but_set_variable;
+ Diag(VD->getLocation(), DiagID) << VD;
+}
+
static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
// Verify that we have no forward references left. If so, there was a goto
// or address of a label taken, but no definition of it. Label fwd
@@ -1937,6 +1986,10 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
DiagnoseUnusedDecl(D);
if (const auto *RD = dyn_cast<RecordDecl>(D))
DiagnoseUnusedNestedTypedefs(RD);
+ if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ DiagnoseUnusedButSetDecl(VD);
+ RefsMinusAssignments.erase(VD);
+ }
}
if (!D->getDeclName()) continue;
@@ -2540,9 +2593,18 @@ static bool mergeAlignedAttrs(Sema &S, NamedDecl *New, Decl *Old) {
return AnyAdded;
}
+#define WANT_DECL_MERGE_LOGIC
+#include "clang/Sema/AttrParsedAttrImpl.inc"
+#undef WANT_DECL_MERGE_LOGIC
+
static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
const InheritableAttr *Attr,
Sema::AvailabilityMergeKind AMK) {
+ // Diagnose any mutual exclusions between the attribute that we want to add
+ // and attributes that already exist on the declaration.
+ if (!DiagnoseMutualExclusions(S, D, Attr))
+ return false;
+
// This function copies an attribute Attr from a previous declaration to the
// new declaration D if the new declaration doesn't itself have that attribute
// yet or if that attribute allows duplicates.
@@ -2592,22 +2654,17 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeOptimizeNoneAttr(D, *OA);
else if (const auto *InternalLinkageA = dyn_cast<InternalLinkageAttr>(Attr))
NewAttr = S.mergeInternalLinkageAttr(D, *InternalLinkageA);
- else if (const auto *CommonA = dyn_cast<CommonAttr>(Attr))
- NewAttr = S.mergeCommonAttr(D, *CommonA);
else if (isa<AlignedAttr>(Attr))
// AlignedAttrs are handled separately, because we need to handle all
// such attributes on a declaration at the same time.
NewAttr = nullptr;
else if ((isa<DeprecatedAttr>(Attr) || isa<UnavailableAttr>(Attr)) &&
(AMK == Sema::AMK_Override ||
- AMK == Sema::AMK_ProtocolImplementation))
+ AMK == Sema::AMK_ProtocolImplementation ||
+ AMK == Sema::AMK_OptionalProtocolImplementation))
NewAttr = nullptr;
else if (const auto *UA = dyn_cast<UuidAttr>(Attr))
NewAttr = S.mergeUuidAttr(D, *UA, UA->getGuid(), UA->getGuidDecl());
- else if (const auto *SLHA = dyn_cast<SpeculativeLoadHardeningAttr>(Attr))
- NewAttr = S.mergeSpeculativeLoadHardeningAttr(D, *SLHA);
- else if (const auto *SLHA = dyn_cast<NoSpeculativeLoadHardeningAttr>(Attr))
- NewAttr = S.mergeNoSpeculativeLoadHardeningAttr(D, *SLHA);
else if (const auto *IMA = dyn_cast<WebAssemblyImportModuleAttr>(Attr))
NewAttr = S.mergeImportModuleAttr(D, *IMA);
else if (const auto *INA = dyn_cast<WebAssemblyImportNameAttr>(Attr))
@@ -2831,6 +2888,11 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
NewAttr->setInherited(true);
New->addAttr(NewAttr);
}
+ if (RetainAttr *OldAttr = Old->getMostRecentDecl()->getAttr<RetainAttr>()) {
+ RetainAttr *NewAttr = OldAttr->clone(Context);
+ NewAttr->setInherited(true);
+ New->addAttr(NewAttr);
+ }
if (!Old->hasAttrs() && !New->hasAttrs())
return;
@@ -2947,13 +3009,14 @@ void Sema::mergeDeclAttributes(NamedDecl *New, Decl *Old,
case AMK_Redeclaration:
case AMK_Override:
case AMK_ProtocolImplementation:
+ case AMK_OptionalProtocolImplementation:
LocalAMK = AMK;
break;
}
}
// Already handled.
- if (isa<UsedAttr>(I))
+ if (isa<UsedAttr>(I) || isa<RetainAttr>(I))
continue;
if (mergeDeclAttribute(*this, New, I, LocalAMK))
@@ -3102,6 +3165,7 @@ static bool haveIncompatibleLanguageLinkages(const T *Old, const T *New) {
template<typename T> static bool isExternC(T *D) { return D->isExternC(); }
static bool isExternC(VarTemplateDecl *) { return false; }
+static bool isExternC(FunctionTemplateDecl *) { return false; }
/// Check whether a redeclaration of an entity introduced by a
/// using-declaration is valid, given that we know it's not an overload
@@ -3136,7 +3200,7 @@ static bool checkUsingShadowRedecl(Sema &S, UsingShadowDecl *OldS,
if (!Old) {
S.Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
S.Diag(OldS->getTargetDecl()->getLocation(), diag::note_using_decl_target);
- S.Diag(OldS->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
+ S.Diag(OldS->getIntroducer()->getLocation(), diag::note_using_decl) << 0;
return true;
}
return false;
@@ -3221,15 +3285,25 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
Diag(New->getLocation(), diag::err_using_decl_friend);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
- Diag(Shadow->getUsingDecl()->getLocation(),
- diag::note_using_decl) << 0;
+ Diag(Shadow->getIntroducer()->getLocation(), diag::note_using_decl)
+ << 0;
return true;
}
- // Check whether the two declarations might declare the same function.
- if (checkUsingShadowRedecl<FunctionDecl>(*this, Shadow, New))
- return true;
- OldD = Old = cast<FunctionDecl>(Shadow->getTargetDecl());
+ // Check whether the two declarations might declare the same function or
+ // function template.
+ if (FunctionTemplateDecl *NewTemplate =
+ New->getDescribedFunctionTemplate()) {
+ if (checkUsingShadowRedecl<FunctionTemplateDecl>(*this, Shadow,
+ NewTemplate))
+ return true;
+ OldD = Old = cast<FunctionTemplateDecl>(Shadow->getTargetDecl())
+ ->getAsFunction();
+ } else {
+ if (checkUsingShadowRedecl<FunctionDecl>(*this, Shadow, New))
+ return true;
+ OldD = Old = cast<FunctionDecl>(Shadow->getTargetDecl());
+ }
} else {
Diag(New->getLocation(), diag::err_redefinition_different_kind)
<< New->getDeclName();
@@ -3852,10 +3926,11 @@ void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
ObjCMethodDecl *oldMethod) {
// Merge the attributes, including deprecated/unavailable
AvailabilityMergeKind MergeKind =
- isa<ObjCProtocolDecl>(oldMethod->getDeclContext())
- ? AMK_ProtocolImplementation
- : isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
- : AMK_Override;
+ isa<ObjCProtocolDecl>(oldMethod->getDeclContext())
+ ? (oldMethod->isOptional() ? AMK_OptionalProtocolImplementation
+ : AMK_ProtocolImplementation)
+ : isa<ObjCImplDecl>(newMethod->getDeclContext()) ? AMK_Redeclaration
+ : AMK_Override;
mergeDeclAttributes(newMethod, oldMethod, MergeKind);
@@ -5206,7 +5281,8 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
// trivial in almost all cases, except if a union member has an in-class
// initializer:
// union { int n = 0; };
- ActOnUninitializedDecl(Anon);
+ if (!Invalid)
+ ActOnUninitializedDecl(Anon);
}
Anon->setImplicit();
@@ -5351,10 +5427,8 @@ Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
case UnqualifiedIdKind::IK_OperatorFunctionId:
NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
Name.OperatorFunctionId.Operator));
- NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc =
- Name.OperatorFunctionId.SymbolLocations[0].getRawEncoding();
- NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
- = Name.EndLocation.getRawEncoding();
+ NameInfo.setCXXOperatorNameRange(SourceRange(
+ Name.OperatorFunctionId.SymbolLocations[0], Name.EndLocation));
return NameInfo;
case UnqualifiedIdKind::IK_LiteralOperatorId:
@@ -5551,6 +5625,18 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
return false;
}
+void Sema::warnOnReservedIdentifier(const NamedDecl *D) {
+ // Avoid warning twice on the same identifier, and don't warn on redeclaration
+ // of system decl.
+ if (D->getPreviousDecl() || D->isImplicit())
+ return;
+ ReservedIdentifierStatus Status = D->isReserved(getLangOpts());
+ if (Status != ReservedIdentifierStatus::NotReserved &&
+ !Context.getSourceManager().isInSystemHeader(D->getLocation()))
+ Diag(D->getLocation(), diag::warn_reserved_extern_symbol)
+ << D << static_cast<int>(Status);
+}
+
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration);
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
@@ -5559,9 +5645,6 @@ Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
- if (getLangOpts().OpenCL)
- setCurrentOpenCLExtensionForDecl(Dcl);
-
return Dcl;
}
@@ -6035,26 +6118,26 @@ TryToFixInvalidVariablyModifiedTypeSourceInfo(TypeSourceInfo *TInfo,
/// Attempt to fold a variable-sized type to a constant-sized type, returning
/// true if we were successful.
-static bool tryToFixVariablyModifiedVarType(Sema &S, TypeSourceInfo *&TInfo,
- QualType &T, SourceLocation Loc,
- unsigned FailedFoldDiagID) {
+bool Sema::tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
+ QualType &T, SourceLocation Loc,
+ unsigned FailedFoldDiagID) {
bool SizeIsNegative;
llvm::APSInt Oversized;
TypeSourceInfo *FixedTInfo = TryToFixInvalidVariablyModifiedTypeSourceInfo(
- TInfo, S.Context, SizeIsNegative, Oversized);
+ TInfo, Context, SizeIsNegative, Oversized);
if (FixedTInfo) {
- S.Diag(Loc, diag::ext_vla_folded_to_constant);
+ Diag(Loc, diag::ext_vla_folded_to_constant);
TInfo = FixedTInfo;
T = FixedTInfo->getType();
return true;
}
if (SizeIsNegative)
- S.Diag(Loc, diag::err_typecheck_negative_array_size);
+ Diag(Loc, diag::err_typecheck_negative_array_size);
else if (Oversized.getBoolValue())
- S.Diag(Loc, diag::err_array_too_large) << Oversized.toString(10);
+ Diag(Loc, diag::err_array_too_large) << toString(Oversized, 10);
else if (FailedFoldDiagID)
- S.Diag(Loc, FailedFoldDiagID);
+ Diag(Loc, FailedFoldDiagID);
return false;
}
@@ -6172,7 +6255,7 @@ Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope);
else if (Oversized.getBoolValue())
Diag(NewTD->getLocation(), diag::err_array_too_large)
- << Oversized.toString(10);
+ << toString(Oversized, 10);
else
Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope);
NewTD->setInvalidDecl();
@@ -6341,7 +6424,11 @@ void Sema::deduceOpenCLAddressSpace(ValueDecl *Decl) {
if (Type->isSamplerT() || Type->isVoidType())
return;
LangAS ImplAS = LangAS::opencl_private;
- if ((getLangOpts().OpenCLCPlusPlus || getLangOpts().OpenCLVersion >= 200) &&
+ // OpenCL C v3.0 s6.7.8 - For OpenCL C 2.0 or with the
+ // __opencl_c_program_scope_global_variables feature, the address space
+ // for a variable at program scope or a static or extern variable inside
+ // a function are inferred to be __global.
+ if (getOpenCLOptions().areProgramScopeVariablesSupported(getLangOpts()) &&
Var->hasGlobalStorage())
ImplAS = LangAS::opencl_global;
// If the original type from a decayed type is an array type and that array
@@ -6434,16 +6521,6 @@ static void checkAttributesAfterMerging(Sema &S, NamedDecl &ND) {
}
}
- // Virtual functions cannot be marked as 'notail'.
- if (auto *Attr = ND.getAttr<NotTailCalledAttr>())
- if (auto *MD = dyn_cast<CXXMethodDecl>(&ND))
- if (MD->isVirtual()) {
- S.Diag(ND.getLocation(),
- diag::err_invalid_attribute_on_virtual_function)
- << Attr;
- ND.dropAttr<NotTailCalledAttr>();
- }
-
// Check the attributes on the function type, if any.
if (const auto *FD = dyn_cast<FunctionDecl>(&ND)) {
// Don't declare this variable in the second operand of the for-statement;
@@ -6727,17 +6804,20 @@ static bool isDeclExternC(const Decl *D) {
llvm_unreachable("Unknown type of decl!");
}
+
/// Returns true if there hasn't been any invalid type diagnosed.
-static bool diagnoseOpenCLTypes(Scope *S, Sema &Se, Declarator &D,
- DeclContext *DC, QualType R) {
+static bool diagnoseOpenCLTypes(Sema &Se, VarDecl *NewVD) {
+ DeclContext *DC = NewVD->getDeclContext();
+ QualType R = NewVD->getType();
+
// OpenCL v2.0 s6.9.b - Image type can only be used as a function argument.
// OpenCL v2.0 s6.13.16.1 - Pipe type can only be used as a function
// argument.
if (R->isImageType() || R->isPipeType()) {
- Se.Diag(D.getIdentifierLoc(),
+ Se.Diag(NewVD->getLocation(),
diag::err_opencl_type_can_only_be_used_as_function_parameter)
<< R;
- D.setInvalidType();
+ NewVD->setInvalidDecl();
return false;
}
@@ -6746,35 +6826,40 @@ static bool diagnoseOpenCLTypes(Scope *S, Sema &Se, Declarator &D,
// OpenCL v2.0 s6.9.q:
// The clk_event_t and reserve_id_t types cannot be declared in program
// scope.
- if (NULL == S->getParent()) {
+ if (NewVD->hasGlobalStorage() && !NewVD->isStaticLocal()) {
if (R->isReserveIDT() || R->isClkEventT() || R->isEventT()) {
- Se.Diag(D.getIdentifierLoc(),
+ Se.Diag(NewVD->getLocation(),
diag::err_invalid_type_for_program_scope_var)
<< R;
- D.setInvalidType();
+ NewVD->setInvalidDecl();
return false;
}
}
// OpenCL v1.0 s6.8.a.3: Pointers to functions are not allowed.
- if (!Se.getOpenCLOptions().isEnabled("__cl_clang_function_pointers")) {
- QualType NR = R;
- while (NR->isPointerType() || NR->isMemberFunctionPointerType()) {
- if (NR->isFunctionPointerType() || NR->isMemberFunctionPointerType()) {
- Se.Diag(D.getIdentifierLoc(), diag::err_opencl_function_pointer);
- D.setInvalidType();
+ if (!Se.getOpenCLOptions().isAvailableOption("__cl_clang_function_pointers",
+ Se.getLangOpts())) {
+ QualType NR = R.getCanonicalType();
+ while (NR->isPointerType() || NR->isMemberFunctionPointerType() ||
+ NR->isReferenceType()) {
+ if (NR->isFunctionPointerType() || NR->isMemberFunctionPointerType() ||
+ NR->isFunctionReferenceType()) {
+ Se.Diag(NewVD->getLocation(), diag::err_opencl_function_pointer)
+ << NR->isReferenceType();
+ NewVD->setInvalidDecl();
return false;
}
NR = NR->getPointeeType();
}
}
- if (!Se.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
+ if (!Se.getOpenCLOptions().isAvailableOption("cl_khr_fp16",
+ Se.getLangOpts())) {
// OpenCL v1.2 s6.1.1.1: reject declaring variables of the half and
// half array type (unless the cl_khr_fp16 extension is enabled).
if (Se.Context.getBaseElementType(R)->isHalfType()) {
- Se.Diag(D.getIdentifierLoc(), diag::err_opencl_half_declaration) << R;
- D.setInvalidType();
+ Se.Diag(NewVD->getLocation(), diag::err_opencl_half_declaration) << R;
+ NewVD->setInvalidDecl();
return false;
}
}
@@ -6784,34 +6869,20 @@ static bool diagnoseOpenCLTypes(Scope *S, Sema &Se, Declarator &D,
// address space qualifiers.
if (R->isEventT()) {
if (R.getAddressSpace() != LangAS::opencl_private) {
- Se.Diag(D.getBeginLoc(), diag::err_event_t_addr_space_qual);
- D.setInvalidType();
+ Se.Diag(NewVD->getBeginLoc(), diag::err_event_t_addr_space_qual);
+ NewVD->setInvalidDecl();
return false;
}
}
- // C++ for OpenCL does not allow the thread_local storage qualifier.
- // OpenCL C does not support thread_local either, and
- // also reject all other thread storage class specifiers.
- DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
- if (TSC != TSCS_unspecified) {
- bool IsCXX = Se.getLangOpts().OpenCLCPlusPlus;
- Se.Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
- diag::err_opencl_unknown_type_specifier)
- << IsCXX << Se.getLangOpts().getOpenCLVersionTuple().getAsString()
- << DeclSpec::getSpecifierName(TSC) << 1;
- D.setInvalidType();
- return false;
- }
-
if (R->isSamplerT()) {
// OpenCL v1.2 s6.9.b p4:
// The sampler type cannot be used with the __local and __global address
// space qualifiers.
if (R.getAddressSpace() == LangAS::opencl_local ||
R.getAddressSpace() == LangAS::opencl_global) {
- Se.Diag(D.getIdentifierLoc(), diag::err_wrong_sampler_addressspace);
- D.setInvalidType();
+ Se.Diag(NewVD->getLocation(), diag::err_wrong_sampler_addressspace);
+ NewVD->setInvalidDecl();
}
// OpenCL v1.2 s6.12.14.1:
@@ -6820,15 +6891,26 @@ static bool diagnoseOpenCLTypes(Scope *S, Sema &Se, Declarator &D,
if (DC->isTranslationUnit() &&
!(R.getAddressSpace() == LangAS::opencl_constant ||
R.isConstQualified())) {
- Se.Diag(D.getIdentifierLoc(), diag::err_opencl_nonconst_global_sampler);
- D.setInvalidType();
+ Se.Diag(NewVD->getLocation(), diag::err_opencl_nonconst_global_sampler);
+ NewVD->setInvalidDecl();
}
- if (D.isInvalidType())
+ if (NewVD->isInvalidDecl())
return false;
}
+
return true;
}
+template <typename AttrTy>
+static void copyAttrFromTypedefToDecl(Sema &S, Decl *D, const TypedefType *TT) {
+ const TypedefNameDecl *TND = TT->getDecl();
+ if (const auto *Attribute = TND->getAttr<AttrTy>()) {
+ AttrTy *Clone = Attribute->clone(S.Context);
+ Clone->setInherited(true);
+ D->addAttr(Clone);
+ }
+}
+
NamedDecl *Sema::ActOnVariableDeclarator(
Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo,
LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists,
@@ -6898,10 +6980,10 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
}
- // If this variable has a variable-modified type and an initializer, try to
+ // If this variable has a VLA type and an initializer, try to
// fold to a constant-sized type. This is otherwise invalid.
- if (D.hasInitializer() && R->isVariablyModifiedType())
- tryToFixVariablyModifiedVarType(*this, TInfo, R, D.getIdentifierLoc(),
+ if (D.hasInitializer() && R->isVariableArrayType())
+ tryToFixVariablyModifiedVarType(TInfo, R, D.getIdentifierLoc(),
/*DiagID=*/0);
bool IsMemberSpecialization = false;
@@ -7176,7 +7258,6 @@ NamedDecl *Sema::ActOnVariableDeclarator(
case ConstexprSpecKind::Constexpr:
NewVD->setConstexpr(true);
- MaybeAddCUDAConstantAttr(NewVD);
// C++1z [dcl.spec.constexpr]p1:
// A static data member declared with the constexpr specifier is
// implicitly an inline variable.
@@ -7242,15 +7323,30 @@ NamedDecl *Sema::ActOnVariableDeclarator(
}
if (getLangOpts().OpenCL) {
-
deduceOpenCLAddressSpace(NewVD);
- diagnoseOpenCLTypes(S, *this, D, DC, NewVD->getType());
+ DeclSpec::TSCS TSC = D.getDeclSpec().getThreadStorageClassSpec();
+ if (TSC != TSCS_unspecified) {
+ bool IsCXX = getLangOpts().OpenCLCPlusPlus;
+ Diag(D.getDeclSpec().getThreadStorageClassSpecLoc(),
+ diag::err_opencl_unknown_type_specifier)
+ << IsCXX << getLangOpts().getOpenCLVersionTuple().getAsString()
+ << DeclSpec::getSpecifierName(TSC) << 1;
+ NewVD->setInvalidDecl();
+ }
}
// Handle attributes prior to checking for duplicates in MergeVarDecl
ProcessDeclAttributes(S, NewVD, D);
+ // FIXME: This is probably the wrong location to be doing this and we should
+ // probably be doing this for more attributes (especially for function
+ // pointer attributes such as format, warn_unused_result, etc.). Ideally
+ // the code to copy attributes would be generated by TableGen.
+ if (R->isFunctionPointerType())
+ if (const auto *TT = R->getAs<TypedefType>())
+ copyAttrFromTypedefToDecl<AllocSizeAttr>(*this, NewVD, TT);
+
if (getLangOpts().CUDA || getLangOpts().OpenMPIsDevice ||
getLangOpts().SYCLIsDevice) {
if (EmitTLSUnsupportedError &&
@@ -7490,7 +7586,8 @@ enum ShadowedDeclKind {
SDK_StaticMember,
SDK_Field,
SDK_Typedef,
- SDK_Using
+ SDK_Using,
+ SDK_StructuredBinding
};
/// Determine what kind of declaration we're shadowing.
@@ -7500,6 +7597,8 @@ static ShadowedDeclKind computeShadowedDeclKind(const NamedDecl *ShadowedDecl,
return SDK_Using;
else if (isa<TypedefDecl>(ShadowedDecl))
return SDK_Typedef;
+ else if (isa<BindingDecl>(ShadowedDecl))
+ return SDK_StructuredBinding;
else if (isa<RecordDecl>(OldDC))
return isa<FieldDecl>(ShadowedDecl) ? SDK_Field : SDK_StaticMember;
@@ -7539,9 +7638,8 @@ NamedDecl *Sema::getShadowedDeclaration(const VarDecl *D,
return nullptr;
NamedDecl *ShadowedDecl = R.getFoundDecl();
- return isa<VarDecl>(ShadowedDecl) || isa<FieldDecl>(ShadowedDecl)
- ? ShadowedDecl
- : nullptr;
+ return isa<VarDecl, FieldDecl, BindingDecl>(ShadowedDecl) ? ShadowedDecl
+ : nullptr;
}
/// Return the declaration shadowed by the given typedef \p D, or null
@@ -7559,6 +7657,18 @@ NamedDecl *Sema::getShadowedDeclaration(const TypedefNameDecl *D,
return isa<TypedefNameDecl>(ShadowedDecl) ? ShadowedDecl : nullptr;
}
+/// Return the declaration shadowed by the given structured binding \p D, or
+/// null if it doesn't shadow any declaration or shadowing warnings are disabled.
+NamedDecl *Sema::getShadowedDeclaration(const BindingDecl *D,
+ const LookupResult &R) {
+ if (!shouldWarnIfShadowedDecl(Diags, R))
+ return nullptr;
+
+ NamedDecl *ShadowedDecl = R.getFoundDecl();
+ return isa<VarDecl, FieldDecl, BindingDecl>(ShadowedDecl) ? ShadowedDecl
+ : nullptr;
+}
+
/// Diagnose variable or built-in function shadowing. Implements
/// -Wshadow.
///
@@ -7878,7 +7988,8 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
// OpenCL v1.2 s6.8 - The static qualifier is valid only in program
// scope.
if (getLangOpts().OpenCLVersion == 120 &&
- !getOpenCLOptions().isEnabled("cl_clang_storage_class_specifiers") &&
+ !getOpenCLOptions().isAvailableOption("cl_clang_storage_class_specifiers",
+ getLangOpts()) &&
NewVD->isStaticLocal()) {
Diag(NewVD->getLocation(), diag::err_static_function_scope);
NewVD->setInvalidDecl();
@@ -7886,6 +7997,9 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
}
if (getLangOpts().OpenCL) {
+ if (!diagnoseOpenCLTypes(*this, NewVD))
+ return;
+
// OpenCL v2.0 s6.12.5 - The __block storage type is not supported.
if (NewVD->hasAttr<BlocksAttr>()) {
Diag(NewVD->getLocation(), diag::err_opencl_block_storage_type);
@@ -7907,23 +8021,17 @@ void Sema::CheckVariableDeclarationType(VarDecl *NewVD) {
return;
}
}
- // OpenCL C v1.2 s6.5 - All program scope variables must be declared in the
- // __constant address space.
- // OpenCL C v2.0 s6.5.1 - Variables defined at program scope and static
- // variables inside a function can also be declared in the global
- // address space.
- // C++ for OpenCL inherits rule from OpenCL C v2.0.
+
// FIXME: Adding local AS in C++ for OpenCL might make sense.
if (NewVD->isFileVarDecl() || NewVD->isStaticLocal() ||
NewVD->hasExternalStorage()) {
- if (!T->isSamplerT() &&
- !T->isDependentType() &&
+ if (!T->isSamplerT() && !T->isDependentType() &&
!(T.getAddressSpace() == LangAS::opencl_constant ||
(T.getAddressSpace() == LangAS::opencl_global &&
- (getLangOpts().OpenCLVersion == 200 ||
- getLangOpts().OpenCLCPlusPlus)))) {
+ getOpenCLOptions().areProgramScopeVariablesSupported(
+ getLangOpts())))) {
int Scope = NewVD->isStaticLocal() | NewVD->hasExternalStorage() << 1;
- if (getLangOpts().OpenCLVersion == 200 || getLangOpts().OpenCLCPlusPlus)
+ if (getOpenCLOptions().areProgramScopeVariablesSupported(getLangOpts()))
Diag(NewVD->getLocation(), diag::err_opencl_global_invalid_addr_space)
<< Scope << "global or constant";
else
@@ -8613,7 +8721,10 @@ static bool isOpenCLSizeDependentType(ASTContext &C, QualType Ty) {
}
static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
- if (PT->isPointerType()) {
+ if (PT->isDependentType())
+ return InvalidKernelParam;
+
+ if (PT->isPointerType() || PT->isReferenceType()) {
QualType PointeeType = PT->getPointeeType();
if (PointeeType.getAddressSpace() == LangAS::opencl_generic ||
PointeeType.getAddressSpace() == LangAS::opencl_private ||
@@ -8630,6 +8741,18 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
return PtrPtrKernelParam;
}
+
+ // C++ for OpenCL v1.0 s2.4:
+ // Moreover the types used in parameters of the kernel functions must be:
+ // Standard layout types for pointer parameters. The same applies to
+ // reference if an implementation supports them in kernel parameters.
+ if (S.getLangOpts().OpenCLCPlusPlus &&
+ !S.getOpenCLOptions().isAvailableOption(
+ "__cl_clang_non_portable_kernel_param_types", S.getLangOpts()) &&
+ !PointeeType->isAtomicType() && !PointeeType->isVoidType() &&
+ !PointeeType->isStandardLayoutType())
+ return InvalidKernelParam;
+
return PtrKernelParam;
}
@@ -8650,12 +8773,10 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
// OpenCL extension spec v1.2 s9.5:
// This extension adds support for half scalar and vector types as built-in
// types that can be used for arithmetic operations, conversions etc.
- if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16") && PT->isHalfType())
+ if (!S.getOpenCLOptions().isAvailableOption("cl_khr_fp16", S.getLangOpts()) &&
+ PT->isHalfType())
return InvalidKernelParam;
- if (PT->isRecordType())
- return RecordKernelParam;
-
// Look into an array argument to check if it has a forbidden type.
if (PT->isArrayType()) {
const Type *UnderlyingTy = PT->getPointeeOrArrayElementType();
@@ -8665,6 +8786,19 @@ static OpenCLParamType getOpenCLKernelParameterType(Sema &S, QualType PT) {
return getOpenCLKernelParameterType(S, QualType(UnderlyingTy, 0));
}
+ // C++ for OpenCL v1.0 s2.4:
+ // Moreover the types used in parameters of the kernel functions must be:
+ // Trivial and standard-layout types C++17 [basic.types] (plain old data
+ // types) for parameters passed by value;
+ if (S.getLangOpts().OpenCLCPlusPlus &&
+ !S.getOpenCLOptions().isAvailableOption(
+ "__cl_clang_non_portable_kernel_param_types", S.getLangOpts()) &&
+ !PT->isOpenCLSpecificType() && !PT.isPODType(S.Context))
+ return InvalidKernelParam;
+
+ if (PT->isRecordType())
+ return RecordKernelParam;
+
return ValidKernelParam;
}
@@ -8685,7 +8819,7 @@ static void checkIsValidOpenCLKernelParameter(
// OpenCL v3.0 s6.11.a:
// A kernel function argument cannot be declared as a pointer to a pointer
// type. [...] This restriction only applies to OpenCL C 1.2 or below.
- if (S.getLangOpts().OpenCLVersion < 120 &&
+ if (S.getLangOpts().OpenCLVersion <= 120 &&
!S.getLangOpts().OpenCLCPlusPlus) {
S.Diag(Param->getLocation(), diag::err_opencl_ptrptr_kernel_param);
D.setInvalidType();
@@ -9420,6 +9554,9 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
}
+ if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice))
+ checkDeviceDecl(NewFD, D.getBeginLoc());
+
if (!getLangOpts().CPlusPlus) {
// Perform semantic checking on the function declaration.
if (!NewFD->isInvalidDecl() && NewFD->isMain())
@@ -9654,6 +9791,10 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
(D.getCXXScopeSpec().getScopeRep()->isDependent() ||
(!Previous.empty() && CurContext->isDependentContext()))) {
// ignore these
+ } else if (NewFD->isCPUDispatchMultiVersion() ||
+ NewFD->isCPUSpecificMultiVersion()) {
+ // ignore this, we allow the redeclaration behavior here to create new
+ // versions of the function.
} else {
// The user tried to provide an out-of-line definition for a
// function that is a member of a class or namespace, but there
@@ -9790,7 +9931,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (II && II->isStr(getCudaConfigureFuncName()) &&
!NewFD->isInvalidDecl() &&
NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
- if (!R->getAs<FunctionType>()->getReturnType()->isScalarType())
+ if (!R->castAs<FunctionType>()->getReturnType()->isScalarType())
Diag(NewFD->getLocation(), diag::err_config_scalar_return)
<< getCudaConfigureFuncName();
Context.setcudaConfigureCallDecl(NewFD);
@@ -10927,24 +11068,6 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (!CurContext->isRecord())
CheckCXXDefaultArguments(NewFD);
- // If this function declares a builtin function, check the type of this
- // declaration against the expected type for the builtin.
- if (unsigned BuiltinID = NewFD->getBuiltinID()) {
- ASTContext::GetBuiltinTypeError Error;
- LookupNecessaryTypesForBuiltin(S, BuiltinID);
- QualType T = Context.GetBuiltinType(BuiltinID, Error);
- // If the type of the builtin differs only in its exception
- // specification, that's OK.
- // FIXME: If the types do differ in this way, it would be better to
- // retain the 'noexcept' form of the type.
- if (!T.isNull() &&
- !Context.hasSameFunctionTypeIgnoringExceptionSpec(T,
- NewFD->getType()))
- // The type of this function differs from the type of the builtin,
- // so forget about the builtin entirely.
- Context.BuiltinInfo.forgetBuiltin(BuiltinID, Context.Idents);
- }
-
// If this function is declared as being extern "C", then check to see if
// the function returns a UDT (class, struct, or union type) that is not C
// compatible, and if it does, warn the user.
@@ -11156,6 +11279,25 @@ void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
}
}
+static bool isDefaultStdCall(FunctionDecl *FD, Sema &S) {
+
+ // Default calling convention for main and wmain is __cdecl
+ if (FD->getName() == "main" || FD->getName() == "wmain")
+ return false;
+
+ // Default calling convention for MinGW is __cdecl
+ const llvm::Triple &T = S.Context.getTargetInfo().getTriple();
+ if (T.isWindowsGNUEnvironment())
+ return false;
+
+ // Default calling convention for WinMain, wWinMain and DllMain
+ // is __stdcall on 32 bit Windows
+ if (T.isOSWindows() && T.getArch() == llvm::Triple::x86)
+ return true;
+
+ return false;
+}
+
void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
@@ -11170,6 +11312,21 @@ void Sema::CheckMSVCRTEntryPoint(FunctionDecl *FD) {
if (FD->getName() != "DllMain")
FD->setHasImplicitReturnZero(true);
+ // Explicitly specified calling conventions are applied to MSVC entry points
+ if (!hasExplicitCallingConv(T)) {
+ if (isDefaultStdCall(FD, *this)) {
+ if (FT->getCallConv() != CC_X86StdCall) {
+ FT = Context.adjustFunctionType(
+ FT, FT->getExtInfo().withCallingConv(CC_X86StdCall));
+ FD->setType(QualType(FT, 0));
+ }
+ } else if (FT->getCallConv() != CC_C) {
+ FT = Context.adjustFunctionType(FT,
+ FT->getExtInfo().withCallingConv(CC_C));
+ FD->setType(QualType(FT, 0));
+ }
+ }
+
if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
Diag(FD->getLocation(), diag::err_mainlike_template_decl) << FD;
FD->setInvalidDecl();
@@ -12071,12 +12228,12 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// If adding the initializer will turn this declaration into a definition,
// and we already have a definition for this variable, diagnose or otherwise
// handle the situation.
- VarDecl *Def;
- if ((Def = VDecl->getDefinition()) && Def != VDecl &&
- (!VDecl->isStaticDataMember() || VDecl->isOutOfLine()) &&
- !VDecl->isThisDeclarationADemotedDefinition() &&
- checkVarDeclRedefinition(Def, VDecl))
- return;
+ if (VarDecl *Def = VDecl->getDefinition())
+ if (Def != VDecl &&
+ (!VDecl->isStaticDataMember() || VDecl->isOutOfLine()) &&
+ !VDecl->isThisDeclarationADemotedDefinition() &&
+ checkVarDeclRedefinition(Def, VDecl))
+ return;
if (getLangOpts().CPlusPlus) {
// C++ [class.static.data]p4
@@ -12190,12 +12347,10 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
// Variables declared within a function/method body (except for references)
// are handled by a dataflow analysis.
// This is undefined behavior in C++, but valid in C.
- if (getLangOpts().CPlusPlus) {
+ if (getLangOpts().CPlusPlus)
if (!VDecl->hasLocalStorage() || VDecl->getType()->isRecordType() ||
- VDecl->getType()->isReferenceType()) {
+ VDecl->getType()->isReferenceType())
CheckSelfReference(*this, RealDecl, Init, DirectInit);
- }
- }
// If the type changed, it means we had an incomplete type that was
// completed by the initializer. For example:
@@ -12441,7 +12596,7 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init, bool DirectInit) {
}
if (LangOpts.OpenMP && VDecl->isFileVarDecl())
- DeclsToCheckForDeferredDiags.push_back(VDecl);
+ DeclsToCheckForDeferredDiags.insert(VDecl);
CheckCompleteVariableDeclaration(VDecl);
}
@@ -12540,9 +12695,21 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (!Var->isInvalidDecl() &&
Var->getType().getAddressSpace() == LangAS::opencl_constant &&
Var->getStorageClass() != SC_Extern && !Var->getInit()) {
- Diag(Var->getLocation(), diag::err_opencl_constant_no_init);
- Var->setInvalidDecl();
- return;
+ bool HasConstExprDefaultConstructor = false;
+ if (CXXRecordDecl *RD = Var->getType()->getAsCXXRecordDecl()) {
+ for (auto *Ctor : RD->ctors()) {
+ if (Ctor->isConstexpr() && Ctor->getNumParams() == 0 &&
+ Ctor->getMethodQualifiers().getAddressSpace() ==
+ LangAS::opencl_constant) {
+ HasConstExprDefaultConstructor = true;
+ }
+ }
+ }
+ if (!HasConstExprDefaultConstructor) {
+ Diag(Var->getLocation(), diag::err_opencl_constant_no_init);
+ Var->setInvalidDecl();
+ return;
+ }
}
if (!Var->isInvalidDecl() && RealDecl->hasAttr<LoaderUninitializedAttr>()) {
@@ -12564,6 +12731,8 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
return;
}
}
+ // The declaration is uninitialized, no need for further checks.
+ return;
}
VarDecl::DefinitionKind DefKind = Var->isThisDeclarationADefinition();
@@ -12608,7 +12777,7 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
Diag(Var->getLocation(), diag::note_private_extern);
}
- if (Context.getTargetInfo().allowDebugInfoForExternalVar() &&
+ if (Context.getTargetInfo().allowDebugInfoForExternalRef() &&
!Var->isInvalidDecl() && !getLangOpts().CPlusPlus)
ExternalDeclarations.push_back(Var);
@@ -12845,6 +13014,8 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
if (var->isInvalidDecl()) return;
+ MaybeAddCUDAConstantAttr(var);
+
if (getLangOpts().OpenCL) {
// OpenCL v2.0 s6.12.5 - Every block variable declaration must have an
// initialiser
@@ -12935,43 +13106,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
}
- // Apply section attributes and pragmas to global variables.
- bool GlobalStorage = var->hasGlobalStorage();
- if (GlobalStorage && var->isThisDeclarationADefinition() &&
- !inTemplateInstantiation()) {
- PragmaStack<StringLiteral *> *Stack = nullptr;
- int SectionFlags = ASTContext::PSF_Read;
- if (var->getType().isConstQualified())
- Stack = &ConstSegStack;
- else if (!var->getInit()) {
- Stack = &BSSSegStack;
- SectionFlags |= ASTContext::PSF_Write;
- } else {
- Stack = &DataSegStack;
- SectionFlags |= ASTContext::PSF_Write;
- }
- if (const SectionAttr *SA = var->getAttr<SectionAttr>()) {
- if (SA->getSyntax() == AttributeCommonInfo::AS_Declspec)
- SectionFlags |= ASTContext::PSF_Implicit;
- UnifySection(SA->getName(), SectionFlags, var);
- } else if (Stack->CurrentValue) {
- SectionFlags |= ASTContext::PSF_Implicit;
- auto SectionName = Stack->CurrentValue->getString();
- var->addAttr(SectionAttr::CreateImplicit(
- Context, SectionName, Stack->CurrentPragmaLocation,
- AttributeCommonInfo::AS_Pragma, SectionAttr::Declspec_allocate));
- if (UnifySection(SectionName, SectionFlags, var))
- var->dropAttr<SectionAttr>();
- }
-
- // Apply the init_seg attribute if this has an initializer. If the
- // initializer turns out to not be dynamic, we'll end up ignoring this
- // attribute.
- if (CurInitSeg && var->getInit())
- var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
- CurInitSegLoc,
- AttributeCommonInfo::AS_Pragma));
- }
if (!var->getType()->isStructureType() && var->hasInit() &&
isa<InitListExpr>(var->getInit())) {
@@ -13021,14 +13155,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
}
- // All the following checks are C++ only.
- if (!getLangOpts().CPlusPlus) {
- // If this variable must be emitted, add it as an initializer for the
- // current module.
- if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
- Context.addModuleInitializer(ModuleScopes.back().Module, var);
- return;
- }
QualType type = var->getType();
@@ -13036,11 +13162,14 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
getCurFunction()->addByrefBlockVar(var);
Expr *Init = var->getInit();
+ bool GlobalStorage = var->hasGlobalStorage();
bool IsGlobal = GlobalStorage && !var->isStaticLocal();
QualType baseType = Context.getBaseElementType(type);
+ bool HasConstInit = true;
// Check whether the initializer is sufficiently constant.
- if (!type->isDependentType() && Init && !Init->isValueDependent() &&
+ if (getLangOpts().CPlusPlus && !type->isDependentType() && Init &&
+ !Init->isValueDependent() &&
(GlobalStorage || var->isConstexpr() ||
var->mightBeUsableInConstantExpressions(Context))) {
// If this variable might have a constant initializer or might be usable in
@@ -13048,7 +13177,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
// do this lazily, because the result might depend on things that change
// later, such as which constexpr functions happen to be defined.
SmallVector<PartialDiagnosticAt, 8> Notes;
- bool HasConstInit;
if (!getLangOpts().CPlusPlus11) {
// Prior to C++11, in contexts where a constant initializer is required,
// the set of valid constant initializers is described by syntactic rules
@@ -13113,6 +13241,57 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
}
}
+ // Apply section attributes and pragmas to global variables.
+ if (GlobalStorage && var->isThisDeclarationADefinition() &&
+ !inTemplateInstantiation()) {
+ PragmaStack<StringLiteral *> *Stack = nullptr;
+ int SectionFlags = ASTContext::PSF_Read;
+ if (var->getType().isConstQualified()) {
+ if (HasConstInit)
+ Stack = &ConstSegStack;
+ else {
+ Stack = &BSSSegStack;
+ SectionFlags |= ASTContext::PSF_Write;
+ }
+ } else if (var->hasInit() && HasConstInit) {
+ Stack = &DataSegStack;
+ SectionFlags |= ASTContext::PSF_Write;
+ } else {
+ Stack = &BSSSegStack;
+ SectionFlags |= ASTContext::PSF_Write;
+ }
+ if (const SectionAttr *SA = var->getAttr<SectionAttr>()) {
+ if (SA->getSyntax() == AttributeCommonInfo::AS_Declspec)
+ SectionFlags |= ASTContext::PSF_Implicit;
+ UnifySection(SA->getName(), SectionFlags, var);
+ } else if (Stack->CurrentValue) {
+ SectionFlags |= ASTContext::PSF_Implicit;
+ auto SectionName = Stack->CurrentValue->getString();
+ var->addAttr(SectionAttr::CreateImplicit(
+ Context, SectionName, Stack->CurrentPragmaLocation,
+ AttributeCommonInfo::AS_Pragma, SectionAttr::Declspec_allocate));
+ if (UnifySection(SectionName, SectionFlags, var))
+ var->dropAttr<SectionAttr>();
+ }
+
+ // Apply the init_seg attribute if this has an initializer. If the
+ // initializer turns out to not be dynamic, we'll end up ignoring this
+ // attribute.
+ if (CurInitSeg && var->getInit())
+ var->addAttr(InitSegAttr::CreateImplicit(Context, CurInitSeg->getString(),
+ CurInitSegLoc,
+ AttributeCommonInfo::AS_Pragma));
+ }
+
+ // All the following checks are C++ only.
+ if (!getLangOpts().CPlusPlus) {
+ // If this variable must be emitted, add it as an initializer for the
+ // current module.
+ if (Context.DeclMustBeEmitted(var) && !ModuleScopes.empty())
+ Context.addModuleInitializer(ModuleScopes.back().Module, var);
+ return;
+ }
+
// Require the destructor.
if (!type->isDependentType())
if (const RecordType *recordType = baseType->getAs<RecordType>())
@@ -13128,16 +13307,6 @@ void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
CheckCompleteDecompositionDeclaration(DD);
}
-/// Determines if a variable's alignment is dependent.
-static bool hasDependentAlignment(VarDecl *VD) {
- if (VD->getType()->isDependentType())
- return true;
- for (auto *I : VD->specific_attrs<AlignedAttr>())
- if (I->isAlignmentDependent())
- return true;
- return false;
-}
-
/// Check if VD needs to be dllexport/dllimport due to being in a
/// dllexport/import function.
void Sema::CheckStaticLocalForDllExport(VarDecl *VD) {
@@ -13226,8 +13395,7 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (unsigned MaxAlign = Context.getTargetInfo().getMaxTLSAlign()) {
// Protect the check so that it's not performed on dependent types and
// dependent alignments (we can't determine the alignment in that case).
- if (VD->getTLSKind() && !hasDependentAlignment(VD) &&
- !VD->isInvalidDecl()) {
+ if (VD->getTLSKind() && !VD->hasDependentAlignment()) {
CharUnits MaxAlignChars = Context.toCharUnitsFromBits(MaxAlign);
if (Context.getDeclAlign(VD) > MaxAlignChars) {
Diag(VD->getLocation(), diag::err_tls_var_aligned_over_maximum)
@@ -13291,10 +13459,18 @@ void Sema::FinalizeDeclaration(Decl *ThisDecl) {
if (UsedAttr *Attr = VD->getAttr<UsedAttr>()) {
if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
- Diag(Attr->getLocation(), diag::warn_attribute_ignored) << Attr;
+ Diag(Attr->getLocation(), diag::warn_attribute_ignored_on_non_definition)
+ << Attr;
VD->dropAttr<UsedAttr>();
}
}
+ if (RetainAttr *Attr = VD->getAttr<RetainAttr>()) {
+ if (!Attr->isInherited() && !VD->isThisDeclarationADefinition()) {
+ Diag(Attr->getLocation(), diag::warn_attribute_ignored_on_non_definition)
+ << Attr;
+ VD->dropAttr<RetainAttr>();
+ }
+ }
const DeclContext *DC = VD->getDeclContext();
// If there's a #pragma GCC visibility in scope, and this isn't a class
@@ -13869,7 +14045,7 @@ ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
// Don't warn about 'main'.
if (isa<TranslationUnitDecl>(FD->getDeclContext()->getRedeclContext()))
if (IdentifierInfo *II = FD->getIdentifier())
- if (II->isStr("main"))
+ if (II->isStr("main") || II->isStr("efi_main"))
return false;
// Don't warn about inline functions.
@@ -14653,7 +14829,7 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
auto ES = getEmissionStatus(FD);
if (ES == Sema::FunctionEmissionStatus::Emitted ||
ES == Sema::FunctionEmissionStatus::Unknown)
- DeclsToCheckForDeferredDiags.push_back(FD);
+ DeclsToCheckForDeferredDiags.insert(FD);
}
return dcl;
@@ -15796,7 +15972,7 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
Diag(KWLoc, diag::err_using_decl_conflict_reverse);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
- Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl)
+ Diag(Shadow->getIntroducer()->getLocation(), diag::note_using_decl)
<< 0;
// Recover by ignoring the old declaration.
Previous.clear();
@@ -16328,6 +16504,7 @@ Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
+ bool IsAbstract,
SourceLocation LBraceLoc) {
AdjustDeclIfTemplate(TagD);
CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);
@@ -16337,11 +16514,14 @@ void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
if (!Record->getIdentifier())
return;
- if (FinalLoc.isValid())
+ if (IsAbstract)
+ Record->markAbstract();
+
+ if (FinalLoc.isValid()) {
Record->addAttr(FinalAttr::Create(
Context, FinalLoc, AttributeCommonInfo::AS_Keyword,
static_cast<FinalAttr::Spelling>(IsFinalSpelledSealed)));
-
+ }
// C++ [class]p2:
// [...] The class-name is also inserted into the scope of the
// class itself; this is known as the injected-class-name. For
@@ -16475,16 +16655,16 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
if (Value.isSigned() && Value.isNegative()) {
if (FieldName)
return Diag(FieldLoc, diag::err_bitfield_has_negative_width)
- << FieldName << Value.toString(10);
+ << FieldName << toString(Value, 10);
return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width)
- << Value.toString(10);
+ << toString(Value, 10);
}
// The size of the bit-field must not exceed our maximum permitted object
// size.
if (Value.getActiveBits() > ConstantArrayType::getMaxSizeBits(Context)) {
return Diag(FieldLoc, diag::err_bitfield_too_wide)
- << !FieldName << FieldName << Value.toString(10);
+ << !FieldName << FieldName << toString(Value, 10);
}
if (!FieldTy->isDependentType()) {
@@ -16502,14 +16682,9 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
if (CStdConstraintViolation || MSBitfieldViolation) {
unsigned DiagWidth =
CStdConstraintViolation ? TypeWidth : TypeStorageSize;
- if (FieldName)
- return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
- << FieldName << Value.toString(10)
- << !CStdConstraintViolation << DiagWidth;
-
- return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_width)
- << Value.toString(10) << !CStdConstraintViolation
- << DiagWidth;
+ return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_width)
+ << (bool)FieldName << FieldName << toString(Value, 10)
+ << !CStdConstraintViolation << DiagWidth;
}
// Warn on types where the user might conceivably expect to get all
@@ -16517,7 +16692,7 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
// 'bool'.
if (BitfieldIsOverwide && !FieldTy->isBooleanType() && FieldName) {
Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_width)
- << FieldName << Value.toString(10)
+ << FieldName << toString(Value, 10)
<< (unsigned)TypeWidth;
}
}
@@ -16695,8 +16870,10 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
Record->setInvalidDecl();
InvalidDecl = true;
}
- // OpenCL v1.2 s6.9.c: bitfields are not supported.
- if (BitWidth) {
+ // OpenCL v1.2 s6.9.c: bitfields are not supported, unless Clang extension
+ // is enabled.
+ if (BitWidth && !getOpenCLOptions().isAvailableOption(
+ "__cl_clang_bitfields", LangOpts)) {
Diag(Loc, diag::err_opencl_bitfields);
InvalidDecl = true;
}
@@ -16713,7 +16890,7 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
// than a variably modified type.
if (!InvalidDecl && T->isVariablyModifiedType()) {
if (!tryToFixVariablyModifiedVarType(
- *this, TInfo, T, Loc, diag::err_typecheck_field_variable_size))
+ TInfo, T, Loc, diag::err_typecheck_field_variable_size))
InvalidDecl = true;
}
@@ -16941,7 +17118,7 @@ Decl *Sema::ActOnIvar(Scope *S,
// than a variably modified type.
else if (T->isVariablyModifiedType()) {
if (!tryToFixVariablyModifiedVarType(
- *this, TInfo, T, Loc, diag::err_typecheck_ivar_variable_size))
+ TInfo, T, Loc, diag::err_typecheck_ivar_variable_size))
D.setInvalidType();
}
@@ -17627,7 +17804,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
// Complain if the value is not representable in an int.
if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
Diag(IdLoc, diag::ext_enum_value_not_int)
- << EnumVal.toString(10) << Val->getSourceRange()
+ << toString(EnumVal, 10) << Val->getSourceRange()
<< (EnumVal.isUnsigned() || EnumVal.isNonNegative());
else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
// Force the type of the expression to 'int'.
@@ -17685,11 +17862,11 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (Enum->isFixed())
// When the underlying type is fixed, this is ill-formed.
Diag(IdLoc, diag::err_enumerator_wrapped)
- << EnumVal.toString(10)
+ << toString(EnumVal, 10)
<< EltTy;
else
Diag(IdLoc, diag::ext_enumerator_increment_too_large)
- << EnumVal.toString(10);
+ << toString(EnumVal, 10);
} else {
EltTy = T;
}
@@ -17713,7 +17890,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
// Enforce C99 6.7.2.2p2 even when we compute the next value.
Diag(IdLoc, diag::ext_enum_value_not_int)
- << EnumVal.toString(10) << 1;
+ << toString(EnumVal, 10) << 1;
}
}
}
@@ -17967,14 +18144,14 @@ static void CheckForDuplicateEnumValues(Sema &S, ArrayRef<Decl *> Elements,
// Emit warning for one enum constant.
auto *FirstECD = Vec->front();
S.Diag(FirstECD->getLocation(), diag::warn_duplicate_enum_values)
- << FirstECD << FirstECD->getInitVal().toString(10)
+ << FirstECD << toString(FirstECD->getInitVal(), 10)
<< FirstECD->getSourceRange();
// Emit one note for each of the remaining enum constants with
// the same value.
for (auto *ECD : llvm::make_range(Vec->begin() + 1, Vec->end()))
S.Diag(ECD->getLocation(), diag::note_duplicate_element)
- << ECD << ECD->getInitVal().toString(10)
+ << ECD << toString(ECD->getInitVal(), 10)
<< ECD->getSourceRange();
}
}
@@ -18209,7 +18386,7 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
!Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
ECD->setInitExpr(ImplicitCastExpr::Create(
Context, NewTy, CK_IntegralCast, ECD->getInitExpr(),
- /*base paths*/ nullptr, VK_RValue, FPOptionsOverride()));
+ /*base paths*/ nullptr, VK_PRValue, FPOptionsOverride()));
if (getLangOpts().CPlusPlus)
// C++ [dcl.enum]p4: Following the closing brace of an
// enum-specifier, each enumerator has the type of its
@@ -18320,6 +18497,8 @@ Decl *Sema::getObjCDeclContext() const {
Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
bool Final) {
+ assert(FD && "Expected non-null FunctionDecl");
+
// SYCL functions can be template, so we check if they have appropriate
// attribute prior to checking if it is a template.
if (LangOpts.SYCLIsDevice && FD->hasAttr<SYCLKernelAttr>())
@@ -18329,42 +18508,51 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
if (FD->isDependentContext())
return FunctionEmissionStatus::TemplateDiscarded;
- FunctionEmissionStatus OMPES = FunctionEmissionStatus::Unknown;
+ // Check whether this function is an externally visible definition.
+ auto IsEmittedForExternalSymbol = [this, FD]() {
+ // We have to check the GVA linkage of the function's *definition* -- if we
+ // only have a declaration, we don't know whether or not the function will
+ // be emitted, because (say) the definition could include "inline".
+ FunctionDecl *Def = FD->getDefinition();
+
+ return Def && !isDiscardableGVALinkage(
+ getASTContext().GetGVALinkageForFunction(Def));
+ };
+
if (LangOpts.OpenMPIsDevice) {
+ // In OpenMP device mode we will not emit host only functions, or functions
+ // we don't need due to their linkage.
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
- if (DevTy.hasValue()) {
+ // DevTy may be changed later by
+ // #pragma omp declare target to(*) device_type(*).
+ // Therefore DevTy having no value does not imply host. The emission status
+ // will be checked again at the end of compilation unit with Final = true.
+ if (DevTy.hasValue())
if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host)
- OMPES = FunctionEmissionStatus::OMPDiscarded;
- else if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost ||
- *DevTy == OMPDeclareTargetDeclAttr::DT_Any) {
- OMPES = FunctionEmissionStatus::Emitted;
- }
- }
- } else if (LangOpts.OpenMP) {
- // In OpenMP 4.5 all the functions are host functions.
- if (LangOpts.OpenMP <= 45) {
- OMPES = FunctionEmissionStatus::Emitted;
- } else {
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
- // In OpenMP 5.0 or above, DevTy may be changed later by
- // #pragma omp declare target to(*) device_type(*). Therefore DevTy
- // having no value does not imply host. The emission status will be
- // checked again at the end of compilation unit.
- if (DevTy.hasValue()) {
- if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
- OMPES = FunctionEmissionStatus::OMPDiscarded;
- } else if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host ||
- *DevTy == OMPDeclareTargetDeclAttr::DT_Any)
- OMPES = FunctionEmissionStatus::Emitted;
- } else if (Final)
- OMPES = FunctionEmissionStatus::Emitted;
- }
- }
- if (OMPES == FunctionEmissionStatus::OMPDiscarded ||
- (OMPES == FunctionEmissionStatus::Emitted && !LangOpts.CUDA))
- return OMPES;
+ return FunctionEmissionStatus::OMPDiscarded;
+ // If we have an explicit value for the device type, or we are in a target
+ // declare context, we need to emit all extern and used symbols.
+ if (isInOpenMPDeclareTargetContext() || DevTy.hasValue())
+ if (IsEmittedForExternalSymbol())
+ return FunctionEmissionStatus::Emitted;
+ // Device mode only emits what it must, if it wasn't tagged yet and needed,
+ // we'll omit it.
+ if (Final)
+ return FunctionEmissionStatus::OMPDiscarded;
+ } else if (LangOpts.OpenMP > 45) {
+ // In OpenMP host compilation prior to 5.0 everything was an emitted host
+ // function. In 5.0, no_host was introduced which might cause a function to
+ // be omitted.
+ Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
+ OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
+ if (DevTy.hasValue())
+ if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
+ return FunctionEmissionStatus::OMPDiscarded;
+ }
+
+ if (Final && LangOpts.OpenMP && !LangOpts.CUDA)
+ return FunctionEmissionStatus::Emitted;
if (LangOpts.CUDA) {
// When compiling for device, host functions are never emitted. Similarly,
@@ -18378,17 +18566,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
(T == Sema::CFT_Device || T == Sema::CFT_Global))
return FunctionEmissionStatus::CUDADiscarded;
- // Check whether this function is externally visible -- if so, it's
- // known-emitted.
- //
- // We have to check the GVA linkage of the function's *definition* -- if we
- // only have a declaration, we don't know whether or not the function will
- // be emitted, because (say) the definition could include "inline".
- FunctionDecl *Def = FD->getDefinition();
-
- if (Def &&
- !isDiscardableGVALinkage(getASTContext().GetGVALinkageForFunction(Def))
- && (!LangOpts.OpenMP || OMPES == FunctionEmissionStatus::Emitted))
+ if (IsEmittedForExternalSymbol())
return FunctionEmissionStatus::Emitted;
}
diff --git a/clang/lib/Sema/SemaDeclAttr.cpp b/clang/lib/Sema/SemaDeclAttr.cpp
index 30d08b3d4ac0..bb4ce8d4962e 100644
--- a/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/clang/lib/Sema/SemaDeclAttr.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/DarwinSDKInfo.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetBuiltins.h"
@@ -40,6 +41,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/Support/Error.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
@@ -153,7 +156,8 @@ static bool isInstanceMethod(const Decl *D) {
return false;
}
-static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
+static inline bool isNSStringType(QualType T, ASTContext &Ctx,
+ bool AllowNSAttributedString = false) {
const auto *PT = T->getAs<ObjCObjectPointerType>();
if (!PT)
return false;
@@ -164,6 +168,9 @@ static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
IdentifierInfo* ClsName = Cls->getIdentifier();
+ if (AllowNSAttributedString &&
+ ClsName == &Ctx.Idents.get("NSAttributedString"))
+ return true;
// FIXME: Should we walk the chain of classes?
return ClsName == &Ctx.Idents.get("NSString") ||
ClsName == &Ctx.Idents.get("NSMutableString");
@@ -190,44 +197,6 @@ static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
return AL.getNumArgs() + AL.hasParsedType();
}
-template <typename Compare>
-static bool checkAttributeNumArgsImpl(Sema &S, const ParsedAttr &AL,
- unsigned Num, unsigned Diag,
- Compare Comp) {
- if (Comp(getNumAttributeArgs(AL), Num)) {
- S.Diag(AL.getLoc(), Diag) << AL << Num;
- return false;
- }
-
- return true;
-}
-
-/// Check if the attribute has exactly as many args as Num. May
-/// output an error.
-static bool checkAttributeNumArgs(Sema &S, const ParsedAttr &AL, unsigned Num) {
- return checkAttributeNumArgsImpl(S, AL, Num,
- diag::err_attribute_wrong_number_arguments,
- std::not_equal_to<unsigned>());
-}
-
-/// Check if the attribute has at least as many args as Num. May
-/// output an error.
-static bool checkAttributeAtLeastNumArgs(Sema &S, const ParsedAttr &AL,
- unsigned Num) {
- return checkAttributeNumArgsImpl(S, AL, Num,
- diag::err_attribute_too_few_arguments,
- std::less<unsigned>());
-}
-
-/// Check if the attribute has at most as many args as Num. May
-/// output an error.
-static bool checkAttributeAtMostNumArgs(Sema &S, const ParsedAttr &AL,
- unsigned Num) {
- return checkAttributeNumArgsImpl(S, AL, Num,
- diag::err_attribute_too_many_arguments,
- std::greater<unsigned>());
-}
-
/// A helper function to provide Attribute Location for the Attr types
/// AND the ParsedAttr.
template <typename AttrInfo>
@@ -261,7 +230,7 @@ static bool checkUInt32Argument(Sema &S, const AttrInfo &AI, const Expr *Expr,
if (!I->isIntN(32)) {
S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
- << I->toString(10, false) << 32 << /* Unsigned */ 1;
+ << toString(*I, 10, false) << 32 << /* Unsigned */ 1;
return false;
}
@@ -289,7 +258,7 @@ static bool checkPositiveIntArgument(Sema &S, const AttrInfo &AI, const Expr *Ex
llvm::APSInt I(32); // for toString
I = UVal;
S.Diag(Expr->getExprLoc(), diag::err_ice_too_large)
- << I.toString(10, false) << 32 << /* Unsigned */ 0;
+ << toString(I, 10, false) << 32 << /* Unsigned */ 0;
return false;
}
@@ -423,10 +392,10 @@ appendDiagnostics(const Sema::SemaDiagnosticBuilder &Bldr, T &&ExtraArg,
std::forward<DiagnosticArgs>(ExtraArgs)...);
}
-/// Add an attribute {@code AttrType} to declaration {@code D}, provided that
-/// {@code PassesCheck} is true.
-/// Otherwise, emit diagnostic {@code DiagID}, passing in all parameters
-/// specified in {@code ExtraArgs}.
+/// Add an attribute @c AttrType to declaration @c D, provided that
+/// @c PassesCheck is true.
+/// Otherwise, emit diagnostic @c DiagID, passing in all parameters
+/// specified in @c ExtraArgs.
template <typename AttrType, typename... DiagnosticArgs>
static void handleSimpleAttributeOrDiagnose(Sema &S, Decl *D,
const AttributeCommonInfo &CI,
@@ -440,24 +409,6 @@ static void handleSimpleAttributeOrDiagnose(Sema &S, Decl *D,
handleSimpleAttribute<AttrType>(S, D, CI);
}
-template <typename AttrType>
-static void handleSimpleAttributeWithExclusions(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- handleSimpleAttribute<AttrType>(S, D, AL);
-}
-
-/// Applies the given attribute to the Decl so long as the Decl doesn't
-/// already have one of the given incompatible attributes.
-template <typename AttrType, typename IncompatibleAttrType,
- typename... IncompatibleAttrTypes>
-static void handleSimpleAttributeWithExclusions(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<IncompatibleAttrType>(S, D, AL))
- return;
- handleSimpleAttributeWithExclusions<AttrType, IncompatibleAttrTypes...>(S, D,
- AL);
-}
-
/// Check if the passed-in expression is of type int or bool.
static bool isIntOrBool(Expr *Exp) {
QualType QT = Exp->getType();
@@ -546,16 +497,9 @@ static bool checkRecordDeclForAttr(const RecordDecl *RD) {
// Else check if any base classes have the attribute.
if (const auto *CRD = dyn_cast<CXXRecordDecl>(RD)) {
- CXXBasePaths BPaths(false, false);
- if (CRD->lookupInBases(
- [](const CXXBaseSpecifier *BS, CXXBasePath &) {
- const auto &Ty = *BS->getType();
- // If it's type-dependent, we assume it could have the attribute.
- if (Ty.isDependentType())
- return true;
- return Ty.castAs<RecordType>()->getDecl()->hasAttr<AttrType>();
- },
- BPaths, true))
+ if (!CRD->forallBases([](const CXXRecordDecl *Base) {
+ return !Base->hasAttr<AttrType>();
+ }))
return true;
}
return false;
@@ -770,7 +714,7 @@ static void handlePtGuardedByAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static bool checkAcquireOrderAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return false;
// Check that this attribute only applies to lockable types.
@@ -846,32 +790,33 @@ static void handleAssertExclusiveLockAttr(Sema &S, Decl *D,
///
/// AttrArgNo is used to actually retrieve the argument, so it's base-0.
template <typename AttrInfo>
-static bool checkParamIsIntegerType(Sema &S, const FunctionDecl *FD,
- const AttrInfo &AI, unsigned AttrArgNo) {
+static bool checkParamIsIntegerType(Sema &S, const Decl *D, const AttrInfo &AI,
+ unsigned AttrArgNo) {
assert(AI.isArgExpr(AttrArgNo) && "Expected expression argument");
Expr *AttrArg = AI.getArgAsExpr(AttrArgNo);
ParamIdx Idx;
- if (!checkFunctionOrMethodParameterIndex(S, FD, AI, AttrArgNo + 1, AttrArg,
+ if (!checkFunctionOrMethodParameterIndex(S, D, AI, AttrArgNo + 1, AttrArg,
Idx))
return false;
- const ParmVarDecl *Param = FD->getParamDecl(Idx.getASTIndex());
- if (!Param->getType()->isIntegerType() && !Param->getType()->isCharType()) {
+ QualType ParamTy = getFunctionOrMethodParamType(D, Idx.getASTIndex());
+ if (!ParamTy->isIntegerType() && !ParamTy->isCharType()) {
SourceLocation SrcLoc = AttrArg->getBeginLoc();
S.Diag(SrcLoc, diag::err_attribute_integers_only)
- << AI << Param->getSourceRange();
+ << AI << getFunctionOrMethodParamRange(D, Idx.getASTIndex());
return false;
}
return true;
}
static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1) ||
- !checkAttributeAtMostNumArgs(S, AL, 2))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
return;
- const auto *FD = cast<FunctionDecl>(D);
- if (!FD->getReturnType()->isPointerType()) {
+ assert(isFunctionOrMethod(D) && hasFunctionProto(D));
+
+ QualType RetTy = getFunctionOrMethodResultType(D);
+ if (!RetTy->isPointerType()) {
S.Diag(AL.getLoc(), diag::warn_attribute_return_pointers_only) << AL;
return;
}
@@ -881,7 +826,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Parameter indices are 1-indexed, hence Index=1
if (!checkPositiveIntArgument(S, AL, SizeExpr, SizeArgNoVal, /*Idx=*/1))
return;
- if (!checkParamIsIntegerType(S, FD, AL, /*AttrArgNo=*/0))
+ if (!checkParamIsIntegerType(S, D, AL, /*AttrArgNo=*/0))
return;
ParamIdx SizeArgNo(SizeArgNoVal, D);
@@ -892,7 +837,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Parameter indices are 1-based, hence Index=2
if (!checkPositiveIntArgument(S, AL, NumberExpr, Val, /*Idx=*/2))
return;
- if (!checkParamIsIntegerType(S, FD, AL, /*AttrArgNo=*/1))
+ if (!checkParamIsIntegerType(S, D, AL, /*AttrArgNo=*/1))
return;
NumberArgNo = ParamIdx(Val, D);
}
@@ -903,7 +848,7 @@ static void handleAllocSizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
static bool checkTryLockFunAttrCommon(Sema &S, Decl *D, const ParsedAttr &AL,
SmallVectorImpl<Expr *> &Args) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return false;
if (!isIntOrBool(AL.getArgAsExpr(0))) {
@@ -950,7 +895,7 @@ static void handleLockReturnedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleLocksExcludedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return;
// check that all arguments are lockable objects
@@ -1192,7 +1137,7 @@ static bool checkForConsumableClass(Sema &S, const CXXMethodDecl *MD,
}
static void handleCallableWhenAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return;
if (!checkForConsumableClass(S, cast<CXXMethodDecl>(D), AL))
@@ -1991,6 +1936,12 @@ static void handleTLSModelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
+ if (S.Context.getTargetInfo().getTriple().isOSAIX() &&
+ Model != "global-dynamic") {
+ S.Diag(LiteralLoc, diag::err_aix_attr_unsupported_tls_model) << Model;
+ return;
+ }
+
D->addAttr(::new (S.Context) TLSModelAttr(S.Context, AL, Model));
}
@@ -2015,7 +1966,7 @@ static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return;
SmallVector<IdentifierInfo *, 8> CPUs;
@@ -2062,8 +2013,7 @@ static void handleCommonAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (CommonAttr *CA = S.mergeCommonAttr(D, AL))
- D->addAttr(CA);
+ D->addAttr(::new (S.Context) CommonAttr(S.Context, AL));
}
static void handleCmseNSEntryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -2082,9 +2032,6 @@ static void handleCmseNSEntryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<DisableTailCallsAttr>(S, D, AL))
- return;
-
if (AL.isDeclspecAttribute()) {
const auto &Triple = S.getASTContext().getTargetInfo().getTriple();
const auto &Arch = Triple.getArch();
@@ -2119,7 +2066,7 @@ static void handleNoCfCheckAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
}
bool Sema::CheckAttrNoArgs(const ParsedAttr &Attrs) {
- if (!checkAttributeNumArgs(*this, Attrs, 0)) {
+ if (!Attrs.checkExactlyNumArgs(*this, 0)) {
Attrs.setInvalid();
return true;
}
@@ -2147,7 +2094,7 @@ static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ValueDecl *VD = dyn_cast<ValueDecl>(D);
if (!VD || (!VD->getType()->isBlockPointerType() &&
!VD->getType()->isFunctionPointerType())) {
- S.Diag(AL.getLoc(), AL.isCXX11Attribute()
+ S.Diag(AL.getLoc(), AL.isStandardAttributeSyntax()
? diag::err_attribute_wrong_decl_type
: diag::warn_attribute_wrong_decl_type)
<< AL << ExpectedFunctionMethodOrBlock;
@@ -2354,6 +2301,7 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
case AMK_Override:
case AMK_ProtocolImplementation:
+ case AMK_OptionalProtocolImplementation:
OverrideOrImpl = true;
break;
}
@@ -2422,6 +2370,14 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
diag::warn_mismatched_availability_override_unavail)
<< AvailabilityAttr::getPrettyPlatformName(Platform->getName())
<< (AMK == AMK_Override);
+ } else if (Which != 1 && AMK == AMK_OptionalProtocolImplementation) {
+ // Allow different 'introduced' / 'obsoleted' availability versions
+ // on a method that implements an optional protocol requirement. It
+ // makes less sense to allow this for 'deprecated' as the user can't
+ // see if the method is 'deprecated' as 'respondsToSelector' will
+ // still return true when the method is deprecated.
+ ++i;
+ continue;
} else {
Diag(OldAA->getLocation(),
diag::warn_mismatched_availability_override)
@@ -2491,7 +2447,14 @@ AvailabilityAttr *Sema::mergeAvailabilityAttr(
}
static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeNumArgs(S, AL, 1))
+ if (isa<UsingDecl, UnresolvedUsingTypenameDecl, UnresolvedUsingValueDecl>(
+ D)) {
+ S.Diag(AL.getRange().getBegin(), diag::warn_deprecated_ignored_on_using)
+ << AL;
+ return;
+ }
+
+ if (!AL.checkExactlyNumArgs(S, 1))
return;
IdentifierLoc *Platform = AL.getArgAsIdent(0);
@@ -2594,15 +2557,90 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (NewAttr)
D->addAttr(NewAttr);
}
+ } else if (S.Context.getTargetInfo().getTriple().getOS() ==
+ llvm::Triple::IOS &&
+ S.Context.getTargetInfo().getTriple().isMacCatalystEnvironment()) {
+ auto GetSDKInfo = [&]() {
+ return S.getDarwinSDKInfoForAvailabilityChecking(AL.getRange().getBegin(),
+ "macOS");
+ };
+
+ // Transcribe "ios" to "maccatalyst" (and add a new attribute).
+ IdentifierInfo *NewII = nullptr;
+ if (II->getName() == "ios")
+ NewII = &S.Context.Idents.get("maccatalyst");
+ else if (II->getName() == "ios_app_extension")
+ NewII = &S.Context.Idents.get("maccatalyst_app_extension");
+ if (NewII) {
+ auto MinMacCatalystVersion = [](const VersionTuple &V) {
+ if (V.empty())
+ return V;
+ if (V.getMajor() < 13 ||
+ (V.getMajor() == 13 && V.getMinor() && *V.getMinor() < 1))
+ return VersionTuple(13, 1); // The min Mac Catalyst version is 13.1.
+ return V;
+ };
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL.getRange(), NewII, true /*Implicit*/,
+ MinMacCatalystVersion(Introduced.Version),
+ MinMacCatalystVersion(Deprecated.Version),
+ MinMacCatalystVersion(Obsoleted.Version), IsUnavailable, Str,
+ IsStrict, Replacement, Sema::AMK_None,
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ } else if (II->getName() == "macos" && GetSDKInfo() &&
+ (!Introduced.Version.empty() || !Deprecated.Version.empty() ||
+ !Obsoleted.Version.empty())) {
+ if (const auto *MacOStoMacCatalystMapping =
+ GetSDKInfo()->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ // Infer Mac Catalyst availability from the macOS availability attribute
+ // if it has versioned availability. Don't infer 'unavailable'. This
+ // inferred availability has lower priority than the other availability
+ // attributes that are inferred from 'ios'.
+ NewII = &S.Context.Idents.get("maccatalyst");
+ auto RemapMacOSVersion =
+ [&](const VersionTuple &V) -> Optional<VersionTuple> {
+ if (V.empty())
+ return None;
+ // API_TO_BE_DEPRECATED is 100000.
+ if (V.getMajor() == 100000)
+ return VersionTuple(100000);
+ // The minimum iosmac version is 13.1
+ return MacOStoMacCatalystMapping->map(V, VersionTuple(13, 1), None);
+ };
+ Optional<VersionTuple> NewIntroduced =
+ RemapMacOSVersion(Introduced.Version),
+ NewDeprecated =
+ RemapMacOSVersion(Deprecated.Version),
+ NewObsoleted =
+ RemapMacOSVersion(Obsoleted.Version);
+ if (NewIntroduced || NewDeprecated || NewObsoleted) {
+ auto VersionOrEmptyVersion =
+ [](const Optional<VersionTuple> &V) -> VersionTuple {
+ return V ? *V : VersionTuple();
+ };
+ AvailabilityAttr *NewAttr = S.mergeAvailabilityAttr(
+ ND, AL.getRange(), NewII, true /*Implicit*/,
+ VersionOrEmptyVersion(NewIntroduced),
+ VersionOrEmptyVersion(NewDeprecated),
+ VersionOrEmptyVersion(NewObsoleted), /*IsUnavailable=*/false, Str,
+ IsStrict, Replacement, Sema::AMK_None,
+ PriorityModifier + Sema::AP_InferredFromOtherPlatform +
+ Sema::AP_InferredFromOtherPlatform);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+ }
+ }
+ }
}
}
static void handleExternalSourceSymbolAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 3))
return;
- assert(checkAttributeAtMostNumArgs(S, AL, 3) &&
- "Invalid number of arguments in an external_source_symbol attribute");
StringRef Language;
if (const auto *SE = dyn_cast_or_null<StringLiteral>(AL.getArgAsExpr(0)))
@@ -2693,11 +2731,6 @@ static void handleVisibilityAttr(Sema &S, Decl *D, const ParsedAttr &AL,
D->addAttr(newAttr);
}
-static void handleObjCNonRuntimeProtocolAttr(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- handleSimpleAttribute<ObjCNonRuntimeProtocolAttr>(S, D, AL);
-}
-
static void handleObjCDirectAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// objc_direct cannot be set on methods declared in the context of a protocol
if (isa<ObjCProtocolDecl>(D->getDeclContext())) {
@@ -2872,8 +2905,10 @@ static void handleSentinelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
QualType Ty = V->getType();
if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
const FunctionType *FT = Ty->isFunctionPointerType()
- ? D->getFunctionType()
- : Ty->castAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
+ ? D->getFunctionType()
+ : Ty->castAs<BlockPointerType>()
+ ->getPointeeType()
+ ->castAs<FunctionType>();
if (!cast<FunctionProtoType>(FT)->isVariadic()) {
int m = Ty->isFunctionPointerType() ? 0 : 1;
S.Diag(AL.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
@@ -2906,7 +2941,7 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
}
StringRef Str;
- if ((AL.isCXX11Attribute() || AL.isC2xAttribute()) && !AL.getScopeName()) {
+ if (AL.isStandardAttributeSyntax() && !AL.getScopeName()) {
// The standard attribute cannot be applied to variable declarations such
// as a function pointer.
if (isa<VarDecl>(D))
@@ -3051,11 +3086,31 @@ SectionAttr *Sema::mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
return ::new (Context) SectionAttr(Context, CI, Name);
}
+/// Used to implement to perform semantic checking on
+/// attribute((section("foo"))) specifiers.
+///
+/// In this case, "foo" is passed in to be checked. If the section
+/// specifier is invalid, return an Error that indicates the problem.
+///
+/// This is a simple quality of implementation feature to catch errors
+/// and give good diagnostics in cases when the assembler or code generator
+/// would otherwise reject the section specifier.
+llvm::Error Sema::isValidSectionSpecifier(StringRef SecName) {
+ if (!Context.getTargetInfo().getTriple().isOSDarwin())
+ return llvm::Error::success();
+
+ // Let MCSectionMachO validate this.
+ StringRef Segment, Section;
+ unsigned TAA, StubSize;
+ bool HasTAA;
+ return llvm::MCSectionMachO::ParseSectionSpecifier(SecName, Segment, Section,
+ TAA, HasTAA, StubSize);
+}
+
bool Sema::checkSectionName(SourceLocation LiteralLoc, StringRef SecName) {
- std::string Error = Context.getTargetInfo().isValidSectionSpecifier(SecName);
- if (!Error.empty()) {
- Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target) << Error
- << 1 /*'section'*/;
+ if (llvm::Error E = isValidSectionSpecifier(SecName)) {
+ Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target)
+ << toString(std::move(E)) << 1 /*'section'*/;
return false;
}
return true;
@@ -3072,14 +3127,6 @@ static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.checkSectionName(LiteralLoc, Str))
return;
- // If the target wants to validate the section specifier, make it happen.
- std::string Error = S.Context.getTargetInfo().isValidSectionSpecifier(Str);
- if (!Error.empty()) {
- S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target)
- << Error;
- return;
- }
-
SectionAttr *NewAttr = S.mergeSectionAttr(D, AL, Str);
if (NewAttr) {
D->addAttr(NewAttr);
@@ -3095,11 +3142,9 @@ static void handleSectionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// `#pragma code_seg("segname")` uses checkSectionName() instead.
static bool checkCodeSegName(Sema &S, SourceLocation LiteralLoc,
StringRef CodeSegName) {
- std::string Error =
- S.Context.getTargetInfo().isValidSectionSpecifier(CodeSegName);
- if (!Error.empty()) {
+ if (llvm::Error E = S.isValidSectionSpecifier(CodeSegName)) {
S.Diag(LiteralLoc, diag::err_attribute_section_invalid_for_target)
- << Error << 0 /*'code-seg'*/;
+ << toString(std::move(E)) << 0 /*'code-seg'*/;
return false;
}
@@ -3328,7 +3373,7 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
Ty = getFunctionOrMethodResultType(D);
- if (!isNSStringType(Ty, S.Context) &&
+ if (!isNSStringType(Ty, S.Context, /*AllowNSAttributedString=*/true) &&
!isCFStringType(Ty, S.Context) &&
(!Ty->isPointerType() ||
!Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
@@ -3504,7 +3549,7 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
} else if (Kind == NSStringFormat) {
// FIXME: do we need to check if the type is NSString*? What are the
// semantics?
- if (!isNSStringType(Ty, S.Context)) {
+ if (!isNSStringType(Ty, S.Context, /*AllowNSAttributedString=*/true)) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
<< "an NSString" << IdxExpr->getSourceRange()
<< getFunctionOrMethodParamRange(D, ArgIdx);
@@ -3802,11 +3847,11 @@ void Sema::AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
E = ImplicitCastExpr::Create(Context,
Context.getPointerType(E->getType()),
clang::CK_FunctionToPointerDecay, E, nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
if (E->isLValue())
E = ImplicitCastExpr::Create(Context, E->getType().getNonReferenceType(),
clang::CK_LValueToRValue, E, nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
Expr::EvalResult Eval;
Notes.clear();
@@ -3986,6 +4031,12 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
return;
uint64_t AlignVal = Alignment.getZExtValue();
+ // 16 byte ByVal alignment not due to a vector member is not honoured by XL
+ // on AIX. Emit a warning here that users are generating binary incompatible
+ // code to be safe.
+ if (AlignVal >= 16 && isa<FieldDecl>(D) &&
+ Context.getTargetInfo().getTriple().isOSAIX())
+ Diag(AttrLoc, diag::warn_not_xl_compatible) << E->getSourceRange();
// C++11 [dcl.align]p2:
// -- if the constant expression evaluates to zero, the alignment
@@ -4350,20 +4401,6 @@ AlwaysInlineAttr *Sema::mergeAlwaysInlineAttr(Decl *D,
return ::new (Context) AlwaysInlineAttr(Context, CI);
}
-CommonAttr *Sema::mergeCommonAttr(Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, AL))
- return nullptr;
-
- return ::new (Context) CommonAttr(Context, AL);
-}
-
-CommonAttr *Sema::mergeCommonAttr(Decl *D, const CommonAttr &AL) {
- if (checkAttrMutualExclusion<InternalLinkageAttr>(*this, D, AL))
- return nullptr;
-
- return ::new (Context) CommonAttr(Context, AL);
-}
-
InternalLinkageAttr *Sema::mergeInternalLinkageAttr(Decl *D,
const ParsedAttr &AL) {
if (const auto *VD = dyn_cast<VarDecl>(D)) {
@@ -4382,9 +4419,6 @@ InternalLinkageAttr *Sema::mergeInternalLinkageAttr(Decl *D,
}
}
- if (checkAttrMutualExclusion<CommonAttr>(*this, D, AL))
- return nullptr;
-
return ::new (Context) InternalLinkageAttr(Context, AL);
}
InternalLinkageAttr *
@@ -4405,9 +4439,6 @@ Sema::mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL) {
}
}
- if (checkAttrMutualExclusion<CommonAttr>(*this, D, AL))
- return nullptr;
-
return ::new (Context) InternalLinkageAttr(Context, AL);
}
@@ -4424,14 +4455,6 @@ MinSizeAttr *Sema::mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI) {
return ::new (Context) MinSizeAttr(Context, CI);
}
-NoSpeculativeLoadHardeningAttr *Sema::mergeNoSpeculativeLoadHardeningAttr(
- Decl *D, const NoSpeculativeLoadHardeningAttr &AL) {
- if (checkAttrMutualExclusion<SpeculativeLoadHardeningAttr>(*this, D, AL))
- return nullptr;
-
- return ::new (Context) NoSpeculativeLoadHardeningAttr(Context, AL);
-}
-
SwiftNameAttr *Sema::mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name) {
if (const auto *PrevSNA = D->getAttr<SwiftNameAttr>()) {
@@ -4465,18 +4488,7 @@ OptimizeNoneAttr *Sema::mergeOptimizeNoneAttr(Decl *D,
return ::new (Context) OptimizeNoneAttr(Context, CI);
}
-SpeculativeLoadHardeningAttr *Sema::mergeSpeculativeLoadHardeningAttr(
- Decl *D, const SpeculativeLoadHardeningAttr &AL) {
- if (checkAttrMutualExclusion<NoSpeculativeLoadHardeningAttr>(*this, D, AL))
- return nullptr;
-
- return ::new (Context) SpeculativeLoadHardeningAttr(Context, AL);
-}
-
static void handleAlwaysInlineAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<NotTailCalledAttr>(S, D, AL))
- return;
-
if (AlwaysInlineAttr *Inline =
S.mergeAlwaysInlineAttr(D, AL, AL.getAttrName()))
D->addAttr(Inline);
@@ -4493,21 +4505,22 @@ static void handleOptimizeNoneAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleConstantAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL) ||
- checkAttrMutualExclusion<HIPManagedAttr>(S, D, AL))
- return;
const auto *VD = cast<VarDecl>(D);
if (VD->hasLocalStorage()) {
S.Diag(AL.getLoc(), diag::err_cuda_nonstatic_constdev);
return;
}
+ // constexpr variable may already get an implicit constant attr, which should
+ // be replaced by the explicit constant attr.
+ if (auto *A = D->getAttr<CUDAConstantAttr>()) {
+ if (!A->isImplicit())
+ return;
+ D->dropAttr<CUDAConstantAttr>();
+ }
D->addAttr(::new (S.Context) CUDAConstantAttr(S.Context, AL));
}
static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL) ||
- checkAttrMutualExclusion<HIPManagedAttr>(S, D, AL))
- return;
const auto *VD = cast<VarDecl>(D);
// extern __shared__ is only allowed on arrays with no length (e.g.
// "int x[]").
@@ -4524,10 +4537,6 @@ static void handleSharedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDADeviceAttr>(S, D, AL) ||
- checkAttrMutualExclusion<CUDAHostAttr>(S, D, AL)) {
- return;
- }
const auto *FD = cast<FunctionDecl>(D);
if (!FD->getReturnType()->isVoidType() &&
!FD->getReturnType()->getAs<AutoType>() &&
@@ -4561,10 +4570,6 @@ static void handleGlobalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleDeviceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDAGlobalAttr>(S, D, AL)) {
- return;
- }
-
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasLocalStorage()) {
S.Diag(AL.getLoc(), diag::err_cuda_nonstatic_constdev);
@@ -4581,11 +4586,6 @@ static void handleDeviceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleManagedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (checkAttrMutualExclusion<CUDAConstantAttr>(S, D, AL) ||
- checkAttrMutualExclusion<CUDASharedAttr>(S, D, AL)) {
- return;
- }
-
if (const auto *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasLocalStorage()) {
S.Diag(AL.getLoc(), diag::err_cuda_nonstatic_constdev);
@@ -4645,6 +4645,9 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_SwiftCall:
D->addAttr(::new (S.Context) SwiftCallAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_SwiftAsyncCall:
+ D->addAttr(::new (S.Context) SwiftAsyncCallAttr(S.Context, AL));
+ return;
case ParsedAttr::AT_VectorCall:
D->addAttr(::new (S.Context) VectorCallAttr(S.Context, AL));
return;
@@ -4691,7 +4694,7 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleSuppressAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return;
std::vector<StringRef> DiagnosticIdentifiers;
@@ -4730,7 +4733,10 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
// To check if earlier decl attributes do not conflict the newly parsed ones
- // we always add (and check) the attribute to the cannonical decl.
+ // we always add (and check) the attribute to the cannonical decl. We need
+ // to repeat the check for attribute mutual exclusion because we're attaching
+ // all of the attributes to the canonical declaration rather than the current
+ // declaration.
D = D->getCanonicalDecl();
if (AL.getKind() == ParsedAttr::AT_Owner) {
if (checkAttrMutualExclusion<PointerAttr>(S, D, AL))
@@ -4781,7 +4787,7 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
}
unsigned ReqArgs = Attrs.getKind() == ParsedAttr::AT_Pcs ? 1 : 0;
- if (!checkAttributeNumArgs(*this, Attrs, ReqArgs)) {
+ if (!Attrs.checkExactlyNumArgs(*this, ReqArgs)) {
Attrs.setInvalid();
return true;
}
@@ -4806,6 +4812,9 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_SwiftCall:
CC = CC_Swift;
break;
+ case ParsedAttr::AT_SwiftAsyncCall:
+ CC = CC_SwiftAsync;
+ break;
case ParsedAttr::AT_VectorCall:
CC = CC_X86VectorCall;
break;
@@ -4984,6 +4993,14 @@ void Sema::AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
D->addAttr(::new (Context) SwiftContextAttr(Context, CI));
return;
+ case ParameterABI::SwiftAsyncContext:
+ if (!isValidSwiftContextType(type)) {
+ Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
+ << getParameterABISpelling(abi) << /*pointer to pointer */ 0 << type;
+ }
+ D->addAttr(::new (Context) SwiftAsyncContextAttr(Context, CI));
+ return;
+
case ParameterABI::SwiftErrorResult:
if (!isValidSwiftErrorResultType(type)) {
Diag(CI.getLoc(), diag::err_swift_abi_parameter_wrong_type)
@@ -5009,7 +5026,7 @@ bool Sema::CheckRegparmAttr(const ParsedAttr &AL, unsigned &numParams) {
if (AL.isInvalid())
return true;
- if (!checkAttributeNumArgs(*this, AL, 1)) {
+ if (!AL.checkExactlyNumArgs(*this, 1)) {
AL.setInvalid();
return true;
}
@@ -5063,7 +5080,7 @@ static Expr *makeLaunchBoundsArgExpr(Sema &S, Expr *E,
// Make sure we can fit it in 32 bits.
if (!I->isIntN(32)) {
S.Diag(E->getExprLoc(), diag::err_ice_too_large)
- << I->toString(10, false) << 32 << /* Unsigned */ 1;
+ << toString(*I, 10, false) << 32 << /* Unsigned */ 1;
return nullptr;
}
if (*I < 0)
@@ -5098,8 +5115,7 @@ void Sema::AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
}
static void handleLaunchBoundsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1) ||
- !checkAttributeAtMostNumArgs(S, AL, 2))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
return;
S.AddLaunchBoundsAttr(D, AL, AL.getArgAsExpr(0),
@@ -5146,7 +5162,7 @@ static void handleTypeTagForDatatypeAttr(Sema &S, Decl *D,
return;
}
- if (!checkAttributeNumArgs(S, AL, 1))
+ if (!AL.checkExactlyNumArgs(S, 1))
return;
if (!isa<VarDecl>(D)) {
@@ -5237,15 +5253,12 @@ static bool ArmCdeAliasValid(unsigned BuiltinID, StringRef AliasName) {
return ArmBuiltinAliasValid(BuiltinID, AliasName, Map, IntrinNames);
}
-static bool ArmSveAliasValid(unsigned BuiltinID, StringRef AliasName) {
- switch (BuiltinID) {
- default:
- return false;
-#define GET_SVE_BUILTINS
-#define BUILTIN(name, types, attr) case SVE::BI##name:
-#include "clang/Basic/arm_sve_builtins.inc"
- return true;
- }
+static bool ArmSveAliasValid(ASTContext &Context, unsigned BuiltinID,
+ StringRef AliasName) {
+ if (Context.BuiltinInfo.isAuxBuiltinID(BuiltinID))
+ BuiltinID = Context.BuiltinInfo.getAuxBuiltinID(BuiltinID);
+ return BuiltinID >= AArch64::FirstSVEBuiltin &&
+ BuiltinID <= AArch64::LastSVEBuiltin;
}
static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -5260,7 +5273,7 @@ static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
- if ((IsAArch64 && !ArmSveAliasValid(BuiltinID, AliasName)) ||
+ if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
(!IsAArch64 && !ArmMveAliasValid(BuiltinID, AliasName) &&
!ArmCdeAliasValid(BuiltinID, AliasName))) {
S.Diag(AL.getLoc(), diag::err_attribute_arm_builtin_alias);
@@ -5270,6 +5283,38 @@ static void handleArmBuiltinAliasAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) ArmBuiltinAliasAttr(S.Context, AL, Ident));
}
+static bool RISCVAliasValid(unsigned BuiltinID, StringRef AliasName) {
+ return BuiltinID >= Builtin::FirstTSBuiltin &&
+ BuiltinID < RISCV::LastTSBuiltin;
+}
+
+static void handleBuiltinAliasAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ if (!AL.isArgIdent(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_type)
+ << AL << 1 << AANT_ArgumentIdentifier;
+ return;
+ }
+
+ IdentifierInfo *Ident = AL.getArgAsIdent(0)->Ident;
+ unsigned BuiltinID = Ident->getBuiltinID();
+ StringRef AliasName = cast<FunctionDecl>(D)->getIdentifier()->getName();
+
+ bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
+ bool IsARM = S.Context.getTargetInfo().getTriple().isARM();
+ bool IsRISCV = S.Context.getTargetInfo().getTriple().isRISCV();
+ if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
+ (IsARM && !ArmMveAliasValid(BuiltinID, AliasName) &&
+ !ArmCdeAliasValid(BuiltinID, AliasName)) ||
+ (IsRISCV && !RISCVAliasValid(BuiltinID, AliasName)) ||
+ (!IsAArch64 && !IsARM && !IsRISCV)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_builtin_alias) << AL;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) BuiltinAliasAttr(S.Context, AL, Ident));
+}
+
//===----------------------------------------------------------------------===//
// Checker-specific attribute handlers.
//===----------------------------------------------------------------------===//
@@ -5751,9 +5796,11 @@ static void handleSwiftBridge(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.checkStringLiteralArgumentAttr(AL, 0, BT))
return;
- // Don't duplicate annotations that are already set.
- if (D->hasAttr<SwiftBridgeAttr>()) {
- S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
+ // Warn about duplicate attributes if they have different arguments, but drop
+ // any duplicate attributes regardless.
+ if (const auto *Other = D->getAttr<SwiftBridgeAttr>()) {
+ if (Other->getSwiftType() != BT)
+ S.Diag(AL.getLoc(), diag::warn_duplicate_attribute) << AL;
return;
}
@@ -5856,6 +5903,125 @@ static void handleSwiftError(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) SwiftErrorAttr(S.Context, AL, Convention));
}
+static void checkSwiftAsyncErrorBlock(Sema &S, Decl *D,
+ const SwiftAsyncErrorAttr *ErrorAttr,
+ const SwiftAsyncAttr *AsyncAttr) {
+ if (AsyncAttr->getKind() == SwiftAsyncAttr::None) {
+ if (ErrorAttr->getConvention() != SwiftAsyncErrorAttr::None) {
+ S.Diag(AsyncAttr->getLocation(),
+ diag::err_swift_async_error_without_swift_async)
+ << AsyncAttr << isa<ObjCMethodDecl>(D);
+ }
+ return;
+ }
+
+ const ParmVarDecl *HandlerParam = getFunctionOrMethodParam(
+ D, AsyncAttr->getCompletionHandlerIndex().getASTIndex());
+ // handleSwiftAsyncAttr already verified the type is correct, so no need to
+ // double-check it here.
+ const auto *FuncTy = HandlerParam->getType()
+ ->castAs<BlockPointerType>()
+ ->getPointeeType()
+ ->getAs<FunctionProtoType>();
+ ArrayRef<QualType> BlockParams;
+ if (FuncTy)
+ BlockParams = FuncTy->getParamTypes();
+
+ switch (ErrorAttr->getConvention()) {
+ case SwiftAsyncErrorAttr::ZeroArgument:
+ case SwiftAsyncErrorAttr::NonZeroArgument: {
+ uint32_t ParamIdx = ErrorAttr->getHandlerParamIdx();
+ if (ParamIdx == 0 || ParamIdx > BlockParams.size()) {
+ S.Diag(ErrorAttr->getLocation(),
+ diag::err_attribute_argument_out_of_bounds) << ErrorAttr << 2;
+ return;
+ }
+ QualType ErrorParam = BlockParams[ParamIdx - 1];
+ if (!ErrorParam->isIntegralType(S.Context)) {
+ StringRef ConvStr =
+ ErrorAttr->getConvention() == SwiftAsyncErrorAttr::ZeroArgument
+ ? "zero_argument"
+ : "nonzero_argument";
+ S.Diag(ErrorAttr->getLocation(), diag::err_swift_async_error_non_integral)
+ << ErrorAttr << ConvStr << ParamIdx << ErrorParam;
+ return;
+ }
+ break;
+ }
+ case SwiftAsyncErrorAttr::NonNullError: {
+ bool AnyErrorParams = false;
+ for (QualType Param : BlockParams) {
+ // Check for NSError *.
+ if (const auto *ObjCPtrTy = Param->getAs<ObjCObjectPointerType>()) {
+ if (const auto *ID = ObjCPtrTy->getInterfaceDecl()) {
+ if (ID->getIdentifier() == S.getNSErrorIdent()) {
+ AnyErrorParams = true;
+ break;
+ }
+ }
+ }
+ // Check for CFError *.
+ if (const auto *PtrTy = Param->getAs<PointerType>()) {
+ if (const auto *RT = PtrTy->getPointeeType()->getAs<RecordType>()) {
+ if (S.isCFError(RT->getDecl())) {
+ AnyErrorParams = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (!AnyErrorParams) {
+ S.Diag(ErrorAttr->getLocation(),
+ diag::err_swift_async_error_no_error_parameter)
+ << ErrorAttr << isa<ObjCMethodDecl>(D);
+ return;
+ }
+ break;
+ }
+ case SwiftAsyncErrorAttr::None:
+ break;
+ }
+}
+
+static void handleSwiftAsyncError(Sema &S, Decl *D, const ParsedAttr &AL) {
+ IdentifierLoc *IDLoc = AL.getArgAsIdent(0);
+ SwiftAsyncErrorAttr::ConventionKind ConvKind;
+ if (!SwiftAsyncErrorAttr::ConvertStrToConventionKind(IDLoc->Ident->getName(),
+ ConvKind)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << IDLoc->Ident;
+ return;
+ }
+
+ uint32_t ParamIdx = 0;
+ switch (ConvKind) {
+ case SwiftAsyncErrorAttr::ZeroArgument:
+ case SwiftAsyncErrorAttr::NonZeroArgument: {
+ if (!AL.checkExactlyNumArgs(S, 2))
+ return;
+
+ Expr *IdxExpr = AL.getArgAsExpr(1);
+ if (!checkUInt32Argument(S, AL, IdxExpr, ParamIdx))
+ return;
+ break;
+ }
+ case SwiftAsyncErrorAttr::NonNullError:
+ case SwiftAsyncErrorAttr::None: {
+ if (!AL.checkExactlyNumArgs(S, 1))
+ return;
+ break;
+ }
+ }
+
+ auto *ErrorAttr =
+ ::new (S.Context) SwiftAsyncErrorAttr(S.Context, AL, ConvKind, ParamIdx);
+ D->addAttr(ErrorAttr);
+
+ if (auto *AsyncAttr = D->getAttr<SwiftAsyncAttr>())
+ checkSwiftAsyncErrorBlock(S, D, ErrorAttr, AsyncAttr);
+}
+
// For a function, this will validate a compound Swift name, e.g.
// <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, and
// the function will output the number of parameter names, and whether this is a
@@ -6168,7 +6334,7 @@ static void handleSwiftAsyncName(Sema &S, Decl *D, const ParsedAttr &AL) {
static void handleSwiftNewType(Sema &S, Decl *D, const ParsedAttr &AL) {
// Make sure that there is an identifier as the annotation's single argument.
- if (!checkAttributeNumArgs(S, AL, 1))
+ if (!AL.checkExactlyNumArgs(S, 1))
return;
if (!AL.isArgIdent(0)) {
@@ -6210,11 +6376,11 @@ static void handleSwiftAsyncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
ParamIdx Idx;
if (Kind == SwiftAsyncAttr::None) {
// If this is 'none', then there shouldn't be any additional arguments.
- if (!checkAttributeNumArgs(S, AL, 1))
+ if (!AL.checkExactlyNumArgs(S, 1))
return;
} else {
// Non-none swift_async requires a completion handler index argument.
- if (!checkAttributeNumArgs(S, AL, 2))
+ if (!AL.checkExactlyNumArgs(S, 2))
return;
Expr *HandlerIdx = AL.getArgAsExpr(1);
@@ -6231,8 +6397,8 @@ static void handleSwiftAsyncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
QualType BlockTy =
- CompletionBlockType->getAs<BlockPointerType>()->getPointeeType();
- if (!BlockTy->getAs<FunctionType>()->getReturnType()->isVoidType()) {
+ CompletionBlockType->castAs<BlockPointerType>()->getPointeeType();
+ if (!BlockTy->castAs<FunctionType>()->getReturnType()->isVoidType()) {
S.Diag(CompletionBlock->getLocation(),
diag::err_swift_async_bad_block_type)
<< CompletionBlock->getType();
@@ -6240,7 +6406,12 @@ static void handleSwiftAsyncAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
- D->addAttr(::new (S.Context) SwiftAsyncAttr(S.Context, AL, Kind, Idx));
+ auto *AsyncAttr =
+ ::new (S.Context) SwiftAsyncAttr(S.Context, AL, Kind, Idx);
+ D->addAttr(AsyncAttr);
+
+ if (auto *ErrorAttr = D->getAttr<SwiftAsyncErrorAttr>())
+ checkSwiftAsyncErrorBlock(S, D, ErrorAttr, AsyncAttr);
}
//===----------------------------------------------------------------------===//
@@ -6373,7 +6544,7 @@ static void handleAbiTagAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
if (AL.getNumArgs() == 0)
Tags.push_back(NS->getName());
- } else if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ } else if (!AL.checkAtLeastNumArgs(S, 1))
return;
// Store tags sorted and without duplicates.
@@ -6431,7 +6602,7 @@ static void handleMSP430InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
// The attribute takes one integer argument.
- if (!checkAttributeNumArgs(S, AL, 1))
+ if (!AL.checkExactlyNumArgs(S, 1))
return;
if (!AL.isArgExpr(0)) {
@@ -6503,6 +6674,8 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
+ // We still have to do this manually because the Interrupt attributes are
+ // a bit special due to sharing their spellings across targets.
if (checkAttrMutualExclusion<Mips16Attr>(S, D, AL))
return;
@@ -6516,6 +6689,39 @@ static void handleMipsInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) MipsInterruptAttr(S.Context, AL, Kind));
}
+static void handleM68kInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (!AL.checkExactlyNumArgs(S, 1))
+ return;
+
+ if (!AL.isArgExpr(0)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIntegerConstant;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ Expr *NumParamsExpr = static_cast<Expr *>(AL.getArgAsExpr(0));
+ auto MaybeNumParams = NumParamsExpr->getIntegerConstantExpr(S.Context);
+ if (!MaybeNumParams) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_type)
+ << AL << AANT_ArgumentIntegerConstant
+ << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ unsigned Num = MaybeNumParams->getLimitedValue(255);
+ if ((Num & 1) || Num > 30) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL << (int)MaybeNumParams->getSExtValue()
+ << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ D->addAttr(::new (S.Context) M68kInterruptAttr(S.Context, AL, Num));
+ D->addAttr(UsedAttr::CreateImplicit(S.Context));
+}
+
static void handleAnyX86InterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Semantic checks for a function with the 'interrupt' attribute.
// a) Must be a function.
@@ -6587,7 +6793,7 @@ static void handleAVRInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (!checkAttributeNumArgs(S, AL, 0))
+ if (!AL.checkExactlyNumArgs(S, 0))
return;
handleSimpleAttribute<AVRInterruptAttr>(S, D, AL);
@@ -6600,7 +6806,7 @@ static void handleAVRSignalAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
}
- if (!checkAttributeNumArgs(S, AL, 0))
+ if (!AL.checkExactlyNumArgs(S, 0))
return;
handleSimpleAttribute<AVRSignalAttr>(S, D, AL);
@@ -6731,7 +6937,7 @@ static void handleRISCVInterruptAttr(Sema &S, Decl *D,
}
// Check the attribute argument. Argument is optional.
- if (!checkAttributeAtMostNumArgs(S, AL, 1))
+ if (!AL.checkAtMostNumArgs(S, 1))
return;
StringRef Str;
@@ -6788,6 +6994,9 @@ static void handleInterruptAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case llvm::Triple::mips:
handleMipsInterruptAttr(S, D, AL);
break;
+ case llvm::Triple::m68k:
+ handleM68kInterruptAttr(S, D, AL);
+ break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
handleAnyX86InterruptAttr(S, D, AL);
@@ -6901,8 +7110,7 @@ void Sema::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
}
static void handleAMDGPUWavesPerEUAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1) ||
- !checkAttributeAtMostNumArgs(S, AL, 2))
+ if (!AL.checkAtLeastNumArgs(S, 1) || !AL.checkAtMostNumArgs(S, 2))
return;
Expr *MinExpr = AL.getArgAsExpr(0);
@@ -7125,7 +7333,7 @@ static void handleReleaseCapabilityAttr(Sema &S, Decl *D,
static void handleRequiresCapabilityAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return;
// check that all arguments are lockable objects
@@ -7149,6 +7357,11 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// namespace.
return;
}
+ } else if (isa<UsingDecl, UnresolvedUsingTypenameDecl,
+ UnresolvedUsingValueDecl>(D)) {
+ S.Diag(AL.getRange().getBegin(), diag::warn_deprecated_ignored_on_using)
+ << AL;
+ return;
}
// Handle the cases where the attribute has a text message.
@@ -7157,9 +7370,9 @@ static void handleDeprecatedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
!S.checkStringLiteralArgumentAttr(AL, 0, Str))
return;
- // Only support a single optional message for Declspec and CXX11.
- if (AL.isDeclspecAttribute() || AL.isCXX11Attribute())
- checkAttributeAtMostNumArgs(S, AL, 1);
+ // Support a single optional message only for Declspec and [[]] spellings.
+ if (AL.isDeclspecAttribute() || AL.isStandardAttributeSyntax())
+ AL.checkAtMostNumArgs(S, 1);
else if (AL.isArgExpr(1) && AL.getArgAsExpr(1) &&
!S.checkStringLiteralArgumentAttr(AL, 1, Replacement))
return;
@@ -7177,7 +7390,7 @@ static bool isGlobalVar(const Decl *D) {
}
static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
- if (!checkAttributeAtLeastNumArgs(S, AL, 1))
+ if (!AL.checkAtLeastNumArgs(S, 1))
return;
std::vector<StringRef> Sanitizers;
@@ -7190,7 +7403,8 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
return;
if (parseSanitizerValue(SanitizerName, /*AllowGroups=*/true) ==
- SanitizerMask())
+ SanitizerMask() &&
+ SanitizerName != "coverage")
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
else if (isGlobalVar(D) && SanitizerName != "address")
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
@@ -7224,7 +7438,7 @@ static void handleNoSanitizeSpecificAttr(Sema &S, Decl *D,
// getSpelling() or prettyPrint() on the resulting semantic attribute object
// without failing assertions.
unsigned TranslatedSpellingIndex = 0;
- if (AL.isC2xAttribute() || AL.isCXX11Attribute())
+ if (AL.isStandardAttributeSyntax())
TranslatedSpellingIndex = 1;
AttributeCommonInfo Info = AL;
@@ -7247,49 +7461,6 @@ static void handleOpenCLNoSVMAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
<< "2.0";
}
-/// Handles semantic checking for features that are common to all attributes,
-/// such as checking whether a parameter was properly specified, or the correct
-/// number of arguments were passed, etc.
-static bool handleCommonAttributeFeatures(Sema &S, Decl *D,
- const ParsedAttr &AL) {
- // Several attributes carry different semantics than the parsing requires, so
- // those are opted out of the common argument checks.
- //
- // We also bail on unknown and ignored attributes because those are handled
- // as part of the target-specific handling logic.
- if (AL.getKind() == ParsedAttr::UnknownAttribute)
- return false;
- // Check whether the attribute requires specific language extensions to be
- // enabled.
- if (!AL.diagnoseLangOpts(S))
- return true;
- // Check whether the attribute appertains to the given subject.
- if (!AL.diagnoseAppertainsTo(S, D))
- return true;
- if (AL.hasCustomParsing())
- return false;
-
- if (AL.getMinArgs() == AL.getMaxArgs()) {
- // If there are no optional arguments, then checking for the argument count
- // is trivial.
- if (!checkAttributeNumArgs(S, AL, AL.getMinArgs()))
- return true;
- } else {
- // There are optional arguments, so checking is slightly more involved.
- if (AL.getMinArgs() &&
- !checkAttributeAtLeastNumArgs(S, AL, AL.getMinArgs()))
- return true;
- else if (!AL.hasVariadicArg() && AL.getMaxArgs() &&
- !checkAttributeAtMostNumArgs(S, AL, AL.getMaxArgs()))
- return true;
- }
-
- if (S.CheckAttrTarget(AL))
- return true;
-
- return false;
-}
-
static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (D->isInvalidDecl())
return;
@@ -7308,16 +7479,24 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
- // OpenCL v2.0 s6.6 - read_write can be used for image types to specify that an
- // image object can be read and written.
- // OpenCL v2.0 s6.13.6 - A kernel cannot read from and write to the same pipe
- // object. Using the read_write (or __read_write) qualifier with the pipe
- // qualifier is a compilation error.
+ // OpenCL v2.0 s6.6 - read_write can be used for image types to specify that
+ // an image object can be read and written. OpenCL v2.0 s6.13.6 - A kernel
+ // cannot read from and write to the same pipe object. Using the read_write
+ // (or __read_write) qualifier with the pipe qualifier is a compilation error.
+ // OpenCL v3.0 s6.8 - For OpenCL C 2.0, or with the
+ // __opencl_c_read_write_images feature, image objects specified as arguments
+ // to a kernel can additionally be declared to be read-write.
+ // C++ for OpenCL inherits rule from OpenCL C v2.0.
if (const auto *PDecl = dyn_cast<ParmVarDecl>(D)) {
const Type *DeclTy = PDecl->getType().getCanonicalType().getTypePtr();
if (AL.getAttrName()->getName().find("read_write") != StringRef::npos) {
+ bool ReadWriteImagesUnsupportedForOCLC =
+ (S.getLangOpts().OpenCLVersion < 200) ||
+ (S.getLangOpts().OpenCLVersion == 300 &&
+ !S.getOpenCLOptions().isSupported("__opencl_c_read_write_images",
+ S.getLangOpts()));
if ((!S.getLangOpts().OpenCLCPlusPlus &&
- S.getLangOpts().OpenCLVersion < 200) ||
+ ReadWriteImagesUnsupportedForOCLC) ||
DeclTy->isPipeType()) {
S.Diag(AL.getLoc(), diag::err_opencl_invalid_read_write)
<< AL << PDecl->getType() << DeclTy->isImageType();
@@ -7377,9 +7556,9 @@ static void handleDestroyAttr(Sema &S, Decl *D, const ParsedAttr &A) {
}
if (A.getKind() == ParsedAttr::AT_AlwaysDestroy)
- handleSimpleAttributeWithExclusions<AlwaysDestroyAttr, NoDestroyAttr>(S, D, A);
+ handleSimpleAttribute<AlwaysDestroyAttr>(S, D, A);
else
- handleSimpleAttributeWithExclusions<NoDestroyAttr, AlwaysDestroyAttr>(S, D, A);
+ handleSimpleAttribute<NoDestroyAttr>(S, D, A);
}
static void handleUninitializedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -7643,7 +7822,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
return;
}
- if (handleCommonAttributeFeatures(S, D, AL))
+ if (S.checkCommonAttributeFeatures(D, AL))
return;
switch (AL.getKind()) {
@@ -7655,6 +7834,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
assert(AL.isTypeAttr() && "Non-type attribute not handled");
break;
}
+ // N.B., ClangAttrEmitter.cpp emits a diagnostic helper that ensures a
+ // statement attribute is not written on a declaration, but this code is
+ // needed for attributes in Attr.td that do not list any subjects.
S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)
<< AL << D->getLocation();
break;
@@ -7668,21 +7850,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_DLLImport:
handleDLLAttr(S, D, AL);
break;
- case ParsedAttr::AT_Mips16:
- handleSimpleAttributeWithExclusions<Mips16Attr, MicroMipsAttr,
- MipsInterruptAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_MicroMips:
- handleSimpleAttributeWithExclusions<MicroMipsAttr, Mips16Attr>(S, D, AL);
- break;
- case ParsedAttr::AT_MipsLongCall:
- handleSimpleAttributeWithExclusions<MipsLongCallAttr, MipsShortCallAttr>(
- S, D, AL);
- break;
- case ParsedAttr::AT_MipsShortCall:
- handleSimpleAttributeWithExclusions<MipsShortCallAttr, MipsLongCallAttr>(
- S, D, AL);
- break;
case ParsedAttr::AT_AMDGPUFlatWorkGroupSize:
handleAMDGPUFlatWorkGroupSizeAttr(S, D, AL);
break;
@@ -7816,22 +7983,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_CUDADevice:
handleDeviceAttr(S, D, AL);
break;
- case ParsedAttr::AT_CUDAHost:
- handleSimpleAttributeWithExclusions<CUDAHostAttr, CUDAGlobalAttr>(S, D, AL);
- break;
case ParsedAttr::AT_HIPManaged:
handleManagedAttr(S, D, AL);
break;
- case ParsedAttr::AT_CUDADeviceBuiltinSurfaceType:
- handleSimpleAttributeWithExclusions<CUDADeviceBuiltinSurfaceTypeAttr,
- CUDADeviceBuiltinTextureTypeAttr>(S, D,
- AL);
- break;
- case ParsedAttr::AT_CUDADeviceBuiltinTextureType:
- handleSimpleAttributeWithExclusions<CUDADeviceBuiltinTextureTypeAttr,
- CUDADeviceBuiltinSurfaceTypeAttr>(S, D,
- AL);
- break;
case ParsedAttr::AT_GNUInline:
handleGNUInlineAttr(S, D, AL);
break;
@@ -7865,12 +8019,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Ownership:
handleOwnershipAttr(S, D, AL);
break;
- case ParsedAttr::AT_Cold:
- handleSimpleAttributeWithExclusions<ColdAttr, HotAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_Hot:
- handleSimpleAttributeWithExclusions<HotAttr, ColdAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Naked:
handleNakedAttr(S, D, AL);
break;
@@ -7880,9 +8028,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_AnyX86NoCfCheck:
handleNoCfCheckAttr(S, D, AL);
break;
- case ParsedAttr::AT_Leaf:
- handleSimpleAttribute<LeafAttr>(S, D, AL);
- break;
case ParsedAttr::AT_NoThrow:
if (!AL.isUsedAsTypeAttr())
handleSimpleAttribute<NoThrowAttr>(S, D, AL);
@@ -7926,14 +8071,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_NSErrorDomain:
handleNSErrorDomain(S, D, AL);
break;
- case ParsedAttr::AT_CFAuditedTransfer:
- handleSimpleAttributeWithExclusions<CFAuditedTransferAttr,
- CFUnknownTransferAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_CFUnknownTransfer:
- handleSimpleAttributeWithExclusions<CFUnknownTransferAttr,
- CFAuditedTransferAttr>(S, D, AL);
- break;
case ParsedAttr::AT_CFConsumed:
case ParsedAttr::AT_NSConsumed:
case ParsedAttr::AT_OSConsumed:
@@ -7974,10 +8111,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleVecTypeHint(S, D, AL);
break;
case ParsedAttr::AT_InitPriority:
- if (S.Context.getTargetInfo().getTriple().isOSAIX())
- llvm::report_fatal_error(
- "'init_priority' attribute is not yet supported on AIX");
- else
handleInitPriorityAttr(S, D, AL);
break;
case ParsedAttr::AT_Packed:
@@ -7989,15 +8122,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Section:
handleSectionAttr(S, D, AL);
break;
- case ParsedAttr::AT_SpeculativeLoadHardening:
- handleSimpleAttributeWithExclusions<SpeculativeLoadHardeningAttr,
- NoSpeculativeLoadHardeningAttr>(S, D,
- AL);
- break;
- case ParsedAttr::AT_NoSpeculativeLoadHardening:
- handleSimpleAttributeWithExclusions<NoSpeculativeLoadHardeningAttr,
- SpeculativeLoadHardeningAttr>(S, D, AL);
- break;
case ParsedAttr::AT_CodeSeg:
handleCodeSegAttr(S, D, AL);
break;
@@ -8016,9 +8140,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_ObjCDirect:
handleObjCDirectAttr(S, D, AL);
break;
- case ParsedAttr::AT_ObjCNonRuntimeProtocol:
- handleObjCNonRuntimeProtocolAttr(S, D, AL);
- break;
case ParsedAttr::AT_ObjCDirectMembers:
handleObjCDirectMembersAttr(S, D, AL);
handleSimpleAttribute<ObjCDirectMembersAttr>(S, D, AL);
@@ -8029,17 +8150,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Unused:
handleUnusedAttr(S, D, AL);
break;
- case ParsedAttr::AT_NotTailCalled:
- handleSimpleAttributeWithExclusions<NotTailCalledAttr, AlwaysInlineAttr>(
- S, D, AL);
- break;
- case ParsedAttr::AT_DisableTailCalls:
- handleSimpleAttributeWithExclusions<DisableTailCallsAttr, NakedAttr>(S, D,
- AL);
- break;
- case ParsedAttr::AT_NoMerge:
- handleSimpleAttribute<NoMergeAttr>(S, D, AL);
- break;
case ParsedAttr::AT_Visibility:
handleVisibilityAttr(S, D, AL, false);
break;
@@ -8089,6 +8199,7 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Pascal:
case ParsedAttr::AT_RegCall:
case ParsedAttr::AT_SwiftCall:
+ case ParsedAttr::AT_SwiftAsyncCall:
case ParsedAttr::AT_VectorCall:
case ParsedAttr::AT_MSABI:
case ParsedAttr::AT_SysVABI:
@@ -8115,6 +8226,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_SwiftContext:
S.AddParameterABIAttr(D, AL, ParameterABI::SwiftContext);
break;
+ case ParsedAttr::AT_SwiftAsyncContext:
+ S.AddParameterABIAttr(D, AL, ParameterABI::SwiftAsyncContext);
+ break;
case ParsedAttr::AT_SwiftErrorResult:
S.AddParameterABIAttr(D, AL, ParameterABI::SwiftErrorResult);
break;
@@ -8247,9 +8361,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_SwiftBridge:
handleSwiftBridge(S, D, AL);
break;
- case ParsedAttr::AT_SwiftBridgedTypedef:
- handleSimpleAttribute<SwiftBridgedTypedefAttr>(S, D, AL);
- break;
case ParsedAttr::AT_SwiftError:
handleSwiftError(S, D, AL);
break;
@@ -8259,15 +8370,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_SwiftNewType:
handleSwiftNewType(S, D, AL);
break;
- case ParsedAttr::AT_SwiftObjCMembers:
- handleSimpleAttribute<SwiftObjCMembersAttr>(S, D, AL);
- break;
- case ParsedAttr::AT_SwiftPrivate:
- handleSimpleAttribute<SwiftPrivateAttr>(S, D, AL);
- break;
case ParsedAttr::AT_SwiftAsync:
handleSwiftAsyncAttr(S, D, AL);
break;
+ case ParsedAttr::AT_SwiftAsyncError:
+ handleSwiftAsyncError(S, D, AL);
+ break;
// XRay attributes.
case ParsedAttr::AT_XRayLogArgs:
@@ -8287,10 +8395,6 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleUninitializedAttr(S, D, AL);
break;
- case ParsedAttr::AT_LoaderUninitialized:
- handleSimpleAttribute<LoaderUninitializedAttr>(S, D, AL);
- break;
-
case ParsedAttr::AT_ObjCExternallyRetained:
handleObjCExternallyRetainedAttr(S, D, AL);
break;
@@ -8326,6 +8430,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_EnforceTCBLeaf:
handleEnforceTCBAttr<EnforceTCBLeafAttr, EnforceTCBAttr>(S, D, AL);
break;
+
+ case ParsedAttr::AT_BuiltinAlias:
+ handleBuiltinAliasAttr(S, D, AL);
+ break;
+
+ case ParsedAttr::AT_UsingIfExists:
+ handleSimpleAttribute<UsingIfExistsAttr>(S, D, AL);
+ break;
}
}
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 8bfaa46162bc..83c97626ff7e 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -254,8 +254,7 @@ void Sema::ImplicitExceptionSpecification::CalledStmt(Stmt *S) {
ComputedEST = EST_None;
}
-ExprResult Sema::ConvertParamDefaultArgument(const ParmVarDecl *Param,
- Expr *Arg,
+ExprResult Sema::ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
SourceLocation EqualLoc) {
if (RequireCompleteType(Param->getLocation(), Param->getType(),
diag::err_typecheck_decl_incomplete_type))
@@ -316,7 +315,7 @@ Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
auto Fail = [&] {
Param->setInvalidDecl();
Param->setDefaultArg(new (Context) OpaqueValueExpr(
- EqualLoc, Param->getType().getNonReferenceType(), VK_RValue));
+ EqualLoc, Param->getType().getNonReferenceType(), VK_PRValue));
};
// Default arguments are only permitted in C++
@@ -381,10 +380,8 @@ void Sema::ActOnParamDefaultArgumentError(Decl *param,
ParmVarDecl *Param = cast<ParmVarDecl>(param);
Param->setInvalidDecl();
UnparsedDefaultArgLocs.erase(Param);
- Param->setDefaultArg(new(Context)
- OpaqueValueExpr(EqualLoc,
- Param->getType().getNonReferenceType(),
- VK_RValue));
+ Param->setDefaultArg(new (Context) OpaqueValueExpr(
+ EqualLoc, Param->getType().getNonReferenceType(), VK_PRValue));
}
/// CheckExtraCXXDefaultArguments - Check for any extra default
@@ -857,17 +854,25 @@ Sema::ActOnDecompositionDeclarator(Scope *S, Declarator &D,
Previous.clear();
}
+ auto *BD = BindingDecl::Create(Context, DC, B.NameLoc, B.Name);
+
+ // Find the shadowed declaration before filtering for scope.
+ NamedDecl *ShadowedDecl = D.getCXXScopeSpec().isEmpty()
+ ? getShadowedDeclaration(BD, Previous)
+ : nullptr;
+
bool ConsiderLinkage = DC->isFunctionOrMethod() &&
DS.getStorageClassSpec() == DeclSpec::SCS_extern;
FilterLookupForScope(Previous, DC, S, ConsiderLinkage,
/*AllowInlineNamespace*/false);
+
if (!Previous.empty()) {
auto *Old = Previous.getRepresentativeDecl();
Diag(B.NameLoc, diag::err_redefinition) << B.Name;
Diag(Old->getLocation(), diag::note_previous_definition);
+ } else if (ShadowedDecl && !D.isRedeclaration()) {
+ CheckShadow(BD, ShadowedDecl, Previous);
}
-
- auto *BD = BindingDecl::Create(Context, DC, B.NameLoc, B.Name);
PushOnScopeChains(BD, S, true);
Bindings.push_back(BD);
ParsingInitForAutoVars.insert(BD);
@@ -903,8 +908,8 @@ static bool checkSimpleDecomposition(
if ((int64_t)Bindings.size() != NumElems) {
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
<< DecompType << (unsigned)Bindings.size()
- << (unsigned)NumElems.getLimitedValue(UINT_MAX) << NumElems.toString(10)
- << (NumElems < Bindings.size());
+ << (unsigned)NumElems.getLimitedValue(UINT_MAX)
+ << toString(NumElems, 10) << (NumElems < Bindings.size());
return true;
}
@@ -969,15 +974,20 @@ static bool checkComplexDecomposition(Sema &S,
}
static std::string printTemplateArgs(const PrintingPolicy &PrintingPolicy,
- TemplateArgumentListInfo &Args) {
+ TemplateArgumentListInfo &Args,
+ const TemplateParameterList *Params) {
SmallString<128> SS;
llvm::raw_svector_ostream OS(SS);
bool First = true;
+ unsigned I = 0;
for (auto &Arg : Args.arguments()) {
if (!First)
OS << ", ";
- Arg.getArgument().print(PrintingPolicy, OS);
+ Arg.getArgument().print(
+ PrintingPolicy, OS,
+ TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
First = false;
+ I++;
}
return std::string(OS.str());
}
@@ -989,7 +999,7 @@ static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
auto DiagnoseMissing = [&] {
if (DiagID)
S.Diag(Loc, DiagID) << printTemplateArgs(S.Context.getPrintingPolicy(),
- Args);
+ Args, /*Params*/ nullptr);
return true;
};
@@ -1027,7 +1037,8 @@ static bool lookupStdTypeTraitMember(Sema &S, LookupResult &TraitMemberLookup,
if (DiagID)
S.RequireCompleteType(
Loc, TraitTy, DiagID,
- printTemplateArgs(S.Context.getPrintingPolicy(), Args));
+ printTemplateArgs(S.Context.getPrintingPolicy(), Args,
+ TraitTD->getTemplateParameters()));
return true;
}
@@ -1082,7 +1093,8 @@ static IsTupleLike isTupleLike(Sema &S, SourceLocation Loc, QualType T,
Sema::SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) override {
return S.Diag(Loc, diag::err_decomp_decl_std_tuple_size_not_constant)
- << printTemplateArgs(S.Context.getPrintingPolicy(), Args);
+ << printTemplateArgs(S.Context.getPrintingPolicy(), Args,
+ /*Params*/ nullptr);
}
} Diagnoser(R, Args);
@@ -1118,7 +1130,8 @@ static QualType getTupleLikeElementType(Sema &S, SourceLocation Loc,
if (!TD) {
R.suppressDiagnostics();
S.Diag(Loc, diag::err_decomp_decl_std_tuple_element_not_specialized)
- << printTemplateArgs(S.Context.getPrintingPolicy(), Args);
+ << printTemplateArgs(S.Context.getPrintingPolicy(), Args,
+ /*Params*/ nullptr);
if (!R.empty())
S.Diag(R.getRepresentativeDecl()->getLocation(), diag::note_declared_at);
return QualType();
@@ -1151,7 +1164,7 @@ static bool checkTupleLikeDecomposition(Sema &S,
S.Diag(Src->getLocation(), diag::err_decomp_decl_wrong_number_bindings)
<< DecompType << (unsigned)Bindings.size()
<< (unsigned)TupleSize.getLimitedValue(UINT_MAX)
- << TupleSize.toString(10) << (TupleSize < Bindings.size());
+ << toString(TupleSize, 10) << (TupleSize < Bindings.size());
return true;
}
@@ -1820,9 +1833,11 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
case Decl::UsingDirective:
case Decl::UnresolvedUsingTypename:
case Decl::UnresolvedUsingValue:
+ case Decl::UsingEnum:
// - static_assert-declarations
// - using-declarations,
// - using-directives,
+ // - using-enum-declaration
continue;
case Decl::Typedef:
@@ -2493,6 +2508,14 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
}
}
+ // Make sure that we don't make an ill-formed AST where the type of the
+ // Class is non-dependent and its attached base class specifier is an
+ // dependent type, which violates invariants in many clang code paths (e.g.
+ // constexpr evaluator). If this case happens (in errory-recovery mode), we
+ // explicitly mark the Class decl invalid. The diagnostic was already
+ // emitted.
+ if (!Class->getTypeForDecl()->isDependentType())
+ Class->setInvalidDecl();
return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
Class->getTagKind() == TTK_Class,
Access, TInfo, EllipsisLoc);
@@ -4122,13 +4145,9 @@ ValueDecl *Sema::tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
IdentifierInfo *MemberOrBase) {
if (SS.getScopeRep() || TemplateTypeTy)
return nullptr;
- DeclContext::lookup_result Result = ClassDecl->lookup(MemberOrBase);
- if (Result.empty())
- return nullptr;
- ValueDecl *Member;
- if ((Member = dyn_cast<FieldDecl>(Result.front())) ||
- (Member = dyn_cast<IndirectFieldDecl>(Result.front())))
- return Member;
+ for (auto *D : ClassDecl->lookup(MemberOrBase))
+ if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D))
+ return cast<ValueDecl>(D);
return nullptr;
}
@@ -5231,6 +5250,20 @@ static const void *GetKeyForMember(ASTContext &Context,
return Member->getAnyMember()->getCanonicalDecl();
}
+static void AddInitializerToDiag(const Sema::SemaDiagnosticBuilder &Diag,
+ const CXXCtorInitializer *Previous,
+ const CXXCtorInitializer *Current) {
+ if (Previous->isAnyMemberInitializer())
+ Diag << 0 << Previous->getAnyMember();
+ else
+ Diag << 1 << Previous->getTypeSourceInfo()->getType();
+
+ if (Current->isAnyMemberInitializer())
+ Diag << 0 << Current->getAnyMember();
+ else
+ Diag << 1 << Current->getTypeSourceInfo()->getType();
+}
+
static void DiagnoseBaseOrMemInitializerOrder(
Sema &SemaRef, const CXXConstructorDecl *Constructor,
ArrayRef<CXXCtorInitializer *> Inits) {
@@ -5280,10 +5313,15 @@ static void DiagnoseBaseOrMemInitializerOrder(
unsigned NumIdealInits = IdealInitKeys.size();
unsigned IdealIndex = 0;
- CXXCtorInitializer *PrevInit = nullptr;
+ // Track initializers that are in an incorrect order for either a warning or
+ // note if multiple ones occur.
+ SmallVector<unsigned> WarnIndexes;
+ // Correlates the index of an initializer in the init-list to the index of
+ // the field/base in the class.
+ SmallVector<std::pair<unsigned, unsigned>, 32> CorrelatedInitOrder;
+
for (unsigned InitIndex = 0; InitIndex != Inits.size(); ++InitIndex) {
- CXXCtorInitializer *Init = Inits[InitIndex];
- const void *InitKey = GetKeyForMember(SemaRef.Context, Init);
+ const void *InitKey = GetKeyForMember(SemaRef.Context, Inits[InitIndex]);
// Scan forward to try to find this initializer in the idealized
// initializers list.
@@ -5294,20 +5332,8 @@ static void DiagnoseBaseOrMemInitializerOrder(
// If we didn't find this initializer, it must be because we
// scanned past it on a previous iteration. That can only
// happen if we're out of order; emit a warning.
- if (IdealIndex == NumIdealInits && PrevInit) {
- Sema::SemaDiagnosticBuilder D =
- SemaRef.Diag(PrevInit->getSourceLocation(),
- diag::warn_initializer_out_of_order);
-
- if (PrevInit->isAnyMemberInitializer())
- D << 0 << PrevInit->getAnyMember()->getDeclName();
- else
- D << 1 << PrevInit->getTypeSourceInfo()->getType();
-
- if (Init->isAnyMemberInitializer())
- D << 0 << Init->getAnyMember()->getDeclName();
- else
- D << 1 << Init->getTypeSourceInfo()->getType();
+ if (IdealIndex == NumIdealInits && InitIndex) {
+ WarnIndexes.push_back(InitIndex);
// Move back to the initializer's location in the ideal list.
for (IdealIndex = 0; IdealIndex != NumIdealInits; ++IdealIndex)
@@ -5317,8 +5343,54 @@ static void DiagnoseBaseOrMemInitializerOrder(
assert(IdealIndex < NumIdealInits &&
"initializer not found in initializer list");
}
+ CorrelatedInitOrder.emplace_back(IdealIndex, InitIndex);
+ }
- PrevInit = Init;
+ if (WarnIndexes.empty())
+ return;
+
+ // Sort based on the ideal order, first in the pair.
+ llvm::sort(CorrelatedInitOrder,
+ [](auto &LHS, auto &RHS) { return LHS.first < RHS.first; });
+
+ // Introduce a new scope as SemaDiagnosticBuilder needs to be destroyed to
+ // emit the diagnostic before we can try adding notes.
+ {
+ Sema::SemaDiagnosticBuilder D = SemaRef.Diag(
+ Inits[WarnIndexes.front() - 1]->getSourceLocation(),
+ WarnIndexes.size() == 1 ? diag::warn_initializer_out_of_order
+ : diag::warn_some_initializers_out_of_order);
+
+ for (unsigned I = 0; I < CorrelatedInitOrder.size(); ++I) {
+ if (CorrelatedInitOrder[I].second == I)
+ continue;
+ // Ideally we would be using InsertFromRange here, but clang doesn't
+ // appear to handle InsertFromRange correctly when the source range is
+ // modified by another fix-it.
+ D << FixItHint::CreateReplacement(
+ Inits[I]->getSourceRange(),
+ Lexer::getSourceText(
+ CharSourceRange::getTokenRange(
+ Inits[CorrelatedInitOrder[I].second]->getSourceRange()),
+ SemaRef.getSourceManager(), SemaRef.getLangOpts()));
+ }
+
+ // If there is only 1 item out of order, the warning expects the name and
+ // type of each being added to it.
+ if (WarnIndexes.size() == 1) {
+ AddInitializerToDiag(D, Inits[WarnIndexes.front() - 1],
+ Inits[WarnIndexes.front()]);
+ return;
+ }
+ }
+ // More than 1 item to warn, create notes letting the user know which ones
+ // are bad.
+ for (unsigned WarnIndex : WarnIndexes) {
+ const clang::CXXCtorInitializer *PrevInit = Inits[WarnIndex - 1];
+ auto D = SemaRef.Diag(PrevInit->getSourceLocation(),
+ diag::note_initializer_out_of_order);
+ AddInitializerToDiag(D, PrevInit, Inits[WarnIndex]);
+ D << PrevInit->getSourceRange();
}
}
@@ -5386,7 +5458,7 @@ bool CheckRedundantUnionInit(Sema &S,
return false;
}
-}
+} // namespace
/// ActOnMemInitializers - Handle the member initializers for a constructor.
void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
@@ -6928,7 +7000,7 @@ public:
: S(S), UseLoc(UseLoc) {
bool DiagnosedMultipleConstructedBases = false;
CXXRecordDecl *ConstructedBase = nullptr;
- UsingDecl *ConstructedBaseUsing = nullptr;
+ BaseUsingDecl *ConstructedBaseIntroducer = nullptr;
// Find the set of such base class subobjects and check that there's a
// unique constructed subobject.
@@ -6952,18 +7024,18 @@ public:
// of type B, the program is ill-formed.
if (!ConstructedBase) {
ConstructedBase = DConstructedBase;
- ConstructedBaseUsing = D->getUsingDecl();
+ ConstructedBaseIntroducer = D->getIntroducer();
} else if (ConstructedBase != DConstructedBase &&
!Shadow->isInvalidDecl()) {
if (!DiagnosedMultipleConstructedBases) {
S.Diag(UseLoc, diag::err_ambiguous_inherited_constructor)
<< Shadow->getTargetDecl();
- S.Diag(ConstructedBaseUsing->getLocation(),
- diag::note_ambiguous_inherited_constructor_using)
+ S.Diag(ConstructedBaseIntroducer->getLocation(),
+ diag::note_ambiguous_inherited_constructor_using)
<< ConstructedBase;
DiagnosedMultipleConstructedBases = true;
}
- S.Diag(D->getUsingDecl()->getLocation(),
+ S.Diag(D->getIntroducer()->getLocation(),
diag::note_ambiguous_inherited_constructor_using)
<< DConstructedBase;
}
@@ -7220,6 +7292,9 @@ void Sema::CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *FD) {
return;
}
+ if (DefKind.isComparison())
+ UnusedPrivateFields.clear();
+
if (DefKind.isSpecialMember()
? CheckExplicitlyDefaultedSpecialMember(cast<CXXMethodDecl>(FD),
DefKind.asSpecialMember())
@@ -7624,7 +7699,7 @@ public:
private:
Subobject getCompleteObject() {
- return Subobject{Subobject::CompleteObject, nullptr, FD->getLocation()};
+ return Subobject{Subobject::CompleteObject, RD, FD->getLocation()};
}
Subobject getBase(CXXBaseSpecifier *Base) {
@@ -7677,12 +7752,11 @@ private:
if (Args[0]->getType()->isOverloadableType())
S.LookupOverloadedBinOp(CandidateSet, OO, Fns, Args);
- else {
+ else
// FIXME: We determine whether this is a valid expression by checking to
// see if there's a viable builtin operator candidate for it. That isn't
// really what the rules ask us to do, but should give the right results.
S.AddBuiltinOperatorCandidates(OO, FD->getLocation(), Args, CandidateSet);
- }
Result R;
@@ -7726,11 +7800,14 @@ private:
return Result::deleted();
}
- // C++2a [class.compare.default]p3 [P2002R0]:
- // A defaulted comparison function is constexpr-compatible if [...]
- // no overlod resolution performed [...] results in a non-constexpr
- // function.
+ bool NeedsDeducing =
+ OO == OO_Spaceship && FD->getReturnType()->isUndeducedAutoType();
+
if (FunctionDecl *BestFD = Best->Function) {
+ // C++2a [class.compare.default]p3 [P2002R0]:
+ // A defaulted comparison function is constexpr-compatible if
+ // [...] no overlod resolution performed [...] results in a
+ // non-constexpr function.
assert(!BestFD->isDeleted() && "wrong overload resolution result");
// If it's not constexpr, explain why not.
if (Diagnose == ExplainConstexpr && !BestFD->isConstexpr()) {
@@ -7743,10 +7820,8 @@ private:
return Result::deleted();
}
R.Constexpr &= BestFD->isConstexpr();
- }
- if (OO == OO_Spaceship && FD->getReturnType()->isUndeducedAutoType()) {
- if (auto *BestFD = Best->Function) {
+ if (NeedsDeducing) {
// If any callee has an undeduced return type, deduce it now.
// FIXME: It's not clear how a failure here should be handled. For
// now, we produce an eager diagnostic, because that is forward
@@ -7772,10 +7847,9 @@ private:
}
return Result::deleted();
}
- if (auto *Info = S.Context.CompCategories.lookupInfoForType(
- BestFD->getCallResultType())) {
- R.Category = Info->Kind;
- } else {
+ auto *Info = S.Context.CompCategories.lookupInfoForType(
+ BestFD->getCallResultType());
+ if (!Info) {
if (Diagnose == ExplainDeleted) {
S.Diag(Subobj.Loc, diag::note_defaulted_comparison_cannot_deduce)
<< Subobj.Kind << Subobj.Decl
@@ -7786,9 +7860,18 @@ private:
}
return Result::deleted();
}
- } else {
+ R.Category = Info->Kind;
+ }
+ } else {
+ QualType T = Best->BuiltinParamTypes[0];
+ assert(T == Best->BuiltinParamTypes[1] &&
+ "builtin comparison for different types?");
+ assert(Best->BuiltinParamTypes[2].isNull() &&
+ "invalid builtin comparison");
+
+ if (NeedsDeducing) {
Optional<ComparisonCategoryType> Cat =
- getComparisonCategoryForBuiltinCmp(Args[0]->getType());
+ getComparisonCategoryForBuiltinCmp(T);
assert(Cat && "no category for builtin comparison?");
R.Category = *Cat;
}
@@ -8249,7 +8332,7 @@ private:
assert(!R->isUndeducedType() && "type should have been deduced already");
// Don't bother forming a no-op cast in the common case.
- if (E->isRValue() && S.Context.hasSameType(E->getType(), R))
+ if (E->isPRValue() && S.Context.hasSameType(E->getType(), R))
return E;
return S.BuildCXXNamedCast(Loc, tok::kw_static_cast,
S.Context.getTrivialTypeSourceInfo(R, Loc), E,
@@ -9660,9 +9743,9 @@ public:
bool foundSameNameMethod = false;
SmallVector<CXXMethodDecl *, 8> overloadedMethods;
- for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- NamedDecl *D = Path.Decls.front();
+ for (Path.Decls = BaseRecord->lookup(Name).begin();
+ Path.Decls != DeclContext::lookup_iterator(); ++Path.Decls) {
+ NamedDecl *D = *Path.Decls;
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
MD = MD->getCanonicalDecl();
foundSameNameMethod = true;
@@ -10814,26 +10897,6 @@ static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
NamespaceDecl *PrevNS) {
assert(*IsInline != PrevNS->isInline());
- // HACK: Work around a bug in libstdc++4.6's <atomic>, where
- // std::__atomic[0,1,2] are defined as non-inline namespaces, then reopened as
- // inline namespaces, with the intention of bringing names into namespace std.
- //
- // We support this just well enough to get that case working; this is not
- // sufficient to support reopening namespaces as inline in general.
- if (*IsInline && II && II->getName().startswith("__atomic") &&
- S.getSourceManager().isInSystemHeader(Loc)) {
- // Mark all prior declarations of the namespace as inline.
- for (NamespaceDecl *NS = PrevNS->getMostRecentDecl(); NS;
- NS = NS->getPreviousDecl())
- NS->setInline(*IsInline);
- // Patch up the lookup table for the containing namespace. This isn't really
- // correct, but it's good enough for this particular case.
- for (auto *I : PrevNS->decls())
- if (auto *ND = dyn_cast<NamedDecl>(I))
- PrevNS->getParent()->makeDeclVisibleInContext(ND);
- return;
- }
-
if (PrevNS->isInline())
// The user probably just forgot the 'inline', so suggest that it
// be added back.
@@ -11551,7 +11614,41 @@ Decl *Sema::ActOnUsingDeclaration(Scope *S, AccessSpecifier AS,
NamedDecl *UD =
BuildUsingDeclaration(S, AS, UsingLoc, TypenameLoc.isValid(), TypenameLoc,
SS, TargetNameInfo, EllipsisLoc, AttrList,
- /*IsInstantiation*/false);
+ /*IsInstantiation*/ false,
+ AttrList.hasAttribute(ParsedAttr::AT_UsingIfExists));
+ if (UD)
+ PushOnScopeChains(UD, S, /*AddToContext*/ false);
+
+ return UD;
+}
+
+Decl *Sema::ActOnUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ SourceLocation EnumLoc,
+ const DeclSpec &DS) {
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_error:
+ // This will already have been diagnosed
+ return nullptr;
+
+ case DeclSpec::TST_enum:
+ break;
+
+ case DeclSpec::TST_typename:
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_using_enum_is_dependent);
+ return nullptr;
+
+ default:
+ llvm_unreachable("unexpected DeclSpec type");
+ }
+
+ // As with enum-decls, we ignore attributes for now.
+ auto *Enum = cast<EnumDecl>(DS.getRepAsDecl());
+ if (auto *Def = Enum->getDefinition())
+ Enum = Def;
+
+ auto *UD = BuildUsingEnumDeclaration(S, AS, UsingLoc, EnumLoc,
+ DS.getTypeSpecTypeNameLoc(), Enum);
if (UD)
PushOnScopeChains(UD, S, /*AddToContext*/ false);
@@ -11571,13 +11668,19 @@ IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2) {
return Context.hasSameType(TD1->getUnderlyingType(),
TD2->getUnderlyingType());
+ // Two using_if_exists using-declarations are equivalent if both are
+ // unresolved.
+ if (isa<UnresolvedUsingIfExistsDecl>(D1) &&
+ isa<UnresolvedUsingIfExistsDecl>(D2))
+ return true;
+
return false;
}
/// Determines whether to create a using shadow decl for a particular
/// decl, given the set of decls existing prior to this using lookup.
-bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
+bool Sema::CheckUsingShadowDecl(BaseUsingDecl *BUD, NamedDecl *Orig,
const LookupResult &Previous,
UsingShadowDecl *&PrevShadow) {
// Diagnose finding a decl which is not from a base class of the
@@ -11596,38 +11699,39 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
// This is invalid (during instantiation) in C++03 because B::foo
// resolves to the using decl in B, which is not a base class of D<T>.
// We can't diagnose it immediately because C<T> is an unknown
- // specialization. The UsingShadowDecl in D<T> then points directly
+ // specialization. The UsingShadowDecl in D<T> then points directly
// to A::foo, which will look well-formed when we instantiate.
// The right solution is to not collapse the shadow-decl chain.
- if (!getLangOpts().CPlusPlus11 && CurContext->isRecord()) {
- DeclContext *OrigDC = Orig->getDeclContext();
-
- // Handle enums and anonymous structs.
- if (isa<EnumDecl>(OrigDC)) OrigDC = OrigDC->getParent();
- CXXRecordDecl *OrigRec = cast<CXXRecordDecl>(OrigDC);
- while (OrigRec->isAnonymousStructOrUnion())
- OrigRec = cast<CXXRecordDecl>(OrigRec->getDeclContext());
-
- if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(OrigRec)) {
- if (OrigDC == CurContext) {
- Diag(Using->getLocation(),
- diag::err_using_decl_nested_name_specifier_is_current_class)
- << Using->getQualifierLoc().getSourceRange();
+ if (!getLangOpts().CPlusPlus11 && CurContext->isRecord())
+ if (auto *Using = dyn_cast<UsingDecl>(BUD)) {
+ DeclContext *OrigDC = Orig->getDeclContext();
+
+ // Handle enums and anonymous structs.
+ if (isa<EnumDecl>(OrigDC))
+ OrigDC = OrigDC->getParent();
+ CXXRecordDecl *OrigRec = cast<CXXRecordDecl>(OrigDC);
+ while (OrigRec->isAnonymousStructOrUnion())
+ OrigRec = cast<CXXRecordDecl>(OrigRec->getDeclContext());
+
+ if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(OrigRec)) {
+ if (OrigDC == CurContext) {
+ Diag(Using->getLocation(),
+ diag::err_using_decl_nested_name_specifier_is_current_class)
+ << Using->getQualifierLoc().getSourceRange();
+ Diag(Orig->getLocation(), diag::note_using_decl_target);
+ Using->setInvalidDecl();
+ return true;
+ }
+
+ Diag(Using->getQualifierLoc().getBeginLoc(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << Using->getQualifier() << cast<CXXRecordDecl>(CurContext)
+ << Using->getQualifierLoc().getSourceRange();
Diag(Orig->getLocation(), diag::note_using_decl_target);
Using->setInvalidDecl();
return true;
}
-
- Diag(Using->getQualifierLoc().getBeginLoc(),
- diag::err_using_decl_nested_name_specifier_is_not_base_class)
- << Using->getQualifier()
- << cast<CXXRecordDecl>(CurContext)
- << Using->getQualifierLoc().getSourceRange();
- Diag(Orig->getLocation(), diag::note_using_decl_target);
- Using->setInvalidDecl();
- return true;
}
- }
if (Previous.empty()) return false;
@@ -11648,7 +11752,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
// We can have UsingDecls in our Previous results because we use the same
// LookupResult for checking whether the UsingDecl itself is a valid
// redeclaration.
- if (isa<UsingDecl>(D) || isa<UsingPackDecl>(D))
+ if (isa<UsingDecl>(D) || isa<UsingPackDecl>(D) || isa<UsingEnumDecl>(D))
continue;
if (auto *RD = dyn_cast<CXXRecordDecl>(D)) {
@@ -11660,7 +11764,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
!isa<UnresolvedUsingValueDecl>(Target) &&
DiagnoseClassNameShadow(
CurContext,
- DeclarationNameInfo(Using->getDeclName(), Using->getLocation())))
+ DeclarationNameInfo(BUD->getDeclName(), BUD->getLocation())))
return true;
}
@@ -11681,6 +11785,20 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
if (FoundEquivalentDecl)
return false;
+ // Always emit a diagnostic for a mismatch between an unresolved
+ // using_if_exists and a resolved using declaration in either direction.
+ if (isa<UnresolvedUsingIfExistsDecl>(Target) !=
+ (isa_and_nonnull<UnresolvedUsingIfExistsDecl>(NonTag))) {
+ if (!NonTag && !Tag)
+ return false;
+ Diag(BUD->getLocation(), diag::err_using_decl_conflict);
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag((NonTag ? NonTag : Tag)->getLocation(),
+ diag::note_using_decl_conflict);
+ BUD->setInvalidDecl();
+ return true;
+ }
+
if (FunctionDecl *FD = Target->getAsFunction()) {
NamedDecl *OldDecl = nullptr;
switch (CheckOverload(nullptr, FD, Previous, OldDecl,
@@ -11689,7 +11807,7 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
return false;
case Ovl_NonFunction:
- Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(BUD->getLocation(), diag::err_using_decl_conflict);
break;
// We found a decl with the exact signature.
@@ -11701,13 +11819,13 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
return true;
// If we're not in a record, this is an error.
- Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(BUD->getLocation(), diag::err_using_decl_conflict);
break;
}
Diag(Target->getLocation(), diag::note_using_decl_target);
Diag(OldDecl->getLocation(), diag::note_using_decl_conflict);
- Using->setInvalidDecl();
+ BUD->setInvalidDecl();
return true;
}
@@ -11717,20 +11835,20 @@ bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
// No conflict between a tag and a non-tag.
if (!Tag) return false;
- Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(BUD->getLocation(), diag::err_using_decl_conflict);
Diag(Target->getLocation(), diag::note_using_decl_target);
Diag(Tag->getLocation(), diag::note_using_decl_conflict);
- Using->setInvalidDecl();
+ BUD->setInvalidDecl();
return true;
}
// No conflict between a tag and a non-tag.
if (!NonTag) return false;
- Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(BUD->getLocation(), diag::err_using_decl_conflict);
Diag(Target->getLocation(), diag::note_using_decl_target);
Diag(NonTag->getLocation(), diag::note_using_decl_conflict);
- Using->setInvalidDecl();
+ BUD->setInvalidDecl();
return true;
}
@@ -11745,8 +11863,7 @@ static bool isVirtualDirectBase(CXXRecordDecl *Derived, CXXRecordDecl *Base) {
}
/// Builds a shadow declaration corresponding to a 'using' declaration.
-UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S,
- UsingDecl *UD,
+UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S, BaseUsingDecl *BUD,
NamedDecl *Orig,
UsingShadowDecl *PrevDecl) {
// If we resolved to another shadow declaration, just coalesce them.
@@ -11762,19 +11879,20 @@ UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S,
UsingShadowDecl *Shadow;
if (NonTemplateTarget && isa<CXXConstructorDecl>(NonTemplateTarget)) {
+ UsingDecl *Using = cast<UsingDecl>(BUD);
bool IsVirtualBase =
isVirtualDirectBase(cast<CXXRecordDecl>(CurContext),
- UD->getQualifier()->getAsRecordDecl());
+ Using->getQualifier()->getAsRecordDecl());
Shadow = ConstructorUsingShadowDecl::Create(
- Context, CurContext, UD->getLocation(), UD, Orig, IsVirtualBase);
+ Context, CurContext, Using->getLocation(), Using, Orig, IsVirtualBase);
} else {
- Shadow = UsingShadowDecl::Create(Context, CurContext, UD->getLocation(), UD,
- Target);
+ Shadow = UsingShadowDecl::Create(Context, CurContext, BUD->getLocation(),
+ Target->getDeclName(), BUD, Target);
}
- UD->addShadowDecl(Shadow);
+ BUD->addShadowDecl(Shadow);
- Shadow->setAccess(UD->getAccess());
- if (Orig->isInvalidDecl() || UD->isInvalidDecl())
+ Shadow->setAccess(BUD->getAccess());
+ if (Orig->isInvalidDecl() || BUD->isInvalidDecl())
Shadow->setInvalidDecl();
Shadow->setPreviousDecl(PrevDecl);
@@ -11830,7 +11948,7 @@ void Sema::HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow) {
}
// ...and the using decl.
- Shadow->getUsingDecl()->removeShadowDecl(Shadow);
+ Shadow->getIntroducer()->removeShadowDecl(Shadow);
// TODO: complain somehow if Shadow was used. It shouldn't
// be possible for this to happen, because...?
@@ -11936,6 +12054,29 @@ private:
};
} // end anonymous namespace
+/// Remove decls we can't actually see from a lookup being used to declare
+/// shadow using decls.
+///
+/// \param S - The scope of the potential shadow decl
+/// \param Previous - The lookup of a potential shadow decl's name.
+void Sema::FilterUsingLookup(Scope *S, LookupResult &Previous) {
+ // It is really dumb that we have to do this.
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (!isDeclInScope(D, CurContext, S))
+ F.erase();
+ // If we found a local extern declaration that's not ordinarily visible,
+ // and this declaration is being added to a non-block scope, ignore it.
+ // We're only checking for scope conflicts here, not also for violations
+ // of the linkage rules.
+ else if (!CurContext->isFunctionOrMethod() && D->isLocalExternDecl() &&
+ !(D->getIdentifierNamespace() & Decl::IDNS_Ordinary))
+ F.erase();
+ }
+ F.done();
+}
+
/// Builds a using declaration.
///
/// \param IsInstantiation - Whether this call arises from an
@@ -11945,7 +12086,8 @@ NamedDecl *Sema::BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
- const ParsedAttributesView &AttrList, bool IsInstantiation) {
+ const ParsedAttributesView &AttrList, bool IsInstantiation,
+ bool IsUsingIfExists) {
assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
SourceLocation IdentLoc = NameInfo.getLoc();
assert(IdentLoc.isValid() && "Invalid TargetName location.");
@@ -11968,21 +12110,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
if (S) {
LookupName(Previous, S);
- // It is really dumb that we have to do this.
- LookupResult::Filter F = Previous.makeFilter();
- while (F.hasNext()) {
- NamedDecl *D = F.next();
- if (!isDeclInScope(D, CurContext, S))
- F.erase();
- // If we found a local extern declaration that's not ordinarily visible,
- // and this declaration is being added to a non-block scope, ignore it.
- // We're only checking for scope conflicts here, not also for violations
- // of the linkage rules.
- else if (!CurContext->isFunctionOrMethod() && D->isLocalExternDecl() &&
- !(D->getIdentifierNamespace() & Decl::IDNS_Ordinary))
- F.erase();
- }
- F.done();
+ FilterUsingLookup(S, Previous);
} else {
assert(IsInstantiation && "no scope in non-instantiation");
if (CurContext->isRecord())
@@ -12009,15 +12137,22 @@ NamedDecl *Sema::BuildUsingDeclaration(
SS, IdentLoc, Previous))
return nullptr;
- // Check for bad qualifiers.
- if (CheckUsingDeclQualifier(UsingLoc, HasTypenameKeyword, SS, NameInfo,
- IdentLoc))
+ // 'using_if_exists' doesn't make sense on an inherited constructor.
+ if (IsUsingIfExists && UsingName.getName().getNameKind() ==
+ DeclarationName::CXXConstructorName) {
+ Diag(UsingLoc, diag::err_using_if_exists_on_ctor);
return nullptr;
+ }
DeclContext *LookupContext = computeDeclContext(SS);
- NamedDecl *D;
NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
if (!LookupContext || EllipsisLoc.isValid()) {
+ NamedDecl *D;
+ // Dependent scope, or an unexpanded pack
+ if (!LookupContext && CheckUsingDeclQualifier(UsingLoc, HasTypenameKeyword,
+ SS, NameInfo, IdentLoc))
+ return nullptr;
+
if (HasTypenameKeyword) {
// FIXME: not all declaration name kinds are legal here
D = UnresolvedUsingTypenameDecl::Create(Context, CurContext,
@@ -12031,6 +12166,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
}
D->setAccess(AS);
CurContext->addDecl(D);
+ ProcessDeclAttributeList(S, D, AttrList);
return D;
}
@@ -12040,6 +12176,7 @@ NamedDecl *Sema::BuildUsingDeclaration(
UsingName, HasTypenameKeyword);
UD->setAccess(AS);
CurContext->addDecl(UD);
+ ProcessDeclAttributeList(S, UD, AttrList);
UD->setInvalidDecl(Invalid);
return UD;
};
@@ -12068,16 +12205,25 @@ NamedDecl *Sema::BuildUsingDeclaration(
LookupQualifiedName(R, LookupContext);
+ // Validate the context, now we have a lookup
+ if (CheckUsingDeclQualifier(UsingLoc, HasTypenameKeyword, SS, NameInfo,
+ IdentLoc, &R))
+ return nullptr;
+
+ if (R.empty() && IsUsingIfExists)
+ R.addDecl(UnresolvedUsingIfExistsDecl::Create(Context, CurContext, UsingLoc,
+ UsingName.getName()),
+ AS_public);
+
// Try to correct typos if possible. If constructor name lookup finds no
// results, that means the named class has no explicit constructors, and we
// suppressed declaring implicit ones (probably because it's dependent or
// invalid).
if (R.empty() &&
NameInfo.getName().getNameKind() != DeclarationName::CXXConstructorName) {
- // HACK: Work around a bug in libstdc++'s detection of ::gets. Sometimes
- // it will believe that glibc provides a ::gets in cases where it does not,
- // and will try to pull it into namespace std with a using-declaration.
- // Just ignore the using-declaration in that case.
+ // HACK 2017-01-08: Work around an issue with libstdc++'s detection of
+ // ::gets. Sometimes it believes that glibc provides a ::gets in cases where
+ // it does not. The issue was fixed in libstdc++ 6.3 (2016-12-21) and later.
auto *II = NameInfo.getName().getAsIdentifierInfo();
if (getLangOpts().CPlusPlus14 && II && II->isStr("gets") &&
CurContext->isStdNamespace() &&
@@ -12142,7 +12288,8 @@ NamedDecl *Sema::BuildUsingDeclaration(
if (HasTypenameKeyword) {
// If we asked for a typename and got a non-type decl, error out.
- if (!R.getAsSingle<TypeDecl>()) {
+ if (!R.getAsSingle<TypeDecl>() &&
+ !R.getAsSingle<UnresolvedUsingIfExistsDecl>()) {
Diag(IdentLoc, diag::err_using_typename_non_type);
for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
Diag((*I)->getUnderlyingDecl()->getLocation(),
@@ -12168,16 +12315,6 @@ NamedDecl *Sema::BuildUsingDeclaration(
return BuildInvalid();
}
- // C++14 [namespace.udecl]p7:
- // A using-declaration shall not name a scoped enumerator.
- if (auto *ED = R.getAsSingle<EnumConstantDecl>()) {
- if (cast<EnumDecl>(ED->getDeclContext())->isScoped()) {
- Diag(IdentLoc, diag::err_using_decl_can_not_refer_to_scoped_enum)
- << SS.getRange();
- return BuildInvalid();
- }
- }
-
UsingDecl *UD = BuildValid();
// Some additional rules apply to inheriting constructors.
@@ -12199,6 +12336,61 @@ NamedDecl *Sema::BuildUsingDeclaration(
return UD;
}
+NamedDecl *Sema::BuildUsingEnumDeclaration(Scope *S, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ SourceLocation EnumLoc,
+ SourceLocation NameLoc,
+ EnumDecl *ED) {
+ bool Invalid = false;
+
+ if (CurContext->getRedeclContext()->isRecord()) {
+ /// In class scope, check if this is a duplicate, for better a diagnostic.
+ DeclarationNameInfo UsingEnumName(ED->getDeclName(), NameLoc);
+ LookupResult Previous(*this, UsingEnumName, LookupUsingDeclName,
+ ForVisibleRedeclaration);
+
+ LookupName(Previous, S);
+
+ for (NamedDecl *D : Previous)
+ if (UsingEnumDecl *UED = dyn_cast<UsingEnumDecl>(D))
+ if (UED->getEnumDecl() == ED) {
+ Diag(UsingLoc, diag::err_using_enum_decl_redeclaration)
+ << SourceRange(EnumLoc, NameLoc);
+ Diag(D->getLocation(), diag::note_using_enum_decl) << 1;
+ Invalid = true;
+ break;
+ }
+ }
+
+ if (RequireCompleteEnumDecl(ED, NameLoc))
+ Invalid = true;
+
+ UsingEnumDecl *UD = UsingEnumDecl::Create(Context, CurContext, UsingLoc,
+ EnumLoc, NameLoc, ED);
+ UD->setAccess(AS);
+ CurContext->addDecl(UD);
+
+ if (Invalid) {
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ // Create the shadow decls for each enumerator
+ for (EnumConstantDecl *EC : ED->enumerators()) {
+ UsingShadowDecl *PrevDecl = nullptr;
+ DeclarationNameInfo DNI(EC->getDeclName(), EC->getLocation());
+ LookupResult Previous(*this, DNI, LookupOrdinaryName,
+ ForVisibleRedeclaration);
+ LookupName(Previous, S);
+ FilterUsingLookup(S, Previous);
+
+ if (!CheckUsingShadowDecl(UD, EC, Previous, PrevDecl))
+ BuildUsingShadowDecl(S, UD, EC, PrevDecl);
+ }
+
+ return UD;
+}
+
NamedDecl *Sema::BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions) {
assert(isa<UnresolvedUsingValueDecl>(InstantiatedFrom) ||
@@ -12317,48 +12509,83 @@ bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
return false;
}
-
/// Checks that the given nested-name qualifier used in a using decl
/// in the current context is appropriately related to the current
/// scope. If an error is found, diagnoses it and returns true.
-bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
- bool HasTypename,
+/// R is nullptr, if the caller has not (yet) done a lookup, otherwise it's the
+/// result of that lookup. UD is likewise nullptr, except when we have an
+/// already-populated UsingDecl whose shadow decls contain the same information
+/// (i.e. we're instantiating a UsingDecl with non-dependent scope).
+bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
- SourceLocation NameLoc) {
+ SourceLocation NameLoc,
+ const LookupResult *R, const UsingDecl *UD) {
DeclContext *NamedContext = computeDeclContext(SS);
+ assert(bool(NamedContext) == (R || UD) && !(R && UD) &&
+ "resolvable context must have exactly one set of decls");
+
+ // C++ 20 permits using an enumerator that does not have a class-hierarchy
+ // relationship.
+ bool Cxx20Enumerator = false;
+ if (NamedContext) {
+ EnumConstantDecl *EC = nullptr;
+ if (R)
+ EC = R->getAsSingle<EnumConstantDecl>();
+ else if (UD && UD->shadow_size() == 1)
+ EC = dyn_cast<EnumConstantDecl>(UD->shadow_begin()->getTargetDecl());
+ if (EC)
+ Cxx20Enumerator = getLangOpts().CPlusPlus20;
+
+ if (auto *ED = dyn_cast<EnumDecl>(NamedContext)) {
+ // C++14 [namespace.udecl]p7:
+ // A using-declaration shall not name a scoped enumerator.
+ // C++20 p1099 permits enumerators.
+ if (EC && R && ED->isScoped())
+ Diag(SS.getBeginLoc(),
+ getLangOpts().CPlusPlus20
+ ? diag::warn_cxx17_compat_using_decl_scoped_enumerator
+ : diag::ext_using_decl_scoped_enumerator)
+ << SS.getRange();
+
+ // We want to consider the scope of the enumerator
+ NamedContext = ED->getDeclContext();
+ }
+ }
if (!CurContext->isRecord()) {
// C++03 [namespace.udecl]p3:
// C++0x [namespace.udecl]p8:
// A using-declaration for a class member shall be a member-declaration.
+ // C++20 [namespace.udecl]p7
+ // ... other than an enumerator ...
// If we weren't able to compute a valid scope, it might validly be a
- // dependent class scope or a dependent enumeration unscoped scope. If
- // we have a 'typename' keyword, the scope must resolve to a class type.
- if ((HasTypename && !NamedContext) ||
- (NamedContext && NamedContext->getRedeclContext()->isRecord())) {
- auto *RD = NamedContext
- ? cast<CXXRecordDecl>(NamedContext->getRedeclContext())
- : nullptr;
- if (RD && RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), RD))
- RD = nullptr;
-
- Diag(NameLoc, diag::err_using_decl_can_not_refer_to_class_member)
+ // dependent class or enumeration scope. If we have a 'typename' keyword,
+ // the scope must resolve to a class type.
+ if (NamedContext ? !NamedContext->getRedeclContext()->isRecord()
+ : !HasTypename)
+ return false; // OK
+
+ Diag(NameLoc,
+ Cxx20Enumerator
+ ? diag::warn_cxx17_compat_using_decl_class_member_enumerator
+ : diag::err_using_decl_can_not_refer_to_class_member)
<< SS.getRange();
- // If we have a complete, non-dependent source type, try to suggest a
- // way to get the same effect.
- if (!RD)
- return true;
+ if (Cxx20Enumerator)
+ return false; // OK
- // Find what this using-declaration was referring to.
- LookupResult R(*this, NameInfo, LookupOrdinaryName);
- R.setHideTags(false);
- R.suppressDiagnostics();
- LookupQualifiedName(R, RD);
+ auto *RD = NamedContext
+ ? cast<CXXRecordDecl>(NamedContext->getRedeclContext())
+ : nullptr;
+ if (RD && !RequireCompleteDeclContext(const_cast<CXXScopeSpec &>(SS), RD)) {
+ // See if there's a helpful fixit
- if (R.getAsSingle<TypeDecl>()) {
+ if (!R) {
+ // We will have already diagnosed the problem on the template
+ // definition, Maybe we should do so again?
+ } else if (R->getAsSingle<TypeDecl>()) {
if (getLangOpts().CPlusPlus11) {
// Convert 'using X::Y;' to 'using Y = X::Y;'.
Diag(SS.getBeginLoc(), diag::note_using_decl_class_member_workaround)
@@ -12375,7 +12602,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
<< FixItHint::CreateInsertion(
InsertLoc, " " + NameInfo.getName().getAsString());
}
- } else if (R.getAsSingle<VarDecl>()) {
+ } else if (R->getAsSingle<VarDecl>()) {
// Don't provide a fixit outside C++11 mode; we don't want to suggest
// repeating the type of the static data member here.
FixItHint FixIt;
@@ -12388,7 +12615,7 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
Diag(UsingLoc, diag::note_using_decl_class_member_workaround)
<< 2 // reference declaration
<< FixIt;
- } else if (R.getAsSingle<EnumConstantDecl>()) {
+ } else if (R->getAsSingle<EnumConstantDecl>()) {
// Don't provide a fixit outside C++11 mode; we don't want to suggest
// repeating the type of the enumeration here, and we can't do so if
// the type is anonymous.
@@ -12404,15 +12631,11 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
<< (getLangOpts().CPlusPlus11 ? 4 : 3) // const[expr] variable
<< FixIt;
}
- return true;
}
- // Otherwise, this might be valid.
- return false;
+ return true; // Fail
}
- // The current scope is a record.
-
// If the named context is dependent, we can't decide much.
if (!NamedContext) {
// FIXME: in C++0x, we can diagnose if we can prove that the
@@ -12424,12 +12647,19 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
return false;
}
+ // The current scope is a record.
if (!NamedContext->isRecord()) {
// Ideally this would point at the last name in the specifier,
// but we don't have that level of source info.
- Diag(SS.getRange().getBegin(),
- diag::err_using_decl_nested_name_specifier_is_not_class)
- << SS.getScopeRep() << SS.getRange();
+ Diag(SS.getBeginLoc(),
+ Cxx20Enumerator
+ ? diag::warn_cxx17_compat_using_decl_non_member_enumerator
+ : diag::err_using_decl_nested_name_specifier_is_not_class)
+ << SS.getScopeRep() << SS.getRange();
+
+ if (Cxx20Enumerator)
+ return false; // OK
+
return true;
}
@@ -12445,19 +12675,25 @@ bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(
cast<CXXRecordDecl>(NamedContext))) {
+
+ if (Cxx20Enumerator) {
+ Diag(NameLoc, diag::warn_cxx17_compat_using_decl_non_member_enumerator)
+ << SS.getRange();
+ return false;
+ }
+
if (CurContext == NamedContext) {
- Diag(NameLoc,
+ Diag(SS.getBeginLoc(),
diag::err_using_decl_nested_name_specifier_is_current_class)
- << SS.getRange();
- return true;
+ << SS.getRange();
+ return !getLangOpts().CPlusPlus20;
}
if (!cast<CXXRecordDecl>(NamedContext)->isInvalidDecl()) {
- Diag(SS.getRange().getBegin(),
+ Diag(SS.getBeginLoc(),
diag::err_using_decl_nested_name_specifier_is_not_base_class)
- << SS.getScopeRep()
- << cast<CXXRecordDecl>(CurContext)
- << SS.getRange();
+ << SS.getScopeRep() << cast<CXXRecordDecl>(CurContext)
+ << SS.getRange();
}
return true;
}
@@ -12957,6 +13193,16 @@ void Sema::setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
auto QT = Context.getFunctionType(ResultTy, Args, EPI);
SpecialMem->setType(QT);
+
+ // During template instantiation of implicit special member functions we need
+ // a reliable TypeSourceInfo for the function prototype in order to allow
+ // functions to be substituted.
+ if (inTemplateInstantiation() &&
+ cast<CXXRecordDecl>(SpecialMem->getParent())->isLambda()) {
+ TypeSourceInfo *TSI =
+ Context.getTrivialTypeSourceInfo(SpecialMem->getType());
+ SpecialMem->setTypeSourceInfo(TSI);
+ }
}
CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
@@ -13592,11 +13838,11 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
Expr *From = FromB.build(S, Loc);
From = UnaryOperator::Create(
S.Context, From, UO_AddrOf, S.Context.getPointerType(From->getType()),
- VK_RValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
+ VK_PRValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
Expr *To = ToB.build(S, Loc);
To = UnaryOperator::Create(
S.Context, To, UO_AddrOf, S.Context.getPointerType(To->getType()),
- VK_RValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
+ VK_PRValue, OK_Ordinary, Loc, false, S.CurFPFeatureOverrides());
const Type *E = T->getBaseElementTypeUnsafe();
bool NeedsCollectableMemCpy =
@@ -13618,7 +13864,7 @@ buildMemcpyForAssignmentOp(Sema &S, SourceLocation Loc, QualType T,
return StmtError();
ExprResult MemCpyRef = S.BuildDeclRefExpr(MemCpy, S.Context.BuiltinFnTy,
- VK_RValue, Loc, nullptr);
+ VK_PRValue, Loc, nullptr);
assert(MemCpyRef.isUsable() && "Builtin reference cannot fail");
Expr *CallArgs[] = {
@@ -13833,7 +14079,8 @@ buildSingleCopyAssignRecursively(Sema &S, SourceLocation Loc, QualType T,
Expr *Comparison = BinaryOperator::Create(
S.Context, IterationVarRefRVal.build(S, Loc),
IntegerLiteral::Create(S.Context, Upper, SizeType, Loc), BO_NE,
- S.Context.BoolTy, VK_RValue, OK_Ordinary, Loc, S.CurFPFeatureOverrides());
+ S.Context.BoolTy, VK_PRValue, OK_Ordinary, Loc,
+ S.CurFPFeatureOverrides());
// Create the pre-increment of the iteration variable. We can determine
// whether the increment will overflow based on the value of the array
@@ -13988,12 +14235,20 @@ static void diagnoseDeprecatedCopyOperation(Sema &S, CXXMethodDecl *CopyOp) {
assert(UserDeclaredOperation);
}
- if (UserDeclaredOperation && UserDeclaredOperation->isUserProvided()) {
- S.Diag(UserDeclaredOperation->getLocation(),
- isa<CXXDestructorDecl>(UserDeclaredOperation)
- ? diag::warn_deprecated_copy_dtor_operation
- : diag::warn_deprecated_copy_operation)
- << RD << /*copy assignment*/ !isa<CXXConstructorDecl>(CopyOp);
+ if (UserDeclaredOperation) {
+ bool UDOIsUserProvided = UserDeclaredOperation->isUserProvided();
+ bool UDOIsDestructor = isa<CXXDestructorDecl>(UserDeclaredOperation);
+ bool IsCopyAssignment = !isa<CXXConstructorDecl>(CopyOp);
+ unsigned DiagID =
+ (UDOIsUserProvided && UDOIsDestructor)
+ ? diag::warn_deprecated_copy_with_user_provided_dtor
+ : (UDOIsUserProvided && !UDOIsDestructor)
+ ? diag::warn_deprecated_copy_with_user_provided_copy
+ : (!UDOIsUserProvided && UDOIsDestructor)
+ ? diag::warn_deprecated_copy_with_dtor
+ : diag::warn_deprecated_copy;
+ S.Diag(UserDeclaredOperation->getLocation(), DiagID)
+ << RD << IsCopyAssignment;
}
}
@@ -14244,10 +14499,7 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
/* Diagnose */ false);
}
- // Build an exception specification pointing back at this member.
- FunctionProtoType::ExtProtoInfo EPI =
- getImplicitMethodEPI(*this, MoveAssignment);
- MoveAssignment->setType(Context.getFunctionType(RetType, ArgType, EPI));
+ setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
@@ -14629,12 +14881,18 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
+ // During template instantiation of special member functions we need a
+ // reliable TypeSourceInfo for the parameter types in order to allow functions
+ // to be substituted.
+ TypeSourceInfo *TSI = nullptr;
+ if (inTemplateInstantiation() && ClassDecl->isLambda())
+ TSI = Context.getTrivialTypeSourceInfo(ArgType);
+
// Add the parameter to the constructor.
- ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
- ClassLoc, ClassLoc,
- /*IdentifierInfo=*/nullptr,
- ArgType, /*TInfo=*/nullptr,
- SC_None, nullptr);
+ ParmVarDecl *FromParam =
+ ParmVarDecl::Create(Context, CopyConstructor, ClassLoc, ClassLoc,
+ /*IdentifierInfo=*/nullptr, ArgType,
+ /*TInfo=*/TSI, SC_None, nullptr);
CopyConstructor->setParams(FromParam);
CopyConstructor->setTrivial(
@@ -14852,9 +15110,9 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
SynthesizedFunctionScope Scope(*this, Conv);
assert(!Conv->getReturnType()->isUndeducedType());
- QualType ConvRT = Conv->getType()->getAs<FunctionType>()->getReturnType();
+ QualType ConvRT = Conv->getType()->castAs<FunctionType>()->getReturnType();
CallingConv CC =
- ConvRT->getPointeeType()->getAs<FunctionType>()->getCallConv();
+ ConvRT->getPointeeType()->castAs<FunctionType>()->getCallConv();
CXXRecordDecl *Lambda = Conv->getParent();
FunctionDecl *CallOp = Lambda->getLambdaCallOperator();
@@ -14931,7 +15189,7 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion(
if (!BuildBlock.isInvalid() && !getLangOpts().ObjCAutoRefCount)
BuildBlock = ImplicitCastExpr::Create(
Context, BuildBlock.get()->getType(), CK_CopyAndAutoreleaseBlockObject,
- BuildBlock.get(), nullptr, VK_RValue, FPOptionsOverride());
+ BuildBlock.get(), nullptr, VK_PRValue, FPOptionsOverride());
if (BuildBlock.isInvalid()) {
Diag(CurrentLocation, diag::note_lambda_to_block_conv);
@@ -15201,13 +15459,12 @@ void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
/// to form a proper call to this constructor.
///
/// \returns true if an error occurred, false otherwise.
-bool
-Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
- MultiExprArg ArgsPtr,
- SourceLocation Loc,
- SmallVectorImpl<Expr*> &ConvertedArgs,
- bool AllowExplicit,
- bool IsListInitialization) {
+bool Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
+ QualType DeclInitType, MultiExprArg ArgsPtr,
+ SourceLocation Loc,
+ SmallVectorImpl<Expr *> &ConvertedArgs,
+ bool AllowExplicit,
+ bool IsListInitialization) {
// FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall.
unsigned NumArgs = ArgsPtr.size();
Expr **Args = ArgsPtr.data();
@@ -15234,7 +15491,7 @@ Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
DiagnoseSentinelCalls(Constructor, Loc, AllArgs);
- CheckConstructorCall(Constructor,
+ CheckConstructorCall(Constructor, DeclInitType,
llvm::makeArrayRef(AllArgs.data(), AllArgs.size()),
Proto, Loc);
@@ -15261,11 +15518,13 @@ CheckOperatorNewDeleteDeclarationScope(Sema &SemaRef,
return false;
}
-static QualType
-RemoveAddressSpaceFromPtr(Sema &SemaRef, const PointerType *PtrTy) {
- QualType QTy = PtrTy->getPointeeType();
- QTy = SemaRef.Context.removeAddrSpaceQualType(QTy);
- return SemaRef.Context.getPointerType(QTy);
+static CanQualType RemoveAddressSpaceFromPtr(Sema &SemaRef,
+ const PointerType *PtrTy) {
+ auto &Ctx = SemaRef.Context;
+ Qualifiers PtrQuals = PtrTy->getPointeeType().getQualifiers();
+ PtrQuals.removeAddressSpace();
+ return Ctx.getPointerType(Ctx.getCanonicalType(Ctx.getQualifiedType(
+ PtrTy->getPointeeType().getUnqualifiedType(), PtrQuals)));
}
static inline bool
@@ -15277,11 +15536,14 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
QualType ResultType =
FnDecl->getType()->castAs<FunctionType>()->getReturnType();
- // The operator is valid on any address space for OpenCL.
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
- if (auto *PtrTy = ResultType->getAs<PointerType>()) {
+ // The operator is valid on any address space for OpenCL.
+ // Drop address space from actual and expected result types.
+ if (const auto *PtrTy = ResultType->getAs<PointerType>())
ResultType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
- }
+
+ if (auto ExpectedPtrTy = ExpectedResultType->getAs<PointerType>())
+ ExpectedResultType = RemoveAddressSpaceFromPtr(SemaRef, ExpectedPtrTy);
}
// Check that the result type is what we expect.
@@ -15311,10 +15573,14 @@ CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
QualType FirstParamType = FnDecl->getParamDecl(0)->getType();
if (SemaRef.getLangOpts().OpenCLCPlusPlus) {
// The operator is valid on any address space for OpenCL.
- if (auto *PtrTy =
- FnDecl->getParamDecl(0)->getType()->getAs<PointerType>()) {
+ // Drop address space from actual and expected first parameter types.
+ if (const auto *PtrTy =
+ FnDecl->getParamDecl(0)->getType()->getAs<PointerType>())
FirstParamType = RemoveAddressSpaceFromPtr(SemaRef, PtrTy);
- }
+
+ if (auto ExpectedPtrTy = ExpectedFirstParamType->getAs<PointerType>())
+ ExpectedFirstParamType =
+ RemoveAddressSpaceFromPtr(SemaRef, ExpectedPtrTy);
}
// Check that the first parameter type is what we expect.
@@ -16723,6 +16989,8 @@ NamedDecl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
}
}
+ warnOnReservedIdentifier(ND);
+
return ND;
}
diff --git a/clang/lib/Sema/SemaDeclObjC.cpp b/clang/lib/Sema/SemaDeclObjC.cpp
index 60253a82e93a..e0f8c6e92d5a 100644
--- a/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/clang/lib/Sema/SemaDeclObjC.cpp
@@ -4407,10 +4407,12 @@ private:
void Sema::CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden) {
- if (const auto *attr = overridden->getAttr<ObjCDirectAttr>()) {
+ if (overridden->isDirectMethod()) {
+ const auto *attr = overridden->getAttr<ObjCDirectAttr>();
Diag(method->getLocation(), diag::err_objc_override_direct_method);
Diag(attr->getLocation(), diag::note_previous_declaration);
- } else if (const auto *attr = method->getAttr<ObjCDirectAttr>()) {
+ } else if (method->isDirectMethod()) {
+ const auto *attr = method->getAttr<ObjCDirectAttr>();
Diag(attr->getLocation(), diag::err_objc_direct_on_override)
<< isa<ObjCProtocolDecl>(overridden->getDeclContext());
Diag(overridden->getLocation(), diag::note_previous_declaration);
@@ -4856,7 +4858,8 @@ Decl *Sema::ActOnMethodDeclaration(
// the canonical declaration.
if (!ObjCMethod->isDirectMethod()) {
const ObjCMethodDecl *CanonicalMD = ObjCMethod->getCanonicalDecl();
- if (const auto *attr = CanonicalMD->getAttr<ObjCDirectAttr>()) {
+ if (CanonicalMD->isDirectMethod()) {
+ const auto *attr = CanonicalMD->getAttr<ObjCDirectAttr>();
ObjCMethod->addAttr(
ObjCDirectAttr::CreateImplicit(Context, attr->getLocation()));
}
@@ -4901,14 +4904,16 @@ Decl *Sema::ActOnMethodDeclaration(
Diag(IMD->getLocation(), diag::note_previous_declaration);
};
- if (const auto *attr = ObjCMethod->getAttr<ObjCDirectAttr>()) {
+ if (ObjCMethod->isDirectMethod()) {
+ const auto *attr = ObjCMethod->getAttr<ObjCDirectAttr>();
if (ObjCMethod->getCanonicalDecl() != IMD) {
diagContainerMismatch();
} else if (!IMD->isDirectMethod()) {
Diag(attr->getLocation(), diag::err_objc_direct_missing_on_decl);
Diag(IMD->getLocation(), diag::note_previous_declaration);
}
- } else if (const auto *attr = IMD->getAttr<ObjCDirectAttr>()) {
+ } else if (IMD->isDirectMethod()) {
+ const auto *attr = IMD->getAttr<ObjCDirectAttr>();
if (ObjCMethod->getCanonicalDecl() != IMD) {
diagContainerMismatch();
} else {
diff --git a/clang/lib/Sema/SemaExceptionSpec.cpp b/clang/lib/Sema/SemaExceptionSpec.cpp
index 851e28741e49..8816c9c1fea0 100644
--- a/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -35,10 +35,12 @@ static const FunctionProtoType *GetUnderlyingFunction(QualType T)
return T->getAs<FunctionProtoType>();
}
-/// HACK: libstdc++ has a bug where it shadows std::swap with a member
-/// swap function then tries to call std::swap unqualified from the exception
-/// specification of that function. This function detects whether we're in
-/// such a case and turns off delay-parsing of exception specifications.
+/// HACK: 2014-11-14 libstdc++ had a bug where it shadows std::swap with a
+/// member swap function then tries to call std::swap unqualified from the
+/// exception specification of that function. This function detects whether
+/// we're in such a case and turns off delay-parsing of exception
+/// specifications. Libstdc++ 6.1 (released 2016-04-27) appears to have
+/// resolved it as side-effect of commit ddb63209a8d (2015-06-05).
bool Sema::isLibstdcxxEagerExceptionSpecHack(const Declarator &D) {
auto *RD = dyn_cast<CXXRecordDecl>(CurContext);
@@ -1448,6 +1450,7 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPMasterTaskLoopDirectiveClass:
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
case Stmt::OMPOrderedDirectiveClass:
+ case Stmt::OMPCanonicalLoopClass:
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPParallelForDirectiveClass:
case Stmt::OMPParallelForSimdDirectiveClass:
@@ -1458,6 +1461,8 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPSectionDirectiveClass:
case Stmt::OMPSectionsDirectiveClass:
case Stmt::OMPSimdDirectiveClass:
+ case Stmt::OMPTileDirectiveClass:
+ case Stmt::OMPUnrollDirectiveClass:
case Stmt::OMPSingleDirectiveClass:
case Stmt::OMPTargetDataDirectiveClass:
case Stmt::OMPTargetDirectiveClass:
@@ -1484,6 +1489,9 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
case Stmt::OMPTeamsDistributeSimdDirectiveClass:
+ case Stmt::OMPInteropDirectiveClass:
+ case Stmt::OMPDispatchDirectiveClass:
+ case Stmt::OMPMaskedDirectiveClass:
case Stmt::ReturnStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:
@@ -1568,6 +1576,8 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
return mergeCanThrow(CT, canThrow(TS->getTryBody()));
}
+ case Stmt::SYCLUniqueStableNameExprClass:
+ return CT_Cannot;
case Stmt::NoStmtClass:
llvm_unreachable("Invalid class for statement");
}
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 45616dadcbee..0e6c933cd4f3 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -46,8 +46,11 @@
#include "clang/Sema/SemaFixItUtils.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/SaveAndRestore.h"
+
using namespace clang;
using namespace sema;
using llvm::RoundingMode;
@@ -82,6 +85,9 @@ bool Sema::CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid) {
cast<Decl>(CurContext)->getAvailability() != AR_Unavailable)
return false;
+ if (isa<UnresolvedUsingIfExistsDecl>(D))
+ return false;
+
return true;
}
@@ -347,6 +353,12 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
return true;
}
+ if (const auto *EmptyD = dyn_cast<UnresolvedUsingIfExistsDecl>(D)) {
+ Diag(Loc, diag::err_use_of_empty_using_if_exists);
+ Diag(EmptyD->getLocation(), diag::note_empty_using_if_exists_here);
+ return true;
+ }
+
DiagnoseAvailabilityOfDecl(D, Locs, UnknownObjCClass, ObjCPropertyAccess,
AvoidPartialAvailabilityChecks, ClassReceiver);
@@ -354,26 +366,8 @@ bool Sema::DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
diagnoseUseOfInternalDeclInInlineFunction(*this, D, Loc);
- // CUDA/HIP: Diagnose invalid references of host global variables in device
- // functions. Reference of device global variables in host functions is
- // allowed through shadow variables therefore it is not diagnosed.
- if (LangOpts.CUDAIsDevice) {
- auto *FD = dyn_cast_or_null<FunctionDecl>(CurContext);
- auto Target = IdentifyCUDATarget(FD);
- if (FD && Target != CFT_Host) {
- const auto *VD = dyn_cast<VarDecl>(D);
- if (VD && VD->hasGlobalStorage() && !VD->hasAttr<CUDADeviceAttr>() &&
- !VD->hasAttr<CUDAConstantAttr>() && !VD->hasAttr<CUDASharedAttr>() &&
- !VD->getType()->isCUDADeviceBuiltinSurfaceType() &&
- !VD->getType()->isCUDADeviceBuiltinTextureType() &&
- !VD->isConstexpr() && !VD->getType().isConstQualified())
- targetDiag(*Locs.begin(), diag::err_ref_bad_target)
- << /*host*/ 2 << /*variable*/ 1 << VD << Target;
- }
- }
-
if (LangOpts.SYCLIsDevice || (LangOpts.OpenMP && LangOpts.OpenMPIsDevice)) {
- if (const auto *VD = dyn_cast<ValueDecl>(D))
+ if (auto *VD = dyn_cast<ValueDecl>(D))
checkDeviceDecl(VD, Loc);
if (!Context.getTargetInfo().isTLSSupported())
@@ -531,9 +525,13 @@ ExprResult Sema::DefaultFunctionArrayConversion(Expr *E, bool Diagnose) {
// An lvalue or rvalue of type "array of N T" or "array of unknown bound of
// T" can be converted to an rvalue of type "pointer to T".
//
- if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue())
- E = ImpCastExprToType(E, Context.getArrayDecayedType(Ty),
- CK_ArrayToPointerDecay).get();
+ if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue()) {
+ ExprResult Res = ImpCastExprToType(E, Context.getArrayDecayedType(Ty),
+ CK_ArrayToPointerDecay);
+ if (Res.isInvalid())
+ return ExprError();
+ E = Res.get();
+ }
}
return E;
}
@@ -657,7 +655,8 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
return E;
// OpenCL usually rejects direct accesses to values of 'half' type.
- if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp16") &&
+ if (getLangOpts().OpenCL &&
+ !getOpenCLOptions().isAvailableOption("cl_khr_fp16", getLangOpts()) &&
T->isHalfType()) {
Diag(E->getExprLoc(), diag::err_opencl_half_load_store)
<< 0 << T;
@@ -714,7 +713,7 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
// C++ [conv.lval]p3:
// If T is cv std::nullptr_t, the result is a null pointer constant.
CastKind CK = T->isNullPtrType() ? CK_NullToPointer : CK_LValueToRValue;
- Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_RValue,
+ Res = ImplicitCastExpr::Create(Context, T, CK, E, nullptr, VK_PRValue,
CurFPFeatureOverrides());
// C11 6.3.2.1p2:
@@ -723,7 +722,7 @@ ExprResult Sema::DefaultLvalueConversion(Expr *E) {
if (const AtomicType *Atomic = T->getAs<AtomicType>()) {
T = Atomic->getValueType().getUnqualifiedType();
Res = ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic, Res.get(),
- nullptr, VK_RValue, FPOptionsOverride());
+ nullptr, VK_PRValue, FPOptionsOverride());
}
return Res;
@@ -829,14 +828,27 @@ ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
if (BTy && (BTy->getKind() == BuiltinType::Half ||
BTy->getKind() == BuiltinType::Float)) {
if (getLangOpts().OpenCL &&
- !getOpenCLOptions().isEnabled("cl_khr_fp64")) {
- if (BTy->getKind() == BuiltinType::Half) {
- E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get();
- }
+ !getOpenCLOptions().isAvailableOption("cl_khr_fp64", getLangOpts())) {
+ if (BTy->getKind() == BuiltinType::Half) {
+ E = ImpCastExprToType(E, Context.FloatTy, CK_FloatingCast).get();
+ }
} else {
E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).get();
}
}
+ if (BTy &&
+ getLangOpts().getExtendIntArgs() ==
+ LangOptions::ExtendArgsKind::ExtendTo64 &&
+ Context.getTargetInfo().supportsExtendIntArgs() && Ty->isIntegerType() &&
+ Context.getTypeSizeInChars(BTy) <
+ Context.getTypeSizeInChars(Context.LongLongTy)) {
+ E = (Ty->isUnsignedIntegerType())
+ ? ImpCastExprToType(E, Context.UnsignedLongLongTy, CK_IntegralCast)
+ .get()
+ : ImpCastExprToType(E, Context.LongLongTy, CK_IntegralCast).get();
+ assert(8 == Context.getTypeSizeInChars(Context.LongLongTy).getQuantity() &&
+ "Unexpected typesize for LongLongTy");
+ }
// C++ performs lvalue-to-rvalue conversion as a default argument
// promotion, even on class types, but note:
@@ -1533,11 +1545,6 @@ QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
if (LHSType == RHSType)
return LHSType;
- // ExtInt types aren't subject to conversions between them or normal integers,
- // so this fails.
- if(LHSType->isExtIntType() || RHSType->isExtIntType())
- return QualType();
-
// At this point, we have two different arithmetic types.
// Diagnose attempts to convert between __float128 and long double where
@@ -3211,8 +3218,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
}
// Make sure that we're referring to a value.
- ValueDecl *VD = dyn_cast<ValueDecl>(D);
- if (!VD) {
+ if (!isa<ValueDecl, UnresolvedUsingIfExistsDecl>(D)) {
Diag(Loc, diag::err_ref_non_value)
<< D << SS.getRange();
Diag(D->getLocation(), diag::note_declared_at);
@@ -3223,9 +3229,11 @@ ExprResult Sema::BuildDeclarationNameExpr(
// this check when we're going to perform argument-dependent lookup
// on this function name, because this might not be the function
// that overload resolution actually selects.
- if (DiagnoseUseOfDecl(VD, Loc))
+ if (DiagnoseUseOfDecl(D, Loc))
return ExprError();
+ auto *VD = cast<ValueDecl>(D);
+
// Only create DeclRefExpr's for valid Decl's.
if (VD->isInvalidDecl() && !AcceptInvalidDecl)
return ExprError();
@@ -3242,7 +3250,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
QualType type = VD->getType();
if (type.isNull())
return ExprError();
- ExprValueKind valueKind = VK_RValue;
+ ExprValueKind valueKind = VK_PRValue;
// In 'T ...V;', the type of the declaration 'V' is 'T...', but the type of
// a reference to 'V' is simply (unexpanded) 'T'. The type, like the value,
@@ -3268,7 +3276,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::UnresolvedUsingValue:
case Decl::OMPDeclareReduction:
case Decl::OMPDeclareMapper:
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
// Fields and indirect fields that got here must be for
@@ -3309,7 +3317,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
// For non-references, we need to strip qualifiers just in case
// the template parameter was declared as 'const int' or whatever.
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
type = type.getUnqualifiedType();
break;
}
@@ -3323,7 +3331,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
if (!getLangOpts().CPlusPlus &&
!type.hasQualifiers() &&
type->isVoidType()) {
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
}
LLVM_FALLTHROUGH;
@@ -3365,7 +3373,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
type = Context.BuiltinFnTy;
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
}
}
@@ -3376,7 +3384,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
// result type, make the entire expression __unknown_anytype.
if (fty->getReturnType() == Context.UnknownAnyTy) {
type = Context.UnknownAnyTy;
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
}
@@ -3397,7 +3405,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
fty->getExtInfo());
// Functions are r-values in C.
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
}
@@ -3420,7 +3428,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
= dyn_cast<FunctionProtoType>(VD->getType()))
if (proto->getReturnType() == Context.UnknownAnyTy) {
type = Context.UnknownAnyTy;
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
}
@@ -3434,7 +3442,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::CXXConversion:
case Decl::CXXDestructor:
case Decl::CXXConstructor:
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
break;
}
@@ -3509,6 +3517,28 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
return PredefinedExpr::Create(Context, Loc, ResTy, IK, SL);
}
+ExprResult Sema::BuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ TypeSourceInfo *TSI) {
+ return SYCLUniqueStableNameExpr::Create(Context, OpLoc, LParen, RParen, TSI);
+}
+
+ExprResult Sema::ActOnSYCLUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ ParsedType ParsedTy) {
+ TypeSourceInfo *TSI = nullptr;
+ QualType Ty = GetTypeFromParser(ParsedTy, &TSI);
+
+ if (Ty.isNull())
+ return ExprError();
+ if (!TSI)
+ TSI = Context.getTrivialTypeSourceInfo(Ty, LParen);
+
+ return BuildSYCLUniqueStableNameExpr(OpLoc, LParen, RParen, TSI);
+}
+
ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
PredefinedExpr::IdentKind IK;
@@ -3642,7 +3672,7 @@ bool Sema::CheckLoopHintExpr(Expr *E, SourceLocation Loc) {
bool ValueIsPositive = ValueAPS.isStrictlyPositive();
if (!ValueIsPositive || ValueAPS.getActiveBits() > 31) {
Diag(E->getExprLoc(), diag::err_pragma_loop_invalid_argument_value)
- << ValueAPS.toString(10) << ValueIsPositive;
+ << toString(ValueAPS, 10) << ValueIsPositive;
return true;
}
@@ -3819,7 +3849,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
} else if (Literal.isFloatingLiteral()) {
QualType Ty;
if (Literal.isHalf){
- if (getOpenCLOptions().isEnabled("cl_khr_fp16"))
+ if (getOpenCLOptions().isAvailableOption("cl_khr_fp16", getLangOpts()))
Ty = Context.HalfTy;
else {
Diag(Tok.getLocation(), diag::err_half_const_requires_fp16);
@@ -3843,10 +3873,11 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
if (Ty->castAs<BuiltinType>()->getKind() != BuiltinType::Float) {
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
}
- } else if (getLangOpts().OpenCL &&
- !getOpenCLOptions().isEnabled("cl_khr_fp64")) {
+ } else if (getLangOpts().OpenCL && !getOpenCLOptions().isAvailableOption(
+ "cl_khr_fp64", getLangOpts())) {
// Impose single-precision float type when cl_khr_fp64 is not enabled.
- Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64);
+ Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64)
+ << (getLangOpts().OpenCLVersion >= 300);
Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).get();
}
}
@@ -3865,6 +3896,14 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
Diag(Tok.getLocation(), diag::ext_c99_longlong);
}
+ // 'z/uz' literals are a C++2b feature.
+ if (Literal.isSizeT)
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus
+ ? getLangOpts().CPlusPlus2b
+ ? diag::warn_cxx20_compat_size_t_suffix
+ : diag::ext_cxx2b_size_t_suffix
+ : diag::err_cxx2b_size_t_suffix);
+
// Get the value in the widest-possible width.
unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
llvm::APInt ResultVal(MaxWidth, 0);
@@ -3899,7 +3938,26 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
}
- if (Ty.isNull() && !Literal.isLong && !Literal.isLongLong) {
+ // Check C++2b size_t literals.
+ if (Literal.isSizeT) {
+ assert(!Literal.MicrosoftInteger &&
+ "size_t literals can't be Microsoft literals");
+ unsigned SizeTSize = Context.getTargetInfo().getTypeWidth(
+ Context.getTargetInfo().getSizeType());
+
+ // Does it fit in size_t?
+ if (ResultVal.isIntN(SizeTSize)) {
+ // Does it fit in ssize_t?
+ if (!Literal.isUnsigned && ResultVal[SizeTSize - 1] == 0)
+ Ty = Context.getSignedSizeType();
+ else if (AllowUnsigned)
+ Ty = Context.getSizeType();
+ Width = SizeTSize;
+ }
+ }
+
+ if (Ty.isNull() && !Literal.isLong && !Literal.isLongLong &&
+ !Literal.isSizeT) {
// Are int/unsigned possibilities?
unsigned IntSize = Context.getTargetInfo().getIntWidth();
@@ -3915,7 +3973,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
// Are long/unsigned long possibilities?
- if (Ty.isNull() && !Literal.isLongLong) {
+ if (Ty.isNull() && !Literal.isLongLong && !Literal.isSizeT) {
unsigned LongSize = Context.getTargetInfo().getLongWidth();
// Does it fit in a unsigned long?
@@ -3946,7 +4004,7 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
// Check long long if needed.
- if (Ty.isNull()) {
+ if (Ty.isNull() && !Literal.isSizeT) {
unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth();
// Does it fit in a unsigned long long?
@@ -3963,10 +4021,16 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
}
- // If we still couldn't decide a type, we probably have something that
- // does not fit in a signed long long, but has no U suffix.
+ // If we still couldn't decide a type, we either have 'size_t' literal
+ // that is out of range, or a decimal literal that does not fit in a
+ // signed long long and has no U suffix.
if (Ty.isNull()) {
- Diag(Tok.getLocation(), diag::ext_integer_literal_too_large_for_signed);
+ if (Literal.isSizeT)
+ Diag(Tok.getLocation(), diag::err_size_t_literal_too_large)
+ << Literal.isUnsigned;
+ else
+ Diag(Tok.getLocation(),
+ diag::ext_integer_literal_too_large_for_signed);
Ty = Context.UnsignedLongLongTy;
Width = Context.getTargetInfo().getLongLongWidth();
}
@@ -3989,6 +4053,10 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
ExprResult Sema::ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E) {
assert(E && "ActOnParenExpr() missing expr");
+ QualType ExprTy = E->getType();
+ if (getLangOpts().ProtectParens && CurFPFeatures.getAllowFPReassociate() &&
+ !E->isLValue() && ExprTy->hasFloatingRepresentation())
+ return BuildBuiltinCallExpr(R, Builtin::BI__arithmetic_fence, E);
return new (Context) ParenExpr(L, R, E);
}
@@ -4995,7 +5063,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
llvm::APSInt LengthValue = Result.Val.getInt();
if (LengthValue.isNegative()) {
Diag(Length->getExprLoc(), diag::err_omp_section_length_negative)
- << LengthValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << toString(LengthValue, /*Radix=*/10, /*Signed=*/true)
<< Length->getSourceRange();
return ExprError();
}
@@ -5019,7 +5087,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
llvm::APSInt StrideValue = Result.Val.getInt();
if (!StrideValue.isStrictlyPositive()) {
Diag(Stride->getExprLoc(), diag::err_omp_section_stride_non_positive)
- << StrideValue.toString(/*Radix=*/10, /*Signed=*/true)
+ << toString(StrideValue, /*Radix=*/10, /*Signed=*/true)
<< Stride->getSourceRange();
return ExprError();
}
@@ -5098,7 +5166,7 @@ ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
llvm::APSInt Value = EvResult.Val.getInt();
if (!Value.isStrictlyPositive()) {
Diag(Dim->getExprLoc(), diag::err_omp_shaping_dimension_not_positive)
- << Value.toString(/*Radix=*/10, /*Signed=*/true)
+ << toString(Value, /*Radix=*/10, /*Signed=*/true)
<< Dim->getSourceRange();
ErrorFound = true;
continue;
@@ -5478,14 +5546,14 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
// We apply C++ DR1213 to vector subscripting too.
- if (getLangOpts().CPlusPlus11 && LHSExp->getValueKind() == VK_RValue) {
+ if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
if (Materialized.isInvalid())
return ExprError();
LHSExp = Materialized.get();
}
VK = LHSExp->getValueKind();
- if (VK != VK_RValue)
+ if (VK != VK_PRValue)
OK = OK_VectorComponent;
ResultType = VTy->getElementType();
@@ -5509,7 +5577,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
BaseExpr = LHSExp;
IndexExpr = RHSExp;
- ResultType = LHSTy->getAs<PointerType>()->getPointeeType();
+ ResultType = LHSTy->castAs<PointerType>()->getPointeeType();
} else if (RHSTy->isArrayType()) {
// Same as previous, except for 123[f().a] case
Diag(RHSExp->getBeginLoc(), diag::ext_subscript_non_lvalue)
@@ -5520,7 +5588,7 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
BaseExpr = RHSExp;
IndexExpr = LHSExp;
- ResultType = RHSTy->getAs<PointerType>()->getPointeeType();
+ ResultType = RHSTy->castAs<PointerType>()->getPointeeType();
} else {
return ExprError(Diag(LLoc, diag::err_typecheck_subscript_value)
<< LHSExp->getSourceRange() << RHSExp->getSourceRange());
@@ -5552,14 +5620,15 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
// C forbids expressions of unqualified void type from being l-values.
// See IsCForbiddenLValueType.
- if (!ResultType.hasQualifiers()) VK = VK_RValue;
+ if (!ResultType.hasQualifiers())
+ VK = VK_PRValue;
} else if (!ResultType->isDependentType() &&
RequireCompleteSizedType(
LLoc, ResultType,
diag::err_subscript_incomplete_or_sizeless_type, BaseExpr))
return ExprError();
- assert(VK == VK_RValue || LangOpts.CPlusPlus ||
+ assert(VK == VK_PRValue || LangOpts.CPlusPlus ||
!ResultType.isCForbiddenLValueType());
if (LHSExp->IgnoreParenImpCasts()->getType()->isVariablyModifiedType() &&
@@ -5858,6 +5927,7 @@ Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
for (unsigned i = 0; i < TotalNumArgs; ++i)
Call->setArg(i, AllArgs[i]);
+ Call->computeDependence();
return false;
}
@@ -5894,7 +5964,8 @@ bool Sema::GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
(!Param || !Param->hasAttr<CFConsumedAttr>()))
CFAudited = true;
- if (Proto->getExtParameterInfo(i).isNoEscape())
+ if (Proto->getExtParameterInfo(i).isNoEscape() &&
+ ProtoArgType->isBlockPointerType())
if (auto *BE = dyn_cast<BlockExpr>(Arg->IgnoreParenNoopCasts(Context)))
BE->getBlockDecl()->setDoesNotEscape();
@@ -6061,6 +6132,8 @@ static bool isPlaceholderToRemoveAsArg(QualType type) {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
#define PLACEHOLDER_TYPE(ID, SINGLETON_ID)
#define BUILTIN_TYPE(ID, SINGLETON_ID) case BuiltinType::ID:
#include "clang/AST/BuiltinTypes.def"
@@ -6363,7 +6436,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
}
return CallExpr::Create(Context, Fn, /*Args=*/{}, Context.VoidTy,
- VK_RValue, RParenLoc, CurFPFeatureOverrides());
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
}
if (Fn->getType() == Context.PseudoObjectTy) {
ExprResult result = CheckPlaceholderExpr(Fn);
@@ -6375,9 +6448,10 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
// in which case we won't do any semantic analysis now.
if (Fn->isTypeDependent() || Expr::hasAnyTypeDependentArguments(ArgExprs)) {
if (ExecConfig) {
- return CUDAKernelCallExpr::Create(
- Context, Fn, cast<CallExpr>(ExecConfig), ArgExprs,
- Context.DependentTy, VK_RValue, RParenLoc, CurFPFeatureOverrides());
+ return CUDAKernelCallExpr::Create(Context, Fn,
+ cast<CallExpr>(ExecConfig), ArgExprs,
+ Context.DependentTy, VK_PRValue,
+ RParenLoc, CurFPFeatureOverrides());
} else {
tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
@@ -6385,7 +6459,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
Fn->getBeginLoc());
return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
- VK_RValue, RParenLoc, CurFPFeatureOverrides());
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
}
}
@@ -6414,7 +6488,7 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (!find.HasFormOfMemberPointer) {
if (Expr::hasAnyTypeDependentArguments(ArgExprs))
return CallExpr::Create(Context, Fn, ArgExprs, Context.DependentTy,
- VK_RValue, RParenLoc, CurFPFeatureOverrides());
+ VK_PRValue, RParenLoc, CurFPFeatureOverrides());
OverloadExpr *ovl = find.Expression;
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(ovl))
return BuildOverloadedCallExpr(
@@ -6468,9 +6542,6 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
FD, /*Complain=*/true, Fn->getBeginLoc()))
return ExprError();
- if (getLangOpts().OpenCL && checkOpenCLDisabledDecl(*FD, *Fn))
- return ExprError();
-
checkDirectCallValidity(*this, Fn, FD, ArgExprs);
}
@@ -6493,24 +6564,53 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
ExecConfig, IsExecConfig);
}
-/// ActOnAsTypeExpr - create a new asType (bitcast) from the arguments.
+/// BuildBuiltinCallExpr - Create a call to a builtin function specified by Id
+// with the specified CallArgs
+Expr *Sema::BuildBuiltinCallExpr(SourceLocation Loc, Builtin::ID Id,
+ MultiExprArg CallArgs) {
+ StringRef Name = Context.BuiltinInfo.getName(Id);
+ LookupResult R(*this, &Context.Idents.get(Name), Loc,
+ Sema::LookupOrdinaryName);
+ LookupName(R, TUScope, /*AllowBuiltinCreation=*/true);
+
+ auto *BuiltInDecl = R.getAsSingle<FunctionDecl>();
+ assert(BuiltInDecl && "failed to find builtin declaration");
+
+ ExprResult DeclRef =
+ BuildDeclRefExpr(BuiltInDecl, BuiltInDecl->getType(), VK_LValue, Loc);
+ assert(DeclRef.isUsable() && "Builtin reference cannot fail");
+
+ ExprResult Call =
+ BuildCallExpr(/*Scope=*/nullptr, DeclRef.get(), Loc, CallArgs, Loc);
+
+ assert(!Call.isInvalid() && "Call to builtin cannot fail!");
+ return Call.get();
+}
+
+/// Parse a __builtin_astype expression.
///
/// __builtin_astype( value, dst type )
///
ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc) {
- ExprValueKind VK = VK_RValue;
- ExprObjectKind OK = OK_Ordinary;
QualType DstTy = GetTypeFromParser(ParsedDestTy);
+ return BuildAsTypeExpr(E, DstTy, BuiltinLoc, RParenLoc);
+}
+
+/// Create a new AsTypeExpr node (bitcast) from the arguments.
+ExprResult Sema::BuildAsTypeExpr(Expr *E, QualType DestTy,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc) {
+ ExprValueKind VK = VK_PRValue;
+ ExprObjectKind OK = OK_Ordinary;
QualType SrcTy = E->getType();
- if (Context.getTypeSize(DstTy) != Context.getTypeSize(SrcTy))
- return ExprError(Diag(BuiltinLoc,
- diag::err_invalid_astype_of_different_size)
- << DstTy
- << SrcTy
- << E->getSourceRange());
- return new (Context) AsTypeExpr(E, DstTy, VK, OK, BuiltinLoc, RParenLoc);
+ if (!SrcTy->isDependentType() &&
+ Context.getTypeSize(DestTy) != Context.getTypeSize(SrcTy))
+ return ExprError(
+ Diag(BuiltinLoc, diag::err_invalid_astype_of_different_size)
+ << DestTy << SrcTy << E->getSourceRange());
+ return new (Context) AsTypeExpr(E, DestTy, VK, OK, BuiltinLoc, RParenLoc);
}
/// ActOnConvertVectorExpr - create a new convert-vector expression from the
@@ -6550,12 +6650,25 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
// so there's some risk when calling out to non-interrupt handler functions
// that the callee might not preserve them. This is easy to diagnose here,
// but can be very challenging to debug.
- if (auto *Caller = getCurFunctionDecl())
+ // Likewise, X86 interrupt handlers may only call routines with attribute
+ // no_caller_saved_registers since there is no efficient way to
+ // save and restore the non-GPR state.
+ if (auto *Caller = getCurFunctionDecl()) {
if (Caller->hasAttr<ARMInterruptAttr>()) {
bool VFP = Context.getTargetInfo().hasFeature("vfp");
- if (VFP && (!FDecl || !FDecl->hasAttr<ARMInterruptAttr>()))
+ if (VFP && (!FDecl || !FDecl->hasAttr<ARMInterruptAttr>())) {
Diag(Fn->getExprLoc(), diag::warn_arm_interrupt_calling_convention);
+ if (FDecl)
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ }
}
+ if (Caller->hasAttr<AnyX86InterruptAttr>() &&
+ ((!FDecl || !FDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>()))) {
+ Diag(Fn->getExprLoc(), diag::warn_anyx86_interrupt_regsave);
+ if (FDecl)
+ Diag(FDecl->getLocation(), diag::note_callee_decl) << FDecl;
+ }
+ }
// Promote the function operand.
// We special-case function promotion here because we only allow promoting
@@ -6621,11 +6734,11 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
assert(UsesADL == ADLCallKind::NotADL &&
"CUDAKernelCallExpr should not use ADL");
TheCall = CUDAKernelCallExpr::Create(Context, Fn, cast<CallExpr>(Config),
- Args, ResultTy, VK_RValue, RParenLoc,
+ Args, ResultTy, VK_PRValue, RParenLoc,
CurFPFeatureOverrides(), NumParams);
} else {
TheCall =
- CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue, RParenLoc,
+ CallExpr::Create(Context, Fn, Args, ResultTy, VK_PRValue, RParenLoc,
CurFPFeatureOverrides(), NumParams, UsesADL);
}
@@ -6652,11 +6765,11 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
if (CorrectedTypos && Args.size() < NumParams) {
if (Config)
TheCall = CUDAKernelCallExpr::Create(
- Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_RValue,
+ Context, Fn, cast<CallExpr>(Config), Args, ResultTy, VK_PRValue,
RParenLoc, CurFPFeatureOverrides(), NumParams);
else
TheCall =
- CallExpr::Create(Context, Fn, Args, ResultTy, VK_RValue, RParenLoc,
+ CallExpr::Create(Context, Fn, Args, ResultTy, VK_PRValue, RParenLoc,
CurFPFeatureOverrides(), NumParams, UsesADL);
}
// We can now handle the nulled arguments for the default arguments.
@@ -6750,6 +6863,7 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
TheCall->setArg(i, Arg);
}
+ TheCall->computeDependence();
}
if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
@@ -6818,9 +6932,12 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
diag::err_array_incomplete_or_sizeless_type,
SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd())))
return ExprError();
- if (literalType->isVariableArrayType())
- return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
- << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()));
+ if (literalType->isVariableArrayType()) {
+ if (!tryToFixVariablyModifiedVarType(TInfo, literalType, LParenLoc,
+ diag::err_variable_object_no_init)) {
+ return ExprError();
+ }
+ }
} else if (!literalType->isDependentType() &&
RequireCompleteType(LParenLoc, literalType,
diag::err_typecheck_decl_incomplete_type,
@@ -6862,7 +6979,7 @@ Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
// obviously have a value kind derived from the kind of reference involved.
ExprValueKind VK =
(getLangOpts().CPlusPlus && !(isFileScope && literalType->isArrayType()))
- ? VK_RValue
+ ? VK_PRValue
: VK_LValue;
if (isFileScope)
@@ -7013,14 +7130,14 @@ Sema::BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
/// Do an explicit extend of the given block pointer if we're in ARC.
void Sema::maybeExtendBlockObject(ExprResult &E) {
assert(E.get()->getType()->isBlockPointerType());
- assert(E.get()->isRValue());
+ assert(E.get()->isPRValue());
// Only do this in an r-value context.
if (!getLangOpts().ObjCAutoRefCount) return;
E = ImplicitCastExpr::Create(
Context, E.get()->getType(), CK_ARCExtendBlockObject, E.get(),
- /*base path*/ nullptr, VK_RValue, FPOptionsOverride());
+ /*base path*/ nullptr, VK_PRValue, FPOptionsOverride());
Cleanup.setExprNeedsCleanups(true);
}
@@ -7289,6 +7406,38 @@ bool Sema::isValidSveBitcast(QualType srcTy, QualType destTy) {
ValidScalableConversion(destTy, srcTy);
}
+/// Are the two types matrix types and do they have the same dimensions i.e.
+/// do they have the same number of rows and the same number of columns?
+bool Sema::areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy) {
+ if (!destTy->isMatrixType() || !srcTy->isMatrixType())
+ return false;
+
+ const ConstantMatrixType *matSrcType = srcTy->getAs<ConstantMatrixType>();
+ const ConstantMatrixType *matDestType = destTy->getAs<ConstantMatrixType>();
+
+ return matSrcType->getNumRows() == matDestType->getNumRows() &&
+ matSrcType->getNumColumns() == matDestType->getNumColumns();
+}
+
+bool Sema::areVectorTypesSameSize(QualType SrcTy, QualType DestTy) {
+ assert(DestTy->isVectorType() || SrcTy->isVectorType());
+
+ uint64_t SrcLen, DestLen;
+ QualType SrcEltTy, DestEltTy;
+ if (!breakDownVectorType(SrcTy, SrcLen, SrcEltTy))
+ return false;
+ if (!breakDownVectorType(DestTy, DestLen, DestEltTy))
+ return false;
+
+ // ASTContext::getTypeSize will return the size rounded up to a
+ // power of 2, so instead of using that, we need to use the raw
+ // element size multiplied by the element count.
+ uint64_t SrcEltSize = Context.getTypeSize(SrcEltTy);
+ uint64_t DestEltSize = Context.getTypeSize(DestEltTy);
+
+ return (SrcLen * SrcEltSize == DestLen * DestEltSize);
+}
+
/// Are the two types lax-compatible vector types? That is, given
/// that one of them is a vector, do they have equal storage sizes,
/// where the storage size is the number of elements times the element
@@ -7307,18 +7456,7 @@ bool Sema::areLaxCompatibleVectorTypes(QualType srcTy, QualType destTy) {
if (srcTy->isScalarType() && destTy->isExtVectorType()) return false;
if (destTy->isScalarType() && srcTy->isExtVectorType()) return false;
- uint64_t srcLen, destLen;
- QualType srcEltTy, destEltTy;
- if (!breakDownVectorType(srcTy, srcLen, srcEltTy)) return false;
- if (!breakDownVectorType(destTy, destLen, destEltTy)) return false;
-
- // ASTContext::getTypeSize will return the size rounded up to a
- // power of 2, so instead of using that, we need to use the raw
- // element size multiplied by the element count.
- uint64_t srcEltSize = Context.getTypeSize(srcEltTy);
- uint64_t destEltSize = Context.getTypeSize(destEltTy);
-
- return (srcLen * srcEltSize == destLen * destEltSize);
+ return areVectorTypesSameSize(srcTy, destTy);
}
/// Is this a legal conversion between two types, one of which is
@@ -7351,6 +7489,27 @@ bool Sema::isLaxVectorConversion(QualType srcTy, QualType destTy) {
return areLaxCompatibleVectorTypes(srcTy, destTy);
}
+bool Sema::CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
+ CastKind &Kind) {
+ if (SrcTy->isMatrixType() && DestTy->isMatrixType()) {
+ if (!areMatrixTypesOfTheSameDimension(SrcTy, DestTy)) {
+ return Diag(R.getBegin(), diag::err_invalid_conversion_between_matrixes)
+ << DestTy << SrcTy << R;
+ }
+ } else if (SrcTy->isMatrixType()) {
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_matrix_and_type)
+ << SrcTy << DestTy << R;
+ } else if (DestTy->isMatrixType()) {
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_matrix_and_type)
+ << DestTy << SrcTy << R;
+ }
+
+ Kind = CK_MatrixCast;
+ return false;
+}
+
bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind) {
assert(VectorTy->isVectorType() && "Not a vector type!");
@@ -7547,7 +7706,7 @@ ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
// initializers must be one or must match the size of the vector.
// If a single value is specified in the initializer then it will be
// replicated to all the components of the vector
- if (VTy->getVectorKind() == VectorType::AltiVecVector) {
+ if (ShouldSplatAltivecScalarInCast(VTy)) {
// The number of initializers must be one or must match the size of the
// vector. If a single value is specified in the initializer then it will
// be replicated to all the components of the vector
@@ -8134,7 +8293,7 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (getLangOpts().CPlusPlus)
return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc);
- VK = VK_RValue;
+ VK = VK_PRValue;
OK = OK_Ordinary;
if (Context.isDependenceAllowed() &&
@@ -8266,7 +8425,7 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Allow ?: operations in which both operands have the same
// built-in sizeless type.
- if (LHSTy->isSizelessBuiltinType() && LHSTy == RHSTy)
+ if (LHSTy->isSizelessBuiltinType() && Context.hasSameType(LHSTy, RHSTy))
return LHSTy;
// Emit a better diagnostic if one of the expressions is a null pointer
@@ -8672,8 +8831,8 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
// If the common expression is a class or array prvalue, materialize it
// so that we can safely refer to it multiple times.
- if (commonExpr->isRValue() && (commonExpr->getType()->isRecordType() ||
- commonExpr->getType()->isArrayType())) {
+ if (commonExpr->isPRValue() && (commonExpr->getType()->isRecordType() ||
+ commonExpr->getType()->isArrayType())) {
ExprResult MatExpr = TemporaryMaterializationConversion(commonExpr);
if (MatExpr.isInvalid())
return ExprError();
@@ -8689,7 +8848,7 @@ ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
}
QualType LHSTy = LHSExpr->getType(), RHSTy = RHSExpr->getType();
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
ExprResult Cond = CondExpr, LHS = LHSExpr, RHS = RHSExpr;
QualType result = CheckConditionalOperands(Cond, LHS, RHS,
@@ -8973,7 +9132,7 @@ Sema::CheckAssignmentConstraints(SourceLocation Loc,
// cast operations are required, so if CheckAssignmentConstraints
// adds casts to this they'll be wasted, but fortunately that doesn't
// usually happen on valid code.
- OpaqueValueExpr RHSExpr(Loc, RHSType, VK_RValue);
+ OpaqueValueExpr RHSExpr(Loc, RHSType, VK_PRValue);
ExprResult RHSPtr = &RHSExpr;
CastKind K;
@@ -9342,7 +9501,7 @@ static void ConstructTransparentUnion(Sema &S, ASTContext &C,
// union type from this initializer list.
TypeSourceInfo *unionTInfo = C.getTrivialTypeSourceInfo(UnionType);
EResult = new (C) CompoundLiteralExpr(SourceLocation(), unionTInfo, UnionType,
- VK_RValue, Initializer, false);
+ VK_PRValue, Initializer, false);
}
Sema::AssignConvertType
@@ -9481,7 +9640,7 @@ Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &CallerRHS,
CheckPointerConversion(RHS.get(), LHSType, Kind, Path,
/*IgnoreBaseAccess=*/false, Diagnose);
if (ConvertRHS)
- RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_RValue, &Path);
+ RHS = ImpCastExprToType(RHS.get(), LHSType, Kind, VK_PRValue, &Path);
}
return Compatible;
}
@@ -10000,7 +10159,7 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
RHSType, DiagID))
return RHSType;
} else {
- if (LHS.get()->getValueKind() == VK_LValue ||
+ if (LHS.get()->isLValue() ||
!tryGCCVectorConvertAndSplat(*this, &LHS, &RHS))
return RHSType;
}
@@ -10184,14 +10343,19 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign, bool IsDiv) {
checkArithmeticNull(*this, LHS, RHS, Loc, /*IsCompare=*/false);
- if (LHS.get()->getType()->isVectorType() ||
- RHS.get()->getType()->isVectorType())
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+ if (LHSTy->isVectorType() || RHSTy->isVectorType())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
/*AllowBothBool*/getLangOpts().AltiVec,
/*AllowBoolConversions*/false);
- if (!IsDiv && (LHS.get()->getType()->isConstantMatrixType() ||
- RHS.get()->getType()->isConstantMatrixType()))
+ if (!IsDiv &&
+ (LHSTy->isConstantMatrixType() || RHSTy->isConstantMatrixType()))
return CheckMatrixMultiplyOperands(LHS, RHS, Loc, IsCompAssign);
+ // For division, only matrix-by-scalar is supported. Other combinations with
+ // matrix types are invalid.
+ if (IsDiv && LHSTy->isConstantMatrixType() && RHSTy->isArithmeticType())
+ return CheckMatrixElementwiseOperands(LHS, RHS, Loc, IsCompAssign);
QualType compType = UsualArithmeticConversions(
LHS, RHS, Loc, IsCompAssign ? ACK_CompAssign : ACK_Arithmetic);
@@ -10267,6 +10431,22 @@ static void diagnoseArithmeticOnNullPointer(Sema &S, SourceLocation Loc,
<< S.getLangOpts().CPlusPlus << Pointer->getSourceRange();
}
+/// Diagnose invalid subtraction on a null pointer.
+///
+static void diagnoseSubtractionOnNullPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer, bool BothNull) {
+ // Null - null is valid in C++ [expr.add]p7
+ if (BothNull && S.getLangOpts().CPlusPlus)
+ return;
+
+  // Is this a macro from a system header?
+ if (S.Diags.getSuppressSystemWarnings() && S.SourceMgr.isInSystemMacro(Loc))
+ return;
+
+ S.Diag(Loc, diag::warn_pointer_sub_null_ptr)
+ << S.getLangOpts().CPlusPlus << Pointer->getSourceRange();
+}
+
/// Diagnose invalid arithmetic on two function pointers.
static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
Expr *LHS, Expr *RHS) {
@@ -10513,7 +10693,11 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isConstantMatrixType() ||
RHS.get()->getType()->isConstantMatrixType()) {
- return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ QualType compType =
+ CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ if (CompLHSTy)
+ *CompLHSTy = compType;
+ return compType;
}
QualType compType = UsualArithmeticConversions(
@@ -10613,7 +10797,11 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isConstantMatrixType() ||
RHS.get()->getType()->isConstantMatrixType()) {
- return CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ QualType compType =
+ CheckMatrixElementwiseOperands(LHS, RHS, Loc, CompLHSTy);
+ if (CompLHSTy)
+ *CompLHSTy = compType;
+ return compType;
}
QualType compType = UsualArithmeticConversions(
@@ -10690,7 +10878,16 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
LHS.get(), RHS.get()))
return QualType();
- // FIXME: Add warnings for nullptr - ptr.
+ bool LHSIsNullPtr = LHS.get()->IgnoreParenCasts()->isNullPointerConstant(
+ Context, Expr::NPC_ValueDependentIsNotNull);
+ bool RHSIsNullPtr = RHS.get()->IgnoreParenCasts()->isNullPointerConstant(
+ Context, Expr::NPC_ValueDependentIsNotNull);
+
+ // Subtracting nullptr or from nullptr is suspect
+ if (LHSIsNullPtr)
+ diagnoseSubtractionOnNullPointer(*this, Loc, LHS.get(), RHSIsNullPtr);
+ if (RHSIsNullPtr)
+ diagnoseSubtractionOnNullPointer(*this, Loc, RHS.get(), LHSIsNullPtr);
// The pointee type may have zero size. As an extension, a structure or
// union may have zero size or an array may have zero length. In this
@@ -11658,6 +11855,21 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
LHS.get()->getSourceRange());
}
+ if (IsOrdered && LHSType->isFunctionPointerType() &&
+ RHSType->isFunctionPointerType()) {
+ // Valid unless a relational comparison of function pointers
+ bool IsError = Opc == BO_Cmp;
+ auto DiagID =
+ IsError ? diag::err_typecheck_ordered_comparison_of_function_pointers
+ : getLangOpts().CPlusPlus
+ ? diag::warn_typecheck_ordered_comparison_of_function_pointers
+ : diag::ext_typecheck_ordered_comparison_of_function_pointers;
+ Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ if (IsError)
+ return QualType();
+ }
+
if ((LHSType->isIntegerType() && !LHSIsNull) ||
(RHSType->isIntegerType() && !RHSIsNull)) {
// Skip normal pointer conversion checks in this case; we have better
@@ -11725,12 +11937,6 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
<< LHSType << RHSType << LCanPointeeTy->isIncompleteType()
<< RCanPointeeTy->isIncompleteType();
}
- if (LCanPointeeTy->isFunctionType()) {
- // Valid unless a relational comparison of function pointers
- Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
- << LHSType << RHSType << LHS.get()->getSourceRange()
- << RHS.get()->getSourceRange();
- }
}
} else if (!IsRelational &&
(LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
@@ -12071,11 +12277,30 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
QualType LHSType = LHS.get()->getType();
- // If AltiVec, the comparison results in a numeric type, i.e.
- // bool for C++, int for C
- if (getLangOpts().AltiVec &&
- vType->castAs<VectorType>()->getVectorKind() == VectorType::AltiVecVector)
- return Context.getLogicalOperationType();
+ // Determine the return type of a vector compare. By default clang will return
+ // a scalar for all vector compares except vector bool and vector pixel.
+ // With the gcc compiler we will always return a vector type and with the xl
+ // compiler we will always return a scalar type. This switch allows choosing
+  // which behavior is preferred.
+ if (getLangOpts().AltiVec) {
+ switch (getLangOpts().getAltivecSrcCompat()) {
+ case LangOptions::AltivecSrcCompatKind::Mixed:
+ // If AltiVec, the comparison results in a numeric type, i.e.
+ // bool for C++, int for C
+ if (vType->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecVector)
+ return Context.getLogicalOperationType();
+ else
+ Diag(Loc, diag::warn_deprecated_altivec_src_compat);
+ break;
+ case LangOptions::AltivecSrcCompatKind::GCC:
+ // For GCC we always return the vector type.
+ break;
+ case LangOptions::AltivecSrcCompatKind::XL:
+ return Context.getLogicalOperationType();
+ break;
+ }
+ }
// For non-floating point types, check for self-comparisons of the form
// x == x, x != x, x < x, etc. These always evaluate to a constant, and
@@ -12100,6 +12325,11 @@ static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
if (Loc.isMacroID())
return;
+ // Do not diagnose if both LHS and RHS are macros.
+ if (XorLHS.get()->getExprLoc().isMacroID() &&
+ XorRHS.get()->getExprLoc().isMacroID())
+ return;
+
bool Negative = false;
bool ExplicitPlus = false;
const auto *LHSInt = dyn_cast<IntegerLiteral>(XorLHS.get());
@@ -12172,7 +12402,8 @@ static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
RHSStrRef.find('\'') != StringRef::npos)
return;
- bool SuggestXor = S.getLangOpts().CPlusPlus || S.getPreprocessor().isMacroDefined("xor");
+ bool SuggestXor =
+ S.getLangOpts().CPlusPlus || S.getPreprocessor().isMacroDefined("xor");
const llvm::APInt XorValue = LeftSideValue ^ RightSideValue;
int64_t RightSideIntValue = RightSideValue.getSExtValue();
if (LeftSideValue == 2 && RightSideIntValue >= 0) {
@@ -12183,27 +12414,30 @@ static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
if (Overflow) {
if (RightSideIntValue < 64)
S.Diag(Loc, diag::warn_xor_used_as_pow_base)
- << ExprStr << XorValue.toString(10, true) << ("1LL << " + RHSStr)
+ << ExprStr << toString(XorValue, 10, true) << ("1LL << " + RHSStr)
<< FixItHint::CreateReplacement(ExprRange, "1LL << " + RHSStr);
else if (RightSideIntValue == 64)
- S.Diag(Loc, diag::warn_xor_used_as_pow) << ExprStr << XorValue.toString(10, true);
+ S.Diag(Loc, diag::warn_xor_used_as_pow)
+ << ExprStr << toString(XorValue, 10, true);
else
return;
} else {
S.Diag(Loc, diag::warn_xor_used_as_pow_base_extra)
- << ExprStr << XorValue.toString(10, true) << SuggestedExpr
- << PowValue.toString(10, true)
+ << ExprStr << toString(XorValue, 10, true) << SuggestedExpr
+ << toString(PowValue, 10, true)
<< FixItHint::CreateReplacement(
ExprRange, (RightSideIntValue == 0) ? "1" : SuggestedExpr);
}
- S.Diag(Loc, diag::note_xor_used_as_pow_silence) << ("0x2 ^ " + RHSStr) << SuggestXor;
+ S.Diag(Loc, diag::note_xor_used_as_pow_silence)
+ << ("0x2 ^ " + RHSStr) << SuggestXor;
} else if (LeftSideValue == 10) {
std::string SuggestedValue = "1e" + std::to_string(RightSideIntValue);
S.Diag(Loc, diag::warn_xor_used_as_pow_base)
- << ExprStr << XorValue.toString(10, true) << SuggestedValue
+ << ExprStr << toString(XorValue, 10, true) << SuggestedValue
<< FixItHint::CreateReplacement(ExprRange, SuggestedValue);
- S.Diag(Loc, diag::note_xor_used_as_pow_silence) << ("0xA ^ " + RHSStr) << SuggestXor;
+ S.Diag(Loc, diag::note_xor_used_as_pow_silence)
+ << ("0xA ^ " + RHSStr) << SuggestXor;
}
}
@@ -12900,8 +13134,9 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
// OpenCL v1.2 s6.1.1.1 p2:
// The half data type can only be used to declare a pointer to a buffer that
// contains half values
- if (getLangOpts().OpenCL && !getOpenCLOptions().isEnabled("cl_khr_fp16") &&
- LHSType->isHalfType()) {
+ if (getLangOpts().OpenCL &&
+ !getOpenCLOptions().isAvailableOption("cl_khr_fp16", getLangOpts()) &&
+ LHSType->isHalfType()) {
Diag(Loc, diag::err_opencl_half_load_store) << 1
<< LHSType.getUnqualifiedType();
return QualType();
@@ -13208,7 +13443,7 @@ static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
OK = Op->getObjectKind();
return ResType;
} else {
- VK = VK_RValue;
+ VK = VK_PRValue;
return ResType.getUnqualifiedType();
}
}
@@ -13595,7 +13830,7 @@ static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
// ...except that certain expressions are never l-values in C.
if (!S.getLangOpts().CPlusPlus && Result.isCForbiddenLValueType())
- VK = VK_RValue;
+ VK = VK_PRValue;
return Result;
}
@@ -13864,7 +14099,7 @@ ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
// The following two variables are used for compound assignment operators
QualType CompLHSTy; // Type of LHS after promotions for computation
QualType CompResultTy; // Type of computation result
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
bool ConvertHalfVec = false;
@@ -14483,7 +14718,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
// assignment, but is not an lvalue.
return CompoundAssignOperator::Create(
Context, LHSExpr, RHSExpr, Opc,
- LHSExpr->getType().getUnqualifiedType(), VK_RValue, OK_Ordinary,
+ LHSExpr->getType().getUnqualifiedType(), VK_PRValue, OK_Ordinary,
OpLoc, CurFPFeatureOverrides());
QualType ResultType;
switch (Opc) {
@@ -14509,7 +14744,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
break;
}
return BinaryOperator::Create(Context, LHSExpr, RHSExpr, Opc, ResultType,
- VK_RValue, OK_Ordinary, OpLoc,
+ VK_PRValue, OK_Ordinary, OpLoc,
CurFPFeatureOverrides());
}
@@ -14531,7 +14766,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
Expr *InputExpr) {
ExprResult Input = InputExpr;
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
QualType resultType;
bool CanOverflow = false;
@@ -14704,7 +14939,7 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
// complex l-values to ordinary l-values and all other values to r-values.
if (Input.isInvalid()) return ExprError();
if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) {
- if (Input.get()->getValueKind() != VK_RValue &&
+ if (Input.get()->isGLValue() &&
Input.get()->getObjectKind() == OK_Ordinary)
VK = Input.get()->getValueKind();
} else if (!getLangOpts().CPlusPlus) {
@@ -15123,7 +15358,7 @@ ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
SourceLocation RPLoc) {
assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)");
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
ExprObjectKind OK = OK_Ordinary;
QualType resType;
bool CondIsTrue = false;
@@ -15604,8 +15839,46 @@ ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
QualType PromoteType;
if (TInfo->getType()->isPromotableIntegerType()) {
PromoteType = Context.getPromotedIntegerType(TInfo->getType());
- if (Context.typesAreCompatible(PromoteType, TInfo->getType()))
+ // [cstdarg.syn]p1 defers the C++ behavior to what the C standard says,
+ // and C2x 7.16.1.1p2 says, in part:
+ // If type is not compatible with the type of the actual next argument
+ // (as promoted according to the default argument promotions), the
+ // behavior is undefined, except for the following cases:
+ // - both types are pointers to qualified or unqualified versions of
+ // compatible types;
+ // - one type is a signed integer type, the other type is the
+ // corresponding unsigned integer type, and the value is
+ // representable in both types;
+ // - one type is pointer to qualified or unqualified void and the
+ // other is a pointer to a qualified or unqualified character type.
+ // Given that type compatibility is the primary requirement (ignoring
+ // qualifications), you would think we could call typesAreCompatible()
+ // directly to test this. However, in C++, that checks for *same type*,
+ // which causes false positives when passing an enumeration type to
+ // va_arg. Instead, get the underlying type of the enumeration and pass
+ // that.
+ QualType UnderlyingType = TInfo->getType();
+ if (const auto *ET = UnderlyingType->getAs<EnumType>())
+ UnderlyingType = ET->getDecl()->getIntegerType();
+ if (Context.typesAreCompatible(PromoteType, UnderlyingType,
+ /*CompareUnqualified*/ true))
PromoteType = QualType();
+
+ // If the types are still not compatible, we need to test whether the
+ // promoted type and the underlying type are the same except for
+ // signedness. Ask the AST for the correctly corresponding type and see
+ // if that's compatible.
+ if (!PromoteType.isNull() &&
+ PromoteType->isUnsignedIntegerType() !=
+ UnderlyingType->isUnsignedIntegerType()) {
+ UnderlyingType =
+ UnderlyingType->isUnsignedIntegerType()
+ ? Context.getCorrespondingSignedType(UnderlyingType)
+ : Context.getCorrespondingUnsignedType(UnderlyingType);
+ if (Context.typesAreCompatible(PromoteType, UnderlyingType,
+ /*CompareUnqualified*/ true))
+ PromoteType = QualType();
+ }
}
if (TInfo->getType()->isSpecificBuiltinType(BuiltinType::Float))
PromoteType = Context.DoubleTy;
@@ -16138,7 +16411,8 @@ Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
if (Result)
*Result = E->EvaluateKnownConstIntCheckOverflow(Context);
if (!isa<ConstantExpr>(E))
- E = ConstantExpr::Create(Context, E);
+ E = Result ? ConstantExpr::Create(Context, E, APValue(*Result))
+ : ConstantExpr::Create(Context, E);
return E;
}
@@ -16520,8 +16794,10 @@ void Sema::PopExpressionEvaluationContext() {
if (!Rec.Lambdas.empty()) {
using ExpressionKind = ExpressionEvaluationContextRecord::ExpressionKind;
- if (Rec.ExprContext == ExpressionKind::EK_TemplateArgument || Rec.isUnevaluated() ||
- (Rec.isConstantEvaluated() && !getLangOpts().CPlusPlus17)) {
+ if (!getLangOpts().CPlusPlus20 &&
+ (Rec.ExprContext == ExpressionKind::EK_TemplateArgument ||
+ Rec.isUnevaluated() ||
+ (Rec.isConstantEvaluated() && !getLangOpts().CPlusPlus17))) {
unsigned D;
if (Rec.isUnevaluated()) {
// C++11 [expr.prim.lambda]p2:
@@ -17030,6 +17306,42 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
CaptureType, DeclRefType,
FunctionScopeIndexToStopAt);
+ if (SemaRef.LangOpts.CUDA && Var && Var->hasGlobalStorage()) {
+ auto *FD = dyn_cast_or_null<FunctionDecl>(SemaRef.CurContext);
+ auto VarTarget = SemaRef.IdentifyCUDATarget(Var);
+ auto UserTarget = SemaRef.IdentifyCUDATarget(FD);
+ if (VarTarget == Sema::CVT_Host &&
+ (UserTarget == Sema::CFT_Device || UserTarget == Sema::CFT_HostDevice ||
+ UserTarget == Sema::CFT_Global)) {
+ // Diagnose ODR-use of host global variables in device functions.
+ // Reference of device global variables in host functions is allowed
+ // through shadow variables therefore it is not diagnosed.
+ if (SemaRef.LangOpts.CUDAIsDevice) {
+ SemaRef.targetDiag(Loc, diag::err_ref_bad_target)
+ << /*host*/ 2 << /*variable*/ 1 << Var << UserTarget;
+ SemaRef.targetDiag(Var->getLocation(),
+ Var->getType().isConstQualified()
+ ? diag::note_cuda_const_var_unpromoted
+ : diag::note_cuda_host_var);
+ }
+ } else if (VarTarget == Sema::CVT_Device &&
+ (UserTarget == Sema::CFT_Host ||
+ UserTarget == Sema::CFT_HostDevice) &&
+ !Var->hasExternalStorage()) {
+ // Record a CUDA/HIP device side variable if it is ODR-used
+ // by host code. This is done conservatively, when the variable is
+ // referenced in any of the following contexts:
+ // - a non-function context
+ // - a host function
+ // - a host device function
+ // This makes the ODR-use of the device side variable by host code to
+ // be visible in the device compilation for the compiler to be able to
+ // emit template variables instantiated by host code only and to
+ // externalize the static device side variable ODR-used by host code.
+ SemaRef.getASTContext().CUDADeviceVarODRUsedByHost.insert(Var);
+ }
+ }
+
Var->markUsed(SemaRef.Context);
}
@@ -17263,18 +17575,17 @@ static bool captureInBlock(BlockScopeInfo *BSI, VarDecl *Var,
/// Capture the given variable in the captured region.
-static bool captureInCapturedRegion(CapturedRegionScopeInfo *RSI,
- VarDecl *Var,
- SourceLocation Loc,
- const bool BuildAndDiagnose,
- QualType &CaptureType,
- QualType &DeclRefType,
- const bool RefersToCapturedVariable,
- Sema &S, bool Invalid) {
+static bool captureInCapturedRegion(
+ CapturedRegionScopeInfo *RSI, VarDecl *Var, SourceLocation Loc,
+ const bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType,
+ const bool RefersToCapturedVariable, Sema::TryCaptureKind Kind,
+ bool IsTopScope, Sema &S, bool Invalid) {
// By default, capture variables by reference.
bool ByRef = true;
- // Using an LValue reference type is consistent with Lambdas (see below).
- if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) {
+ if (IsTopScope && Kind != Sema::TryCapture_Implicit) {
+ ByRef = (Kind == Sema::TryCapture_ExplicitByRef);
+ } else if (S.getLangOpts().OpenMP && RSI->CapRegionKind == CR_OpenMP) {
+ // Using an LValue reference type is consistent with Lambdas (see below).
if (S.isOpenMPCapturedDecl(Var)) {
bool HasConst = DeclRefType.isConstQualified();
DeclRefType = DeclRefType.getUnqualifiedType();
@@ -17403,6 +17714,107 @@ static bool captureInLambda(LambdaScopeInfo *LSI,
return !Invalid;
}
+static bool canCaptureVariableByCopy(VarDecl *Var, const ASTContext &Context) {
+ // Offer a Copy fix even if the type is dependent.
+ if (Var->getType()->isDependentType())
+ return true;
+ QualType T = Var->getType().getNonReferenceType();
+ if (T.isTriviallyCopyableType(Context))
+ return true;
+ if (CXXRecordDecl *RD = T->getAsCXXRecordDecl()) {
+
+ if (!(RD = RD->getDefinition()))
+ return false;
+ if (RD->hasSimpleCopyConstructor())
+ return true;
+ if (RD->hasUserDeclaredCopyConstructor())
+ for (CXXConstructorDecl *Ctor : RD->ctors())
+ if (Ctor->isCopyConstructor())
+ return !Ctor->isDeleted();
+ }
+ return false;
+}
+
+/// Create up to 4 fix-its for explicit reference and value capture of \p Var or
+/// default capture. Fixes may be omitted if they aren't allowed by the
+/// standard, for example we can't emit a default copy capture fix-it if we
+/// already explicitly copy capture another variable.
+static void buildLambdaCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI,
+ VarDecl *Var) {
+ assert(LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None);
+ // Don't offer capture by copy or default capture by copy fixes if Var is
+ // known not to be copy constructible.
+ bool ShouldOfferCopyFix = canCaptureVariableByCopy(Var, Sema.getASTContext());
+
+ SmallString<32> FixBuffer;
+ StringRef Separator = LSI->NumExplicitCaptures > 0 ? ", " : "";
+ if (Var->getDeclName().isIdentifier() && !Var->getName().empty()) {
+ SourceLocation VarInsertLoc = LSI->IntroducerRange.getEnd();
+ if (ShouldOfferCopyFix) {
+ // Offer fixes to insert an explicit capture for the variable.
+ // [] -> [VarName]
+ // [OtherCapture] -> [OtherCapture, VarName]
+ FixBuffer.assign({Separator, Var->getName()});
+ Sema.Diag(VarInsertLoc, diag::note_lambda_variable_capture_fixit)
+ << Var << /*value*/ 0
+ << FixItHint::CreateInsertion(VarInsertLoc, FixBuffer);
+ }
+ // As above but capture by reference.
+ FixBuffer.assign({Separator, "&", Var->getName()});
+ Sema.Diag(VarInsertLoc, diag::note_lambda_variable_capture_fixit)
+ << Var << /*reference*/ 1
+ << FixItHint::CreateInsertion(VarInsertLoc, FixBuffer);
+ }
+
+ // Only try to offer default capture if there are no captures excluding this
+ // and init captures.
+ // [this]: OK.
+ // [X = Y]: OK.
+ // [&A, &B]: Don't offer.
+ // [A, B]: Don't offer.
+ if (llvm::any_of(LSI->Captures, [](Capture &C) {
+ return !C.isThisCapture() && !C.isInitCapture();
+ }))
+ return;
+
+ // The default capture specifiers, '=' or '&', must appear first in the
+ // capture body.
+ SourceLocation DefaultInsertLoc =
+ LSI->IntroducerRange.getBegin().getLocWithOffset(1);
+
+ if (ShouldOfferCopyFix) {
+ bool CanDefaultCopyCapture = true;
+ // [=, *this] OK since c++17
+ // [=, this] OK since c++20
+ if (LSI->isCXXThisCaptured() && !Sema.getLangOpts().CPlusPlus20)
+ CanDefaultCopyCapture = Sema.getLangOpts().CPlusPlus17
+ ? LSI->getCXXThisCapture().isCopyCapture()
+ : false;
+ // We can't use default capture by copy if any captures already specified
+ // capture by copy.
+ if (CanDefaultCopyCapture && llvm::none_of(LSI->Captures, [](Capture &C) {
+ return !C.isThisCapture() && !C.isInitCapture() && C.isCopyCapture();
+ })) {
+ FixBuffer.assign({"=", Separator});
+ Sema.Diag(DefaultInsertLoc, diag::note_lambda_default_capture_fixit)
+ << /*value*/ 0
+ << FixItHint::CreateInsertion(DefaultInsertLoc, FixBuffer);
+ }
+ }
+
+ // We can't use default capture by reference if any captures already specified
+ // capture by reference.
+ if (llvm::none_of(LSI->Captures, [](Capture &C) {
+ return !C.isInitCapture() && C.isReferenceCapture() &&
+ !C.isThisCapture();
+ })) {
+ FixBuffer.assign({"&", Separator});
+ Sema.Diag(DefaultInsertLoc, diag::note_lambda_default_capture_fixit)
+ << /*reference*/ 1
+ << FixItHint::CreateInsertion(DefaultInsertLoc, FixBuffer);
+ }
+}
+
bool Sema::tryCaptureVariable(
VarDecl *Var, SourceLocation ExprLoc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType,
@@ -17492,6 +17904,7 @@ bool Sema::tryCaptureVariable(
Diag(ExprLoc, diag::err_lambda_impcap) << Var;
Diag(Var->getLocation(), diag::note_previous_decl) << Var;
Diag(LSI->Lambda->getBeginLoc(), diag::note_lambda_decl);
+ buildLambdaCaptureFixit(*this, LSI, Var);
} else
diagnoseUncapturableValueReference(*this, ExprLoc, Var, DC);
}
@@ -17570,9 +17983,11 @@ bool Sema::tryCaptureVariable(
if (BuildAndDiagnose) {
Diag(ExprLoc, diag::err_lambda_impcap) << Var;
Diag(Var->getLocation(), diag::note_previous_decl) << Var;
- if (cast<LambdaScopeInfo>(CSI)->Lambda)
- Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getBeginLoc(),
- diag::note_lambda_decl);
+ auto *LSI = cast<LambdaScopeInfo>(CSI);
+ if (LSI->Lambda) {
+ Diag(LSI->Lambda->getBeginLoc(), diag::note_lambda_decl);
+ buildLambdaCaptureFixit(*this, LSI, Var);
+ }
// FIXME: If we error out because an outer lambda can not implicitly
// capture a variable that an inner lambda explicitly captures, we
// should have the inner lambda do the explicit capture - because
@@ -17620,9 +18035,9 @@ bool Sema::tryCaptureVariable(
DeclRefType, Nested, *this, Invalid);
Nested = true;
} else if (CapturedRegionScopeInfo *RSI = dyn_cast<CapturedRegionScopeInfo>(CSI)) {
- Invalid = !captureInCapturedRegion(RSI, Var, ExprLoc, BuildAndDiagnose,
- CaptureType, DeclRefType, Nested,
- *this, Invalid);
+ Invalid = !captureInCapturedRegion(
+ RSI, Var, ExprLoc, BuildAndDiagnose, CaptureType, DeclRefType, Nested,
+ Kind, /*IsTopScope*/ I == N - 1, *this, Invalid);
Nested = true;
} else {
LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
@@ -18079,8 +18494,9 @@ void Sema::CleanupVarDeclMarking() {
"MarkVarDeclODRUsed failed to cleanup MaybeODRUseExprs?");
}
-static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
- VarDecl *Var, Expr *E) {
+static void DoMarkVarDeclReferenced(
+ Sema &SemaRef, SourceLocation Loc, VarDecl *Var, Expr *E,
+ llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
assert((!E || isa<DeclRefExpr>(E) || isa<MemberExpr>(E) ||
isa<FunctionParmPackExpr>(E)) &&
"Invalid Expr argument to DoMarkVarDeclReferenced");
@@ -18089,24 +18505,6 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
if (Var->isInvalidDecl())
return;
- // Record a CUDA/HIP static device/constant variable if it is referenced
- // by host code. This is done conservatively, when the variable is referenced
- // in any of the following contexts:
- // - a non-function context
- // - a host function
- // - a host device function
- // This also requires the reference of the static device/constant variable by
- // host code to be visible in the device compilation for the compiler to be
- // able to externalize the static device/constant variable.
- if (SemaRef.getASTContext().mayExternalizeStaticVar(Var)) {
- auto *CurContext = SemaRef.CurContext;
- if (!CurContext || !isa<FunctionDecl>(CurContext) ||
- cast<FunctionDecl>(CurContext)->hasAttr<CUDAHostAttr>() ||
- (!cast<FunctionDecl>(CurContext)->hasAttr<CUDADeviceAttr>() &&
- !cast<FunctionDecl>(CurContext)->hasAttr<CUDAGlobalAttr>()))
- SemaRef.getASTContext().CUDAStaticDeviceVarReferencedByHost.insert(Var);
- }
-
auto *MSI = Var->getMemberSpecializationInfo();
TemplateSpecializationKind TSK = MSI ? MSI->getTemplateSpecializationKind()
: Var->getTemplateSpecializationKind();
@@ -18115,6 +18513,10 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
bool UsableInConstantExpr =
Var->mightBeUsableInConstantExpressions(SemaRef.Context);
+ if (Var->isLocalVarDeclOrParm() && !Var->hasExternalStorage()) {
+ RefsMinusAssignments.insert({Var, 0}).first->getSecond()++;
+ }
+
// C++20 [expr.const]p12:
// A variable [...] is needed for constant evaluation if it is [...] a
// variable whose name appears as a potentially constant evaluated
@@ -18270,16 +18672,18 @@ static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
/// used directly for normal expressions referring to VarDecl.
void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
- DoMarkVarDeclReferenced(*this, Loc, Var, nullptr);
+ DoMarkVarDeclReferenced(*this, Loc, Var, nullptr, RefsMinusAssignments);
}
-static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
- Decl *D, Expr *E, bool MightBeOdrUse) {
+static void
+MarkExprReferenced(Sema &SemaRef, SourceLocation Loc, Decl *D, Expr *E,
+ bool MightBeOdrUse,
+ llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
if (SemaRef.isInOpenMPDeclareTargetContext())
SemaRef.checkDeclIsAllowedInOpenMPTarget(E, D);
if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- DoMarkVarDeclReferenced(SemaRef, Loc, Var, E);
+ DoMarkVarDeclReferenced(SemaRef, Loc, Var, E, RefsMinusAssignments);
return;
}
@@ -18325,7 +18729,8 @@ void Sema::MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base) {
if (!isConstantEvaluated() && FD->isConsteval() &&
!RebuildingImmediateInvocation)
ExprEvalContexts.back().ReferenceToConsteval.insert(E);
- MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse);
+ MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E, OdrUse,
+ RefsMinusAssignments);
}
/// Perform reference-marking and odr-use handling for a MemberExpr.
@@ -18344,13 +18749,15 @@ void Sema::MarkMemberReferenced(MemberExpr *E) {
}
SourceLocation Loc =
E->getMemberLoc().isValid() ? E->getMemberLoc() : E->getBeginLoc();
- MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, MightBeOdrUse);
+ MarkExprReferenced(*this, Loc, E->getMemberDecl(), E, MightBeOdrUse,
+ RefsMinusAssignments);
}
/// Perform reference-marking and odr-use handling for a FunctionParmPackExpr.
void Sema::MarkFunctionParmPackReferenced(FunctionParmPackExpr *E) {
for (VarDecl *VD : *E)
- MarkExprReferenced(*this, E->getParameterPackLocation(), VD, E, true);
+ MarkExprReferenced(*this, E->getParameterPackLocation(), VD, E, true,
+ RefsMinusAssignments);
}
/// Perform marking for a reference to an arbitrary declaration. It
@@ -18769,7 +19176,7 @@ namespace {
Expr *SubExpr = SubResult.get();
E->setSubExpr(SubExpr);
E->setType(S.Context.getPointerType(SubExpr->getType()));
- assert(E->getValueKind() == VK_RValue);
+ assert(E->isPRValue());
assert(E->getObjectKind() == OK_Ordinary);
return E;
}
@@ -18779,7 +19186,7 @@ namespace {
E->setType(VD->getType());
- assert(E->getValueKind() == VK_RValue);
+ assert(E->isPRValue());
if (S.getLangOpts().CPlusPlus &&
!(isa<CXXMethodDecl>(VD) &&
cast<CXXMethodDecl>(VD)->isInstance()))
@@ -18870,7 +19277,7 @@ namespace {
return ExprError();
}
- assert(E->getValueKind() == VK_RValue);
+ assert(E->isPRValue());
assert(E->getObjectKind() == OK_Ordinary);
E->setType(DestType);
@@ -19030,7 +19437,7 @@ ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *E) {
ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) {
// The only case we should ever see here is a function-to-pointer decay.
if (E->getCastKind() == CK_FunctionToPointerDecay) {
- assert(E->getValueKind() == VK_RValue);
+ assert(E->isPRValue());
assert(E->getObjectKind() == OK_Ordinary);
E->setType(DestType);
@@ -19044,7 +19451,7 @@ ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) {
E->setSubExpr(Result.get());
return E;
} else if (E->getCastKind() == CK_LValueToRValue) {
- assert(E->getValueKind() == VK_RValue);
+ assert(E->isPRValue());
assert(E->getObjectKind() == OK_Ordinary);
assert(isa<BlockPointerType>(E->getType()));
@@ -19076,8 +19483,8 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
DestType = Ptr->getPointeeType();
ExprResult Result = resolveDecl(E, VD);
if (Result.isInvalid()) return ExprError();
- return S.ImpCastExprToType(Result.get(), Type,
- CK_FunctionToPointerDecay, VK_RValue);
+ return S.ImpCastExprToType(Result.get(), Type, CK_FunctionToPointerDecay,
+ VK_PRValue);
}
if (!Type->isFunctionType()) {
@@ -19119,13 +19526,13 @@ ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
if (MD->isInstance()) {
- ValueKind = VK_RValue;
+ ValueKind = VK_PRValue;
Type = S.Context.BoundMemberTy;
}
// Function references aren't l-values in C.
if (!S.getLangOpts().CPlusPlus)
- ValueKind = VK_RValue;
+ ValueKind = VK_PRValue;
// - variables
} else if (isa<VarDecl>(VD)) {
@@ -19324,7 +19731,7 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
CK_BuiltinFnToFnPtr)
.get();
return CallExpr::Create(Context, E, /*Args=*/{}, Context.IntTy,
- VK_RValue, SourceLocation(),
+ VK_PRValue, SourceLocation(),
FPOptionsOverride());
}
}
@@ -19365,6 +19772,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
#define PPC_VECTOR_TYPE(Name, Id, Size) \
case BuiltinType::Id:
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) case BuiltinType::Id:
+#include "clang/Basic/RISCVVTypes.def"
#define BUILTIN_TYPE(Id, SingletonId) case BuiltinType::Id:
#define PLACEHOLDER_TYPE(Id, SingletonId)
#include "clang/AST/BuiltinTypes.def"
@@ -19406,23 +19815,31 @@ Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
ExprResult Sema::ActOnObjCAvailabilityCheckExpr(
llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc,
SourceLocation RParen) {
-
- StringRef Platform = getASTContext().getTargetInfo().getPlatformName();
-
- auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
- return Spec.getPlatform() == Platform;
- });
+ auto FindSpecVersion = [&](StringRef Platform) -> Optional<VersionTuple> {
+ auto Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
+ return Spec.getPlatform() == Platform;
+ });
+ // Transcribe the "ios" availability check to "maccatalyst" when compiling
+ // for "maccatalyst" if "maccatalyst" is not specified.
+ if (Spec == AvailSpecs.end() && Platform == "maccatalyst") {
+ Spec = llvm::find_if(AvailSpecs, [&](const AvailabilitySpec &Spec) {
+ return Spec.getPlatform() == "ios";
+ });
+ }
+ if (Spec == AvailSpecs.end())
+ return None;
+ return Spec->getVersion();
+ };
VersionTuple Version;
- if (Spec != AvailSpecs.end())
- Version = Spec->getVersion();
+ if (auto MaybeVersion =
+ FindSpecVersion(Context.getTargetInfo().getPlatformName()))
+ Version = *MaybeVersion;
- // The use of `@available` in the enclosing function should be analyzed to
+ // The use of `@available` in the enclosing context should be analyzed to
// warn when it's used inappropriately (i.e. not if(@available)).
- if (getCurFunctionOrMethodDecl())
- getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
- else if (getCurBlock() || getCurLambda())
- getCurFunction()->HasPotentialAvailabilityViolations = true;
+ if (FunctionScopeInfo *Context = getCurFunctionAvailabilityContext())
+ Context->HasPotentialAvailabilityViolations = true;
return new (Context)
ObjCAvailabilityCheckExpr(Version, AtLoc, RParen, Context.BoolTy);
@@ -19436,8 +19853,10 @@ ExprResult Sema::CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
if (isSFINAEContext())
return ExprError();
- if (T.isNull() || !Context.getLangOpts().RecoveryASTType)
+ if (T.isNull() || T->isUndeducedType() ||
+ !Context.getLangOpts().RecoveryASTType)
// We don't know the concrete type, fallback to dependent type.
T = Context.DependentTy;
+
return RecoveryExpr::Create(Context, T, Begin, End, SubExprs);
}
diff --git a/clang/lib/Sema/SemaExprCXX.cpp b/clang/lib/Sema/SemaExprCXX.cpp
index d91db60f17a0..111ffa1f04a0 100644
--- a/clang/lib/Sema/SemaExprCXX.cpp
+++ b/clang/lib/Sema/SemaExprCXX.cpp
@@ -484,8 +484,25 @@ ParsedType Sema::getDestructorTypeForDecltype(const DeclSpec &DS,
}
bool Sema::checkLiteralOperatorId(const CXXScopeSpec &SS,
- const UnqualifiedId &Name) {
+ const UnqualifiedId &Name, bool IsUDSuffix) {
assert(Name.getKind() == UnqualifiedIdKind::IK_LiteralOperatorId);
+ if (!IsUDSuffix) {
+ // [over.literal] p8
+ //
+ // double operator""_Bq(long double); // OK: not a reserved identifier
+ // double operator"" _Bq(long double); // ill-formed, no diagnostic required
+ IdentifierInfo *II = Name.Identifier;
+ ReservedIdentifierStatus Status = II->isReserved(PP.getLangOpts());
+ SourceLocation Loc = Name.getEndLoc();
+ if (Status != ReservedIdentifierStatus::NotReserved &&
+ !PP.getSourceManager().isInSystemHeader(Loc)) {
+ Diag(Loc, diag::warn_reserved_extern_symbol)
+ << II << static_cast<int>(Status)
+ << FixItHint::CreateReplacement(
+ Name.getSourceRange(),
+ (StringRef("operator\"\"") + II->getName()).str());
+ }
+ }
if (!SS.isValid())
return false;
@@ -567,11 +584,14 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
// polymorphic class type [...] [the] expression is an unevaluated
// operand. [...]
if (RecordD->isPolymorphic() && E->isGLValue()) {
- // The subexpression is potentially evaluated; switch the context
- // and recheck the subexpression.
- ExprResult Result = TransformToPotentiallyEvaluated(E);
- if (Result.isInvalid()) return ExprError();
- E = Result.get();
+ if (isUnevaluatedContext()) {
+ // The operand was processed in unevaluated context, switch the
+ // context and recheck the subexpression.
+ ExprResult Result = TransformToPotentiallyEvaluated(E);
+ if (Result.isInvalid())
+ return ExprError();
+ E = Result.get();
+ }
// We require a vtable to query the type at run time.
MarkVTableUsed(TypeidLoc, RecordD);
@@ -851,10 +871,6 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
Diag(OpLoc, diag::err_omp_simd_region_cannot_use_stmt) << "throw";
if (Ex && !Ex->isTypeDependent()) {
- QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
- if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
- return ExprError();
-
// Initialize the exception result. This implicitly weeds out
// abstract types or types with inaccessible copy constructors.
@@ -870,15 +886,17 @@ ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
// operation from the operand to the exception object (15.1) can be
// omitted by constructing the automatic object directly into the
// exception object
- const VarDecl *NRVOVariable = nullptr;
- if (IsThrownVarInScope)
- NRVOVariable = getCopyElisionCandidate(QualType(), Ex, CES_Strict);
+ NamedReturnInfo NRInfo =
+ IsThrownVarInScope ? getNamedReturnInfo(Ex) : NamedReturnInfo();
+
+ QualType ExceptionObjectTy = Context.getExceptionObjectType(Ex->getType());
+ if (CheckCXXThrowOperand(OpLoc, ExceptionObjectTy, Ex))
+ return ExprError();
InitializedEntity Entity = InitializedEntity::InitializeException(
OpLoc, ExceptionObjectTy,
- /*NRVO=*/NRVOVariable != nullptr);
- ExprResult Res = PerformMoveOrCopyInitialization(
- Entity, NRVOVariable, QualType(), Ex, IsThrownVarInScope);
+ /*NRVO=*/NRInfo.isCopyElidable());
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRInfo, Ex);
if (Res.isInvalid())
return ExprError();
Ex = Res.get();
@@ -1176,15 +1194,11 @@ QualType Sema::getCurrentThisType() {
}
if (ThisTy.isNull() && isLambdaCallOperator(CurContext) &&
- inTemplateInstantiation()) {
-
- assert(isa<CXXRecordDecl>(DC) &&
- "Trying to get 'this' type from static method?");
+ inTemplateInstantiation() && isa<CXXRecordDecl>(DC)) {
// This is a lambda call operator that is being instantiated as a default
// initializer. DC must point to the enclosing class type, so we can recover
// the 'this' type from it.
-
QualType ClassTy = Context.getTypeDeclType(cast<CXXRecordDecl>(DC));
// There are no cv-qualifiers for 'this' within default initializers,
// per [expr.prim.general]p4.
@@ -1230,6 +1244,18 @@ Sema::CXXThisScopeRAII::~CXXThisScopeRAII() {
}
}
+static void buildLambdaThisCaptureFixit(Sema &Sema, LambdaScopeInfo *LSI) {
+ SourceLocation DiagLoc = LSI->IntroducerRange.getEnd();
+ assert(!LSI->isCXXThisCaptured());
+ // [=, this] {}; // until C++20: Error: this when = is the default
+ if (LSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval &&
+ !Sema.getLangOpts().CPlusPlus20)
+ return;
+ Sema.Diag(DiagLoc, diag::note_lambda_this_capture_fixit)
+ << FixItHint::CreateInsertion(
+ DiagLoc, LSI->NumExplicitCaptures > 0 ? ", this" : "this");
+}
+
bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
bool BuildAndDiagnose, const unsigned *const FunctionScopeIndexToStopAt,
const bool ByCopy) {
@@ -1278,9 +1304,12 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI);
if (LSI && isGenericLambdaCallOperatorSpecialization(LSI->CallOperator)) {
// This context can't implicitly capture 'this'; fail out.
- if (BuildAndDiagnose)
+ if (BuildAndDiagnose) {
Diag(Loc, diag::err_this_capture)
<< (Explicit && idx == MaxFunctionScopesIndex);
+ if (!Explicit)
+ buildLambdaThisCaptureFixit(*this, LSI);
+ }
return true;
}
if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
@@ -1300,6 +1329,9 @@ bool Sema::CheckCXXThisCapture(SourceLocation Loc, const bool Explicit,
if (BuildAndDiagnose)
Diag(Loc, diag::err_this_capture)
<< (Explicit && idx == MaxFunctionScopesIndex);
+
+ if (!Explicit)
+ buildLambdaThisCaptureFixit(*this, LSI);
return true;
}
break;
@@ -2126,7 +2158,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
return ExprError(
Diag((*ArraySize)->getBeginLoc(), diag::err_array_too_large)
- << Value->toString(10) << (*ArraySize)->getSourceRange());
+ << toString(*Value, 10) << (*ArraySize)->getSourceRange());
}
KnownArraySize = Value->getZExtValue();
@@ -2221,7 +2253,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SizeTy, SourceLocation());
// Otherwise, if we failed to constant-fold the allocation size, we'll
// just give up and pass-in something opaque, that isn't a null pointer.
- OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_RValue,
+ OpaqueValueExpr OpaqueAllocationSize(SourceLocation(), SizeTy, VK_PRValue,
OK_Ordinary, /*SourceExpr=*/nullptr);
// Let's synthesize the alignment argument in case we will need it.
@@ -2237,7 +2269,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
SizeTy, SourceLocation());
ImplicitCastExpr DesiredAlignment(ImplicitCastExpr::OnStack, AlignValT,
CK_IntegralCast, &AlignmentLiteral,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
// Adjust placement args by prepending conjured size and alignment exprs.
llvm::SmallVector<Expr *, 8> CallArgs;
@@ -2462,12 +2494,27 @@ static bool resolveAllocationOverload(
}
if (Diagnose) {
- PartialDiagnosticAt PD(R.getNameLoc(), S.PDiag(diag::err_ovl_no_viable_function_in_call)
- << R.getLookupName() << Range);
+ // If this is an allocation of the form 'new (p) X' for some object
+ // pointer p (or an expression that will decay to such a pointer),
+ // diagnose the missing inclusion of <new>.
+ if (!R.isClassLookup() && Args.size() == 2 &&
+ (Args[1]->getType()->isObjectPointerType() ||
+ Args[1]->getType()->isArrayType())) {
+ S.Diag(R.getNameLoc(), diag::err_need_header_before_placement_new)
+ << R.getLookupName() << Range;
+ // Listing the candidates is unlikely to be useful; skip it.
+ return true;
+ }
- // If we have aligned candidates, only note the align_val_t candidates
- // from AlignedCandidates and the non-align_val_t candidates from
- // Candidates.
+ // Finish checking all candidates before we note any. This checking can
+ // produce additional diagnostics so can't be interleaved with our
+ // emission of notes.
+ //
+ // For an aligned allocation, separately check the aligned and unaligned
+ // candidates with their respective argument lists.
+ SmallVector<OverloadCandidate*, 32> Cands;
+ SmallVector<OverloadCandidate*, 32> AlignedCands;
+ llvm::SmallVector<Expr*, 4> AlignedArgs;
if (AlignedCandidates) {
auto IsAligned = [](OverloadCandidate &C) {
return C.Function->getNumParams() > 1 &&
@@ -2475,17 +2522,26 @@ static bool resolveAllocationOverload(
};
auto IsUnaligned = [&](OverloadCandidate &C) { return !IsAligned(C); };
- // This was an overaligned allocation, so list the aligned candidates
- // first.
- Args.insert(Args.begin() + 1, AlignArg);
- AlignedCandidates->NoteCandidates(PD, S, OCD_AllCandidates, Args, "",
- R.getNameLoc(), IsAligned);
- Args.erase(Args.begin() + 1);
- Candidates.NoteCandidates(PD, S, OCD_AllCandidates, Args, "", R.getNameLoc(),
- IsUnaligned);
+ AlignedArgs.reserve(Args.size() + 1);
+ AlignedArgs.push_back(Args[0]);
+ AlignedArgs.push_back(AlignArg);
+ AlignedArgs.append(Args.begin() + 1, Args.end());
+ AlignedCands = AlignedCandidates->CompleteCandidates(
+ S, OCD_AllCandidates, AlignedArgs, R.getNameLoc(), IsAligned);
+
+ Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
+ R.getNameLoc(), IsUnaligned);
} else {
- Candidates.NoteCandidates(PD, S, OCD_AllCandidates, Args);
+ Cands = Candidates.CompleteCandidates(S, OCD_AllCandidates, Args,
+ R.getNameLoc());
}
+
+ S.Diag(R.getNameLoc(), diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName() << Range;
+ if (AlignedCandidates)
+ AlignedCandidates->NoteCandidates(S, AlignedArgs, AlignedCands, "",
+ R.getNameLoc());
+ Candidates.NoteCandidates(S, Args, Cands, "", R.getNameLoc());
}
return true;
@@ -3855,7 +3911,7 @@ ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
/// CheckCXXBooleanCondition - Returns true if a conversion to bool is invalid.
ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
- // C++ 6.4p4:
+ // C++11 6.4p4:
// The value of a condition that is an initialized declaration in a statement
// other than a switch statement is the value of the declared variable
// implicitly converted to type bool. If that conversion is ill-formed, the
@@ -3863,12 +3919,22 @@ ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr) {
// The value of a condition that is an expression is the value of the
// expression, implicitly converted to bool.
//
+ // C++2b 8.5.2p2
+ // If the if statement is of the form if constexpr, the value of the condition
+ // is contextually converted to bool and the converted expression shall be
+ // a constant expression.
+ //
+
+ ExprResult E = PerformContextuallyConvertToBool(CondExpr);
+ if (!IsConstexpr || E.isInvalid() || E.get()->isValueDependent())
+ return E;
+
// FIXME: Return this value to the caller so they don't need to recompute it.
- llvm::APSInt Value(/*BitWidth*/1);
- return (IsConstexpr && !CondExpr->isValueDependent())
- ? CheckConvertedConstantExpression(CondExpr, Context.BoolTy, Value,
- CCEK_ConstexprIf)
- : PerformContextuallyConvertToBool(CondExpr);
+ llvm::APSInt Cond;
+ E = VerifyIntegerConstantExpression(
+ E.get(), &Cond,
+ diag::err_constexpr_if_condition_expression_is_not_constant);
+ return E;
}
/// Helper function to determine whether this is the (deprecated) C++
@@ -3929,7 +3995,8 @@ static ExprResult BuildCXXCastArgument(Sema &S,
diag::err_allocation_of_abstract_type))
return ExprError();
- if (S.CompleteConstructorCall(Constructor, From, CastLoc, ConstructorArgs))
+ if (S.CompleteConstructorCall(Constructor, Ty, From, CastLoc,
+ ConstructorArgs))
return ExprError();
S.CheckConstructorAccess(CastLoc, Constructor, FoundDecl,
@@ -4097,9 +4164,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
assert(!ToType->isReferenceType());
if (SCS.Second == ICK_Derived_To_Base) {
SmallVector<Expr*, 8> ConstructorArgs;
- if (CompleteConstructorCall(cast<CXXConstructorDecl>(SCS.CopyConstructor),
- From, /*FIXME:ConstructLoc*/SourceLocation(),
- ConstructorArgs))
+ if (CompleteConstructorCall(
+ cast<CXXConstructorDecl>(SCS.CopyConstructor), ToType, From,
+ /*FIXME:ConstructLoc*/ SourceLocation(), ConstructorArgs))
return ExprError();
return BuildCXXConstructExpr(
/*FIXME:ConstructLoc*/ SourceLocation(), ToType,
@@ -4146,7 +4213,7 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (const AtomicType *FromAtomic = FromType->getAs<AtomicType>()) {
FromType = FromAtomic->getValueType().getUnqualifiedType();
From = ImplicitCastExpr::Create(Context, FromType, CK_AtomicToNonAtomic,
- From, /*BasePath=*/nullptr, VK_RValue,
+ From, /*BasePath=*/nullptr, VK_PRValue,
FPOptionsOverride());
}
break;
@@ -4154,7 +4221,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Lvalue_To_Rvalue: {
assert(From->getObjectKind() != OK_ObjCProperty);
ExprResult FromRes = DefaultLvalueConversion(From);
- assert(!FromRes.isInvalid() && "Can't perform deduced conversion?!");
+ if (FromRes.isInvalid())
+ return ExprError();
+
From = FromRes.get();
FromType = From->getType();
break;
@@ -4162,14 +4231,16 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Array_To_Pointer:
FromType = Context.getArrayDecayedType(FromType);
- From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
case ICK_Function_To_Pointer:
FromType = Context.getPointerType(FromType);
From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ VK_PRValue, /*BasePath=*/nullptr, CCK)
+ .get();
break;
default:
@@ -4211,18 +4282,21 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
assert(FromType->castAs<EnumType>()->getDecl()->isFixed() &&
SCS.Second == ICK_Integral_Promotion &&
"only enums with fixed underlying type can promote to bool");
- From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_IntegralToBoolean, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
} else {
- From = ImpCastExprToType(From, ToType, CK_IntegralCast,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_IntegralCast, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
}
break;
case ICK_Floating_Promotion:
case ICK_Floating_Conversion:
- From = ImpCastExprToType(From, ToType, CK_FloatingCast,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_FloatingCast, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
case ICK_Complex_Promotion:
@@ -4240,18 +4314,21 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
} else {
CK = CK_IntegralComplexCast;
}
- From = ImpCastExprToType(From, ToType, CK,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK, VK_PRValue, /*BasePath=*/nullptr,
+ CCK)
+ .get();
break;
}
case ICK_Floating_Integral:
if (ToType->isRealFloatingType())
- From = ImpCastExprToType(From, ToType, CK_IntegralToFloating,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_IntegralToFloating, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
else
- From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
case ICK_Compatible_Conversion:
@@ -4319,8 +4396,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
if (getLangOpts().allowsNonTrivialObjCLifetimeQualifiers())
CheckObjCConversion(SourceRange(), NewToType, From, CCK);
- From = ImpCastExprToType(From, NewToType, Kind, VK_RValue, &BasePath, CCK)
- .get();
+ From = ImpCastExprToType(From, NewToType, Kind, VK_PRValue, &BasePath, CCK)
+ .get();
break;
}
@@ -4339,8 +4416,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
(void)isCompleteType(From->getExprLoc(), ToType);
}
- From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
- .get();
+ From =
+ ImpCastExprToType(From, ToType, Kind, VK_PRValue, &BasePath, CCK).get();
break;
}
@@ -4352,8 +4429,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
From = ImpCastExprToType(From, Context.BoolTy,
- ScalarTypeToBooleanCastKind(FromType),
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ ScalarTypeToBooleanCastKind(FromType), VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
case ICK_Derived_To_Base: {
@@ -4370,12 +4448,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
}
case ICK_Vector_Conversion:
- From = ImpCastExprToType(From, ToType, CK_BitCast,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
case ICK_SVE_Vector_Conversion:
- From = ImpCastExprToType(From, ToType, CK_BitCast, VK_RValue,
+ From = ImpCastExprToType(From, ToType, CK_BitCast, VK_PRValue,
/*BasePath=*/nullptr, CCK)
.get();
break;
@@ -4383,8 +4462,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
case ICK_Vector_Splat: {
// Vector splat from any arithmetic type to a vector.
Expr *Elem = prepareVectorSplat(ToType, From).get();
- From = ImpCastExprToType(Elem, ToType, CK_VectorSplat, VK_RValue,
- /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(Elem, ToType, CK_VectorSplat, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
}
@@ -4418,22 +4498,27 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
// _Complex x -> x
From = ImpCastExprToType(From, ElType,
- isFloatingComplex ? CK_FloatingComplexToReal
- : CK_IntegralComplexToReal,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ isFloatingComplex ? CK_FloatingComplexToReal
+ : CK_IntegralComplexToReal,
+ VK_PRValue, /*BasePath=*/nullptr, CCK)
+ .get();
// x -> y
if (Context.hasSameUnqualifiedType(ElType, ToType)) {
// do nothing
} else if (ToType->isRealFloatingType()) {
From = ImpCastExprToType(From, ToType,
- isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ isFloatingComplex ? CK_FloatingCast
+ : CK_IntegralToFloating,
+ VK_PRValue, /*BasePath=*/nullptr, CCK)
+ .get();
} else {
assert(ToType->isIntegerType());
From = ImpCastExprToType(From, ToType,
- isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ isFloatingComplex ? CK_FloatingToIntegral
+ : CK_IntegralCast,
+ VK_PRValue, /*BasePath=*/nullptr, CCK)
+ .get();
}
}
break;
@@ -4448,7 +4533,8 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
CastKind Kind =
AddrSpaceL != AddrSpaceR ? CK_AddressSpaceConversion : CK_BitCast;
From = ImpCastExprToType(From, ToType.getUnqualifiedType(), Kind,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ VK_PRValue, /*BasePath=*/nullptr, CCK)
+ .get();
break;
}
@@ -4494,8 +4580,9 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
if (CheckExceptionSpecCompatibility(From, ToType))
return ExprError();
- From = ImpCastExprToType(From, ToType, CK_NoOp,
- VK_RValue, /*BasePath=*/nullptr, CCK).get();
+ From = ImpCastExprToType(From, ToType, CK_NoOp, VK_PRValue,
+ /*BasePath=*/nullptr, CCK)
+ .get();
break;
case ICK_Qualification: {
@@ -4538,13 +4625,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
assert(Context.hasSameType(
ToAtomicType->castAs<AtomicType>()->getValueType(), From->getType()));
From = ImpCastExprToType(From, ToAtomicType, CK_NonAtomicToAtomic,
- VK_RValue, nullptr, CCK).get();
+ VK_PRValue, nullptr, CCK)
+ .get();
}
// Materialize a temporary if we're implicitly converting to a reference
// type. This is not required by the C++ rules but is necessary to maintain
// AST invariants.
- if (ToType->isReferenceType() && From->isRValue()) {
+ if (ToType->isReferenceType() && From->isPRValue()) {
ExprResult Res = TemporaryMaterializationConversion(From);
if (Res.isInvalid())
return ExprError();
@@ -4835,9 +4923,11 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
case UTT_IsSigned:
// Enum types should always return false.
// Floating points should always return true.
- return !T->isEnumeralType() && (T->isFloatingType() || T->isSignedIntegerType());
+ return T->isFloatingType() ||
+ (T->isSignedIntegerType() && !T->isEnumeralType());
case UTT_IsUnsigned:
- return T->isUnsignedIntegerType();
+ // Enum types should always return false.
+ return T->isUnsignedIntegerType() && !T->isEnumeralType();
// Type trait expressions which query classes regarding their construction,
// destruction, and copying. Rather than being based directly on the
@@ -5595,7 +5685,8 @@ ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET,
static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) {
switch (ET) {
case ET_IsLValueExpr: return E->isLValue();
- case ET_IsRValueExpr: return E->isRValue();
+ case ET_IsRValueExpr:
+ return E->isPRValue();
}
llvm_unreachable("Expression trait not covered by switch");
}
@@ -5630,7 +5721,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
// temporary materialization conversion otherwise.
if (isIndirect)
LHS = DefaultLvalueConversion(LHS.get());
- else if (LHS.get()->isRValue())
+ else if (LHS.get()->isPRValue())
LHS = TemporaryMaterializationConversion(LHS.get());
if (LHS.isInvalid())
return QualType();
@@ -5700,7 +5791,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
QualType UseType = Context.getQualifiedType(Class, LHSType.getQualifiers());
if (isIndirect)
UseType = Context.getPointerType(UseType);
- ExprValueKind VK = isIndirect ? VK_RValue : LHS.get()->getValueKind();
+ ExprValueKind VK = isIndirect ? VK_PRValue : LHS.get()->getValueKind();
LHS = ImpCastExprToType(LHS.get(), UseType, CK_DerivedToBase, VK,
&BasePath);
}
@@ -5762,7 +5853,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
// result of an ->* expression is an lvalue if its second operand
// is a pointer to data member and a prvalue otherwise.
if (Result->isFunctionType()) {
- VK = VK_RValue;
+ VK = VK_PRValue;
return Context.BoundMemberTy;
} else if (isIndirect) {
VK = VK_LValue;
@@ -5949,19 +6040,18 @@ static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
// extension.
static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
QualType CondTy) {
- if (!CondTy->isVectorType() || CondTy->isExtVectorType())
+ if (!CondTy->isVectorType() && !CondTy->isExtVectorType())
return false;
const QualType EltTy =
cast<VectorType>(CondTy.getCanonicalType())->getElementType();
-
assert(!EltTy->isBooleanType() && !EltTy->isEnumeralType() &&
"Vectors cant be boolean or enum types");
return EltTy->isIntegralType(Ctx);
}
-QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
- ExprResult &RHS,
- SourceLocation QuestionLoc) {
+QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation QuestionLoc) {
LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
@@ -5976,24 +6066,17 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
QualType ResultType;
- // FIXME: In the future we should define what the Extvector conditional
- // operator looks like.
- if (LHSVT && isa<ExtVectorType>(LHSVT)) {
- Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
- << /*isExtVector*/ true << LHSType;
- return {};
- }
-
- if (RHSVT && isa<ExtVectorType>(RHSVT)) {
- Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
- << /*isExtVector*/ true << RHSType;
- return {};
- }
if (LHSVT && RHSVT) {
+ if (isa<ExtVectorType>(CondVT) != isa<ExtVectorType>(LHSVT)) {
+ Diag(QuestionLoc, diag::err_conditional_vector_cond_result_mismatch)
+ << /*isExtVector*/ isa<ExtVectorType>(CondVT);
+ return {};
+ }
+
// If both are vector types, they must be the same type.
if (!Context.hasSameType(LHSType, RHSType)) {
- Diag(QuestionLoc, diag::err_conditional_vector_mismatched_vectors)
+ Diag(QuestionLoc, diag::err_conditional_vector_mismatched)
<< LHSType << RHSType;
return {};
}
@@ -6018,18 +6101,22 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
if (ResultElementTy->isEnumeralType()) {
Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
- << /*isExtVector*/ false << ResultElementTy;
+ << ResultElementTy;
return {};
}
- ResultType = Context.getVectorType(
- ResultElementTy, CondType->castAs<VectorType>()->getNumElements(),
- VectorType::GenericVector);
+ if (CondType->isExtVectorType())
+ ResultType =
+ Context.getExtVectorType(ResultElementTy, CondVT->getNumElements());
+ else
+ ResultType = Context.getVectorType(
+ ResultElementTy, CondVT->getNumElements(), VectorType::GenericVector);
LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat);
}
assert(!ResultType.isNull() && ResultType->isVectorType() &&
+ (!CondType->isExtVectorType() || ResultType->isExtVectorType()) &&
"Result should have been a vector type");
auto *ResultVectorTy = ResultType->castAs<VectorType>();
QualType ResultElementTy = ResultVectorTy->getElementType();
@@ -6056,15 +6143,21 @@ QualType Sema::CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
/// extension. In this case, LHS == Cond. (But they're not aliases.)
///
-/// This function also implements GCC's vector extension for conditionals.
-/// GCC's vector extension permits the use of a?b:c where the type of
-/// a is that of a integer vector with the same number of elements and
-/// size as the vectors of b and c. If one of either b or c is a scalar
-/// it is implicitly converted to match the type of the vector.
-/// Otherwise the expression is ill-formed. If both b and c are scalars,
-/// then b and c are checked and converted to the type of a if possible.
-/// Unlike the OpenCL ?: operator, the expression is evaluated as
-/// (a[0] != 0 ? b[0] : c[0], .. , a[n] != 0 ? b[n] : c[n]).
+/// This function also implements GCC's vector extension and the
+/// OpenCL/ext_vector_type extension for conditionals. The vector extensions
+/// permit the use of a?b:c where the type of a is that of a integer vector with
+/// the same number of elements and size as the vectors of b and c. If one of
+/// either b or c is a scalar it is implicitly converted to match the type of
+/// the vector. Otherwise the expression is ill-formed. If both b and c are
+/// scalars, then b and c are checked and converted to the type of a if
+/// possible.
+///
+/// The expressions are evaluated differently for GCC's and OpenCL's extensions.
+/// For the GCC extension, the ?: operator is evaluated as
+/// (a[0] != 0 ? b[0] : c[0], .. , a[n] != 0 ? b[n] : c[n]).
+/// For the OpenCL extensions, the ?: operator is evaluated as
+/// (most-significant-bit-set(a[0]) ? b[0] : c[0], .. ,
+/// most-significant-bit-set(a[n]) ? b[n] : c[n]).
QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
ExprResult &RHS, ExprValueKind &VK,
ExprObjectKind &OK,
@@ -6073,7 +6166,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// pointers.
// Assume r-value.
- VK = VK_RValue;
+ VK = VK_PRValue;
OK = OK_Ordinary;
bool IsVectorConditional =
isValidVectorForConditionalCondition(Context, Cond.get()->getType());
@@ -6148,7 +6241,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Neither is void.
if (IsVectorConditional)
- return CheckGNUVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
+ return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
// C++11 [expr.cond]p3
// Otherwise, if the second and third operand have different types, and
@@ -6198,8 +6291,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// that instead?
ExprValueKind LVK = LHS.get()->getValueKind();
ExprValueKind RVK = RHS.get()->getValueKind();
- if (!Context.hasSameType(LTy, RTy) &&
- LVK == RVK && LVK != VK_RValue) {
+ if (!Context.hasSameType(LTy, RTy) && LVK == RVK && LVK != VK_PRValue) {
// DerivedToBase was already handled by the class-specific case above.
// FIXME: Should we allow ObjC conversions here?
const ReferenceConversions AllowedConversions =
@@ -6234,7 +6326,7 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// We only extend this to bitfields, not to the crazy other kinds of
// l-values.
bool Same = Context.hasSameType(LTy, RTy);
- if (Same && LVK == RVK && LVK != VK_RValue &&
+ if (Same && LVK == RVK && LVK != VK_PRValue &&
LHS.get()->isOrdinaryOrBitFieldObject() &&
RHS.get()->isOrdinaryOrBitFieldObject()) {
VK = LHS.get()->getValueKind();
@@ -6805,7 +6897,7 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");
// If the result is a glvalue, we shouldn't bind it.
- if (!E->isRValue())
+ if (E->isGLValue())
return E;
// In ARC, calls that return a retainable type can return retained,
@@ -6897,7 +6989,7 @@ ExprResult Sema::MaybeBindToTemporary(Expr *E) {
CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
: CK_ARCReclaimReturnedObject);
return ImplicitCastExpr::Create(Context, E->getType(), ck, E, nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
}
if (E->getType().isDestructedType() == QualType::DK_nontrivial_c_struct)
@@ -7690,7 +7782,7 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
NestedNameSpecifierLoc(), SourceLocation(), Method,
DeclAccessPair::make(FoundDecl, FoundDecl->getAccess()),
HadMultipleCandidates, DeclarationNameInfo(),
- Context.BoundMemberTy, VK_RValue, OK_Ordinary);
+ Context.BoundMemberTy, VK_PRValue, OK_Ordinary);
QualType ResultType = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultType);
@@ -7704,7 +7796,7 @@ ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
Method->getType()->castAs<FunctionProtoType>()))
return ExprError();
- return CE;
+ return CheckForImmediateInvocation(CE, CE->getMethodDecl());
}
ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
@@ -7739,9 +7831,38 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
}
+static void MaybeDecrementCount(
+ Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
+ DeclRefExpr *LHS = nullptr;
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getLHS()->getType()->isDependentType() ||
+ BO->getRHS()->getType()->isDependentType()) {
+ if (BO->getOpcode() != BO_Assign)
+ return;
+ } else if (!BO->isAssignmentOp())
+ return;
+ LHS = dyn_cast<DeclRefExpr>(BO->getLHS());
+ } else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (COCE->getOperator() != OO_Equal)
+ return;
+ LHS = dyn_cast<DeclRefExpr>(COCE->getArg(0));
+ }
+ if (!LHS)
+ return;
+ VarDecl *VD = dyn_cast<VarDecl>(LHS->getDecl());
+ if (!VD)
+ return;
+ auto iter = RefsMinusAssignments.find(VD);
+ if (iter == RefsMinusAssignments.end())
+ return;
+ iter->getSecond()--;
+}
+
/// Perform the conversions required for an expression used in a
/// context that ignores the result.
ExprResult Sema::IgnoredValueConversions(Expr *E) {
+ MaybeDecrementCount(E, RefsMinusAssignments);
+
if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return E;
@@ -7752,7 +7873,7 @@ ExprResult Sema::IgnoredValueConversions(Expr *E) {
// [Except in specific positions,] an lvalue that does not have
// array type is converted to the value stored in the
// designated object (and is no longer an lvalue).
- if (E->isRValue()) {
+ if (E->isPRValue()) {
// In C, function designators (i.e. expressions of function type)
// are r-values, but we still want to do function-to-pointer decay
// on them. This is both technically correct and convenient for
@@ -8232,6 +8353,7 @@ class TransformTypos : public TreeTransform<TransformTypos> {
AmbiguousTypoExprs.remove(TE);
SemaRef.getTypoExprState(TE).Consumer->restoreSavedPosition();
+ TransformCache[TE] = SavedTransformCache[TE];
}
TransformCache = std::move(SavedTransformCache);
}
@@ -8586,8 +8708,9 @@ Sema::ActOnCompoundRequirement(
/*ParameterPack=*/false,
/*HasTypeConstraint=*/true);
- if (ActOnTypeConstraint(SS, TypeConstraint, TParam,
- /*EllpsisLoc=*/SourceLocation()))
+ if (BuildTypeConstraint(SS, TypeConstraint, TParam,
+ /*EllpsisLoc=*/SourceLocation(),
+ /*AllowUnexpandedPack=*/true))
// Just produce a requirement with no type requirements.
return BuildExprRequirement(E, /*IsSimple=*/false, NoexceptLoc, {});
@@ -8620,7 +8743,7 @@ Sema::BuildExprRequirement(
TemplateParameterList *TPL =
ReturnTypeRequirement.getTypeConstraintTemplateParameterList();
QualType MatchedType =
- BuildDecltypeType(E, E->getBeginLoc()).getCanonicalType();
+ getDecltypeForParenthesizedExpr(E).getCanonicalType();
llvm::SmallVector<TemplateArgument, 1> Args;
Args.push_back(TemplateArgument(MatchedType));
TemplateArgumentList TAL(TemplateArgumentList::OnStack, Args);
diff --git a/clang/lib/Sema/SemaExprMember.cpp b/clang/lib/Sema/SemaExprMember.cpp
index f5afcb76fc96..af2aa49c0103 100644
--- a/clang/lib/Sema/SemaExprMember.cpp
+++ b/clang/lib/Sema/SemaExprMember.cpp
@@ -338,13 +338,12 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
compStr++;
} while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
- // Emit a warning if an rgba selector is used earlier than OpenCL 2.2
+ // Emit a warning if an rgba selector is used earlier than OpenCL C 3.0.
if (HasRGBA || (*compStr && IsRGBA(*compStr))) {
- if (S.getLangOpts().OpenCL && S.getLangOpts().OpenCLVersion < 220) {
+ if (S.getLangOpts().OpenCL && S.getLangOpts().OpenCLVersion < 300) {
const char *DiagBegin = HasRGBA ? CompName->getNameStart() : compStr;
S.Diag(OpLoc, diag::ext_opencl_ext_vector_type_rgba_selector)
- << StringRef(DiagBegin, 1)
- << S.getLangOpts().OpenCLVersion << SourceRange(CompLoc);
+ << StringRef(DiagBegin, 1) << SourceRange(CompLoc);
}
}
} else {
@@ -409,7 +408,8 @@ CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
if (CompSize == 1)
return vecType->getElementType();
- if (HasRepeated) VK = VK_RValue;
+ if (HasRepeated)
+ VK = VK_PRValue;
QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
// Now look up the TypeDefDecl from the vector type. Without this,
@@ -761,7 +761,7 @@ Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
if (!Base) {
TypoExpr *TE = nullptr;
QualType RecordTy = BaseType;
- if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
+ if (IsArrow) RecordTy = RecordTy->castAs<PointerType>()->getPointeeType();
if (LookupMemberExprInRecord(
*this, R, nullptr, RecordTy->getAs<RecordType>(), OpLoc, IsArrow,
SS, TemplateArgs != nullptr, TemplateKWLoc, TE))
@@ -910,7 +910,8 @@ MemberExpr *Sema::BuildMemberExpr(
bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo,
QualType Ty, ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs) {
- assert((!IsArrow || Base->isRValue()) && "-> base must be a pointer rvalue");
+ assert((!IsArrow || Base->isPRValue()) &&
+ "-> base must be a pointer prvalue");
MemberExpr *E =
MemberExpr::Create(Context, Base, IsArrow, OpLoc, NNS, TemplateKWLoc,
Member, FoundDecl, MemberNameInfo, TemplateArgs, Ty,
@@ -964,14 +965,13 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
// C++1z [expr.ref]p2:
// For the first option (dot) the first expression shall be a glvalue [...]
- if (!IsArrow && BaseExpr && BaseExpr->isRValue()) {
+ if (!IsArrow && BaseExpr && BaseExpr->isPRValue()) {
ExprResult Converted = TemporaryMaterializationConversion(BaseExpr);
if (Converted.isInvalid())
return ExprError();
BaseExpr = Converted.get();
}
-
const DeclarationNameInfo &MemberNameInfo = R.getLookupNameInfo();
DeclarationName MemberName = MemberNameInfo.getName();
SourceLocation MemberLoc = MemberNameInfo.getLoc();
@@ -1119,7 +1119,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
ExprValueKind valueKind;
QualType type;
if (MemberFn->isInstance()) {
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
type = Context.BoundMemberTy;
} else {
valueKind = VK_LValue;
@@ -1135,7 +1135,7 @@ Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
return BuildMemberExpr(BaseExpr, IsArrow, OpLoc, &SS, TemplateKWLoc, Enum,
FoundDecl, /*HadMultipleCandidates=*/false,
- MemberNameInfo, Enum->getType(), VK_RValue,
+ MemberNameInfo, Enum->getType(), VK_PRValue,
OK_Ordinary);
}
@@ -1779,9 +1779,9 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
if (BaseExpr->getObjectKind() == OK_Ordinary)
VK = BaseExpr->getValueKind();
else
- VK = VK_RValue;
+ VK = VK_PRValue;
}
- if (VK != VK_RValue && Field->isBitField())
+ if (VK != VK_PRValue && Field->isBitField())
OK = OK_BitField;
// Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
@@ -1791,7 +1791,7 @@ Sema::BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
VK = VK_LValue;
} else {
QualType BaseType = BaseExpr->getType();
- if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+ if (IsArrow) BaseType = BaseType->castAs<PointerType>()->getPointeeType();
Qualifiers BaseQuals = BaseType.getQualifiers();
diff --git a/clang/lib/Sema/SemaExprObjC.cpp b/clang/lib/Sema/SemaExprObjC.cpp
index f5456ee0711e..8a9c933fc93f 100644
--- a/clang/lib/Sema/SemaExprObjC.cpp
+++ b/clang/lib/Sema/SemaExprObjC.cpp
@@ -1786,7 +1786,7 @@ bool Sema::CheckMessageArgumentTypes(
} else {
ReturnType = Context.getObjCIdType();
}
- VK = VK_RValue;
+ VK = VK_PRValue;
return false;
}
@@ -1821,7 +1821,8 @@ bool Sema::CheckMessageArgumentTypes(
ParmVarDecl *param = Method->parameters()[i];
assert(argExpr && "CheckMessageArgumentTypes(): missing expression");
- if (param->hasAttr<NoEscapeAttr>())
+ if (param->hasAttr<NoEscapeAttr>() &&
+ param->getType()->isBlockPointerType())
if (auto *BE = dyn_cast<BlockExpr>(
argExpr->IgnoreParenNoopCasts(Context)))
BE->getBlockDecl()->setDoesNotEscape();
@@ -1872,7 +1873,7 @@ bool Sema::CheckMessageArgumentTypes(
// If we are type-erasing a block to a block-compatible
// Objective-C pointer type, we may need to extend the lifetime
// of the block object.
- if (typeArgs && Args[i]->isRValue() && paramType->isBlockPointerType() &&
+ if (typeArgs && Args[i]->isPRValue() && paramType->isBlockPointerType() &&
Args[i]->getType()->isBlockPointerType() &&
origParamType->isObjCObjectPointerType()) {
ExprResult arg = Args[i];
@@ -2633,7 +2634,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
Expr **Args = ArgsIn.data();
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
return ObjCMessageExpr::Create(
- Context, ReceiverType, VK_RValue, LBracLoc, ReceiverTypeInfo, Sel,
+ Context, ReceiverType, VK_PRValue, LBracLoc, ReceiverTypeInfo, Sel,
SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs), RBracLoc,
isImplicit);
}
@@ -2681,7 +2682,7 @@ ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
// Check the argument types and determine the result type.
QualType ReturnType;
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
unsigned NumArgs = ArgsIn.size();
Expr **Args = ArgsIn.data();
@@ -2886,7 +2887,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
Expr **Args = ArgsIn.data();
assert(SuperLoc.isInvalid() && "Message to super with dependent type");
return ObjCMessageExpr::Create(
- Context, Context.DependentTy, VK_RValue, LBracLoc, Receiver, Sel,
+ Context, Context.DependentTy, VK_PRValue, LBracLoc, Receiver, Sel,
SelectorLocs, /*Method=*/nullptr, makeArrayRef(Args, NumArgs),
RBracLoc, isImplicit);
}
@@ -3225,7 +3226,7 @@ ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
unsigned NumArgs = ArgsIn.size();
Expr **Args = ArgsIn.data();
QualType ReturnType;
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
bool ClassMessage = (ReceiverType->isObjCClassType() ||
ReceiverType->isObjCQualifiedClassType());
if (CheckMessageArgumentTypes(Receiver, ReceiverType,
@@ -3847,9 +3848,12 @@ static inline T *getObjCBridgeAttr(const TypedefType *TD) {
QualType QT = TDNDecl->getUnderlyingType();
if (QT->isPointerType()) {
QT = QT->getPointeeType();
- if (const RecordType *RT = QT->getAs<RecordType>())
- if (RecordDecl *RD = RT->getDecl()->getMostRecentDecl())
- return RD->getAttr<T>();
+ if (const RecordType *RT = QT->getAs<RecordType>()) {
+ for (auto *Redecl : RT->getDecl()->getMostRecentDecl()->redecls()) {
+ if (auto *attr = Redecl->getAttr<T>())
+ return attr;
+ }
+ }
}
return nullptr;
}
@@ -4469,7 +4473,7 @@ Sema::CheckObjCConversion(SourceRange castRange, QualType castType,
case ACC_plusOne:
castExpr = ImplicitCastExpr::Create(Context, castExpr->getType(),
CK_ARCConsumeObject, castExpr, nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
Cleanup.setExprNeedsCleanups(true);
return ACR_okay;
}
@@ -4696,7 +4700,7 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
case OBC_BridgeRetained:
// Produce the object before casting it.
SubExpr = ImplicitCastExpr::Create(Context, FromType, CK_ARCProduceObject,
- SubExpr, nullptr, VK_RValue,
+ SubExpr, nullptr, VK_PRValue,
FPOptionsOverride());
break;
@@ -4736,7 +4740,7 @@ ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
if (MustConsume) {
Cleanup.setExprNeedsCleanups(true);
Result = ImplicitCastExpr::Create(Context, T, CK_ARCConsumeObject, Result,
- nullptr, VK_RValue, FPOptionsOverride());
+ nullptr, VK_PRValue, FPOptionsOverride());
}
return Result;
diff --git a/clang/lib/Sema/SemaFixItUtils.cpp b/clang/lib/Sema/SemaFixItUtils.cpp
index 41a7a90a3727..2910a56f866b 100644
--- a/clang/lib/Sema/SemaFixItUtils.cpp
+++ b/clang/lib/Sema/SemaFixItUtils.cpp
@@ -132,8 +132,8 @@ bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
if (!Expr->isLValue() || Expr->getObjectKind() != OK_Ordinary)
return false;
- CanConvert = CompareTypes(S.Context.getPointerType(FromQTy), ToQTy,
- S, Begin, VK_RValue);
+ CanConvert = CompareTypes(S.Context.getPointerType(FromQTy), ToQTy, S,
+ Begin, VK_PRValue);
if (CanConvert) {
if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) {
diff --git a/clang/lib/Sema/SemaInit.cpp b/clang/lib/Sema/SemaInit.cpp
index f4493d84238d..78574e34d906 100644
--- a/clang/lib/Sema/SemaInit.cpp
+++ b/clang/lib/Sema/SemaInit.cpp
@@ -24,6 +24,7 @@
#include "clang/Sema/Lookup.h"
#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
@@ -150,7 +151,7 @@ bool Sema::IsStringInit(Expr *Init, const ArrayType *AT) {
static void updateStringLiteralType(Expr *E, QualType Ty) {
while (true) {
E->setType(Ty);
- E->setValueKind(VK_RValue);
+ E->setValueKind(VK_PRValue);
if (isa<StringLiteral>(E) || isa<ObjCEncodeExpr>(E)) {
break;
} else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
@@ -172,7 +173,7 @@ static void updateStringLiteralType(Expr *E, QualType Ty) {
/// as an rvalue.
static void updateGNUCompoundLiteralRValue(Expr *E) {
while (true) {
- E->setValueKind(VK_RValue);
+ E->setValueKind(VK_PRValue);
if (isa<CompoundLiteralExpr>(E)) {
break;
} else if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
@@ -315,7 +316,8 @@ class InitListChecker {
InitListExpr *IList, QualType ElemType,
unsigned &Index,
InitListExpr *StructuredList,
- unsigned &StructuredIndex);
+ unsigned &StructuredIndex,
+ bool DirectlyDesignated = false);
void CheckComplexType(const InitializedEntity &Entity,
InitListExpr *IList, QualType DeclType,
unsigned &Index,
@@ -1005,21 +1007,33 @@ static bool isIdiomaticBraceElisionEntity(const InitializedEntity &Entity) {
//
// (where std::array is an aggregate struct containing a single array field.
- // FIXME: Should aggregate initialization of a struct with a single
- // base class and no members also suppress the warning?
- if (Entity.getKind() != InitializedEntity::EK_Member || !Entity.getParent())
+ if (!Entity.getParent())
return false;
- auto *ParentRD =
- Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
- if (CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(ParentRD))
- if (CXXRD->getNumBases())
- return false;
+ // Allows elide brace initialization for aggregates with empty base.
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ auto *ParentRD =
+ Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
+ CXXRecordDecl *CXXRD = cast<CXXRecordDecl>(ParentRD);
+ return CXXRD->getNumBases() == 1 && CXXRD->field_empty();
+ }
+
+ // Allow brace elision if the only subobject is a field.
+ if (Entity.getKind() == InitializedEntity::EK_Member) {
+ auto *ParentRD =
+ Entity.getParent()->getType()->castAs<RecordType>()->getDecl();
+ if (CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(ParentRD)) {
+ if (CXXRD->getNumBases()) {
+ return false;
+ }
+ }
+ auto FieldIt = ParentRD->field_begin();
+ assert(FieldIt != ParentRD->field_end() &&
+ "no fields but have initializer for member?");
+ return ++FieldIt == ParentRD->field_end();
+ }
- auto FieldIt = ParentRD->field_begin();
- assert(FieldIt != ParentRD->field_end() &&
- "no fields but have initializer for member?");
- return ++FieldIt == ParentRD->field_end();
+ return false;
}
/// Check whether the range of the initializer \p ParentIList from element
@@ -1326,7 +1340,8 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
QualType ElemType,
unsigned &Index,
InitListExpr *StructuredList,
- unsigned &StructuredIndex) {
+ unsigned &StructuredIndex,
+ bool DirectlyDesignated) {
Expr *expr = IList->getInit(Index);
if (ElemType->isReferenceType())
@@ -1462,6 +1477,20 @@ void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList,
StructuredIndex);
++StructuredIndex;
+
+ // In C++20, brace elision is not permitted for a designated initializer.
+ if (DirectlyDesignated && SemaRef.getLangOpts().CPlusPlus && !hadError) {
+ if (InOverloadResolution)
+ hadError = true;
+ if (!VerifyOnly) {
+ SemaRef.Diag(expr->getBeginLoc(),
+ diag::ext_designated_init_brace_elision)
+ << expr->getSourceRange()
+ << FixItHint::CreateInsertion(expr->getBeginLoc(), "{")
+ << FixItHint::CreateInsertion(
+ SemaRef.getLocForEndOfToken(expr->getEndLoc()), "}");
+ }
+ }
} else {
if (!VerifyOnly) {
// We cannot initialize this element, so let PerformCopyInitialization
@@ -2413,8 +2442,8 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
unsigned OldIndex = Index;
IList->setInit(OldIndex, DIE->getInit());
- CheckSubElementType(Entity, IList, CurrentObjectType, Index,
- StructuredList, StructuredIndex);
+ CheckSubElementType(Entity, IList, CurrentObjectType, Index, StructuredList,
+ StructuredIndex, /*DirectlyDesignated=*/true);
// Restore the designated initializer expression in the syntactic
// form of the initializer list.
@@ -2844,7 +2873,7 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
if (!VerifyOnly)
SemaRef.Diag(IndexExpr->getBeginLoc(),
diag::err_array_designator_too_large)
- << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
+ << toString(DesignatedEndIndex, 10) << toString(MaxElements, 10)
<< IndexExpr->getSourceRange();
++Index;
return true;
@@ -2895,9 +2924,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Expr *Init = new (Context) IntegerLiteral(
Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
if (CharTy != PromotedCharTy)
- Init =
- ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast, Init,
- nullptr, VK_RValue, FPOptionsOverride());
+ Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
+ Init, nullptr, VK_PRValue,
+ FPOptionsOverride());
StructuredList->updateInit(Context, i, Init);
}
} else {
@@ -2918,9 +2947,9 @@ InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
Expr *Init = new (Context) IntegerLiteral(
Context, CodeUnit, PromotedCharTy, SubExpr->getExprLoc());
if (CharTy != PromotedCharTy)
- Init =
- ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast, Init,
- nullptr, VK_RValue, FPOptionsOverride());
+ Init = ImplicitCastExpr::Create(Context, CharTy, CK_IntegralCast,
+ Init, nullptr, VK_PRValue,
+ FPOptionsOverride());
StructuredList->updateInit(Context, i, Init);
}
}
@@ -3138,7 +3167,7 @@ CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
if (Value.isSigned() && Value.isNegative())
return S.Diag(Loc, diag::err_array_designator_negative)
- << Value.toString(10) << Index->getSourceRange();
+ << toString(Value, 10) << Index->getSourceRange();
Value.setIsUnsigned(true);
return Result;
@@ -3207,7 +3236,7 @@ ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
if (!StartDependent && !EndDependent && EndValue < StartValue) {
Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
- << StartValue.toString(10) << EndValue.toString(10)
+ << toString(StartValue, 10) << toString(EndValue, 10)
<< StartIndex->getSourceRange() << EndIndex->getSourceRange();
Invalid = true;
} else {
@@ -3265,10 +3294,7 @@ InitializedEntity::InitializeBase(ASTContext &Context,
InitializedEntity Result;
Result.Kind = EK_Base;
Result.Parent = Parent;
- Result.Base = reinterpret_cast<uintptr_t>(Base);
- if (IsInheritedVirtualBase)
- Result.Base |= 0x01;
-
+ Result.Base = {Base, IsInheritedVirtualBase};
Result.Type = Base->getType();
return Result;
}
@@ -3277,7 +3303,7 @@ DeclarationName InitializedEntity::getName() const {
switch (getKind()) {
case EK_Parameter:
case EK_Parameter_CF_Audited: {
- ParmVarDecl *D = reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+ ParmVarDecl *D = Parameter.getPointer();
return (D ? D->getDeclName() : DeclarationName());
}
@@ -3320,7 +3346,7 @@ ValueDecl *InitializedEntity::getDecl() const {
case EK_Parameter:
case EK_Parameter_CF_Audited:
- return reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+ return Parameter.getPointer();
case EK_Result:
case EK_StmtExprResult:
@@ -3431,7 +3457,7 @@ LLVM_DUMP_METHOD void InitializedEntity::dump() const {
void InitializationSequence::Step::Destroy() {
switch (Kind) {
case SK_ResolveAddressOfOverloadedFunction:
- case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBasePRValue:
case SK_CastDerivedToBaseXValue:
case SK_CastDerivedToBaseLValue:
case SK_BindReference:
@@ -3439,7 +3465,7 @@ void InitializationSequence::Step::Destroy() {
case SK_FinalCopy:
case SK_ExtraneousCopyToTemporary:
case SK_UserConversion:
- case SK_QualificationConversionRValue:
+ case SK_QualificationConversionPRValue:
case SK_QualificationConversionXValue:
case SK_QualificationConversionLValue:
case SK_FunctionReferenceConversion:
@@ -3558,7 +3584,9 @@ void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
ExprValueKind VK) {
Step S;
switch (VK) {
- case VK_RValue: S.Kind = SK_CastDerivedToBaseRValue; break;
+ case VK_PRValue:
+ S.Kind = SK_CastDerivedToBasePRValue;
+ break;
case VK_XValue: S.Kind = SK_CastDerivedToBaseXValue; break;
case VK_LValue: S.Kind = SK_CastDerivedToBaseLValue; break;
}
@@ -3605,10 +3633,10 @@ InitializationSequence::AddUserConversionStep(FunctionDecl *Function,
void InitializationSequence::AddQualificationConversionStep(QualType Ty,
ExprValueKind VK) {
Step S;
- S.Kind = SK_QualificationConversionRValue; // work around a gcc warning
+ S.Kind = SK_QualificationConversionPRValue; // work around a gcc warning
switch (VK) {
- case VK_RValue:
- S.Kind = SK_QualificationConversionRValue;
+ case VK_PRValue:
+ S.Kind = SK_QualificationConversionPRValue;
break;
case VK_XValue:
S.Kind = SK_QualificationConversionXValue;
@@ -4047,10 +4075,10 @@ static void TryConstructorInitialization(Sema &S,
Entity.getKind() != InitializedEntity::EK_Delegating &&
Entity.getKind() !=
InitializedEntity::EK_LambdaToBlockConversionBlockElement &&
- UnwrappedArgs.size() == 1 && UnwrappedArgs[0]->isRValue() &&
+ UnwrappedArgs.size() == 1 && UnwrappedArgs[0]->isPRValue() &&
S.Context.hasSameUnqualifiedType(UnwrappedArgs[0]->getType(), DestType)) {
// Convert qualifications if necessary.
- Sequence.AddQualificationConversionStep(DestType, VK_RValue);
+ Sequence.AddQualificationConversionStep(DestType, VK_PRValue);
if (ILE)
Sequence.RewrapReferenceInitList(DestType, ILE);
return;
@@ -4136,7 +4164,7 @@ static void TryConstructorInitialization(Sema &S,
Sequence.AddUserConversionStep(CD, Best->FoundDecl, ConvType,
HadMultipleCandidates);
if (!S.Context.hasSameType(ConvType, DestType))
- Sequence.AddQualificationConversionStep(DestType, VK_RValue);
+ Sequence.AddQualificationConversionStep(DestType, VK_PRValue);
if (IsListInit)
Sequence.RewrapReferenceInitList(Entity.getType(), ILE);
return;
@@ -4289,17 +4317,36 @@ static void TryReferenceListInitialization(Sema &S,
if (Sequence.step_begin() != Sequence.step_end())
Sequence.RewrapReferenceInitList(cv1T1, InitList);
}
-
+ // Perform address space compatibility check.
+ QualType cv1T1IgnoreAS = cv1T1;
+ if (T1Quals.hasAddressSpace()) {
+ Qualifiers T2Quals;
+ (void)S.Context.getUnqualifiedArrayType(InitList->getType(), T2Quals);
+ if (!T1Quals.isAddressSpaceSupersetOf(T2Quals)) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_ReferenceInitDropsQualifiers);
+ return;
+ }
+ // Ignore address space of reference type at this point and perform address
+ // space conversion after the reference binding step.
+ cv1T1IgnoreAS =
+ S.Context.getQualifiedType(T1, T1Quals.withoutAddressSpace());
+ }
// Not reference-related. Create a temporary and bind to that.
- InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+ InitializedEntity TempEntity =
+ InitializedEntity::InitializeTemporary(cv1T1IgnoreAS);
TryListInitialization(S, TempEntity, Kind, InitList, Sequence,
TreatUnavailableAsInvalid);
if (Sequence) {
if (DestType->isRValueReferenceType() ||
- (T1Quals.hasConst() && !T1Quals.hasVolatile()))
- Sequence.AddReferenceBindingStep(cv1T1, /*BindingTemporary=*/true);
- else
+ (T1Quals.hasConst() && !T1Quals.hasVolatile())) {
+ Sequence.AddReferenceBindingStep(cv1T1IgnoreAS,
+ /*BindingTemporary=*/true);
+ if (T1Quals.hasAddressSpace())
+ Sequence.AddQualificationConversionStep(
+ cv1T1, DestType->isRValueReferenceType() ? VK_XValue : VK_LValue);
+ } else
Sequence.SetFailed(
InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
}
@@ -4441,7 +4488,7 @@ static void TryListInitialization(Sema &S,
ImplicitConversionSequence ICS;
ICS.setStandard();
ICS.Standard.setAsIdentityConversion();
- if (!E->isRValue())
+ if (!E->isPRValue())
ICS.Standard.First = ICK_Lvalue_To_Rvalue;
// If E is of a floating-point type, then the conversion is ill-formed
// due to narrowing, but go through the motions in order to produce the
@@ -4626,7 +4673,7 @@ static OverloadingResult TryRefInitWithConversionFunction(
else
cv3T3 = T1;
- ExprValueKind VK = VK_RValue;
+ ExprValueKind VK = VK_PRValue;
if (cv3T3->isLValueReferenceType())
VK = VK_LValue;
else if (const auto *RRef = cv3T3->getAs<RValueReferenceType>())
@@ -4657,7 +4704,7 @@ static OverloadingResult TryRefInitWithConversionFunction(
// Every implicit conversion results in a prvalue, except for a glvalue
// derived-to-base conversion, which we handle below.
cv3T3 = ICS.Standard.getToType(2);
- VK = VK_RValue;
+ VK = VK_PRValue;
}
// If the converted initializer is a prvalue, its type T4 is adjusted to
@@ -4669,7 +4716,7 @@ static OverloadingResult TryRefInitWithConversionFunction(
QualType cv1T4 = S.Context.getQualifiedType(cv3T3, cv1T1.getQualifiers());
if (cv1T4.getQualifiers() != cv3T3.getQualifiers())
Sequence.AddQualificationConversionStep(cv1T4, VK);
- Sequence.AddReferenceBindingStep(cv1T4, VK == VK_RValue);
+ Sequence.AddReferenceBindingStep(cv1T4, VK == VK_PRValue);
VK = IsLValueRef ? VK_LValue : VK_XValue;
if (RefConv & Sema::ReferenceConversions::DerivedToBase)
@@ -4884,7 +4931,7 @@ static void TryReferenceInitializationCore(Sema &S,
(InitCategory.isPRValue() &&
(S.getLangOpts().CPlusPlus17 || T2->isRecordType() ||
T2->isArrayType())))) {
- ExprValueKind ValueKind = InitCategory.isXValue() ? VK_XValue : VK_RValue;
+ ExprValueKind ValueKind = InitCategory.isXValue() ? VK_XValue : VK_PRValue;
if (InitCategory.isPRValue() && T2->isRecordType()) {
// The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
// compiler the freedom to perform a copy here or bind to the
@@ -4916,7 +4963,7 @@ static void TryReferenceInitializationCore(Sema &S,
QualType cv1T4 = S.Context.getQualifiedType(cv2T2, T1QualsIgnoreAS);
if (T1QualsIgnoreAS != T2QualsIgnoreAS)
Sequence.AddQualificationConversionStep(cv1T4, ValueKind);
- Sequence.AddReferenceBindingStep(cv1T4, ValueKind == VK_RValue);
+ Sequence.AddReferenceBindingStep(cv1T4, ValueKind == VK_PRValue);
ValueKind = isLValueRef ? VK_LValue : VK_XValue;
// Add addr space conversion if required.
if (T1Quals.getAddressSpace() != T2Quals.getAddressSpace()) {
@@ -5018,9 +5065,9 @@ static void TryReferenceInitializationCore(Sema &S,
// than, cv2; otherwise, the program is ill-formed.
unsigned T1CVRQuals = T1Quals.getCVRQualifiers();
unsigned T2CVRQuals = T2Quals.getCVRQualifiers();
- if ((RefRelationship == Sema::Ref_Related &&
- (T1CVRQuals | T2CVRQuals) != T1CVRQuals) ||
- !T1Quals.isAddressSpaceSupersetOf(T2Quals)) {
+ if (RefRelationship == Sema::Ref_Related &&
+ ((T1CVRQuals | T2CVRQuals) != T1CVRQuals ||
+ !T1Quals.isAddressSpaceSupersetOf(T2Quals))) {
Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
return;
}
@@ -5319,7 +5366,7 @@ static void TryUserDefinedConversion(Sema &S,
if (!S.getLangOpts().CPlusPlus17)
Sequence.AddFinalCopy(DestType);
else if (DestType.hasQualifiers())
- Sequence.AddQualificationConversionStep(DestType, VK_RValue);
+ Sequence.AddQualificationConversionStep(DestType, VK_PRValue);
return;
}
@@ -5343,7 +5390,7 @@ static void TryUserDefinedConversion(Sema &S,
!S.Context.hasSameUnqualifiedType(ConvType, DestType))
Sequence.AddFinalCopy(DestType);
else if (!S.Context.hasSameType(ConvType, DestType))
- Sequence.AddQualificationConversionStep(DestType, VK_RValue);
+ Sequence.AddQualificationConversionStep(DestType, VK_PRValue);
return;
}
@@ -5448,7 +5495,7 @@ static InvalidICRKind isInvalidICRSource(ASTContext &C, Expr *e,
/// Check whether the given expression is a valid operand for an
/// indirect copy/restore.
static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) {
- assert(src->isRValue());
+ assert(src->isPRValue());
bool isWeakAccess = false;
InvalidICRKind iik = isInvalidICRSource(S.Context, src, false, isWeakAccess);
// If isWeakAccess to true, there will be an implicit
@@ -5572,8 +5619,8 @@ static bool TryOCLZeroOpaqueTypeInitialization(Sema &S,
// We should allow zero initialization for all types defined in the
// cl_intel_device_side_avc_motion_estimation extension, except
// intel_sub_group_avc_mce_payload_t and intel_sub_group_avc_mce_result_t.
- if (S.getOpenCLOptions().isEnabled(
- "cl_intel_device_side_avc_motion_estimation") &&
+ if (S.getOpenCLOptions().isAvailableOption(
+ "cl_intel_device_side_avc_motion_estimation", S.getLangOpts()) &&
DestType->isOCLIntelSubgroupAVCType()) {
if (DestType->isOCLIntelSubgroupAVCMcePayloadType() ||
DestType->isOCLIntelSubgroupAVCMceResultType())
@@ -5785,7 +5832,7 @@ void InitializationSequence::InitializeFrom(Sema &S,
Entity.getType()) &&
canPerformArrayCopy(Entity)) {
// If source is a prvalue, use it directly.
- if (Initializer->getValueKind() == VK_RValue) {
+ if (Initializer->isPRValue()) {
AddArrayInitStep(DestType, /*IsGNUExtension*/false);
return;
}
@@ -6284,7 +6331,8 @@ static ExprResult CopyObject(Sema &S,
// Determine the arguments required to actually perform the
// constructor call (we might have derived-to-base conversions, or
// the copy constructor may have default arguments).
- if (S.CompleteConstructorCall(Constructor, CurInitExpr, Loc, ConstructorArgs))
+ if (S.CompleteConstructorCall(Constructor, T, CurInitExpr, Loc,
+ ConstructorArgs))
return ExprError();
// C++0x [class.copy]p32:
@@ -6481,13 +6529,11 @@ PerformConstructorInitialization(Sema &S,
// Determine the arguments required to actually perform the constructor
// call.
- if (S.CompleteConstructorCall(Constructor, Args,
- Loc, ConstructorArgs,
- AllowExplicitConv,
+ if (S.CompleteConstructorCall(Constructor, Step.Type, Args, Loc,
+ ConstructorArgs, AllowExplicitConv,
IsListInitialization))
return ExprError();
-
if (isExplicitTemporary(Entity, Kind, NumArgs)) {
// An explicitly-constructed temporary, e.g., X(1, 2).
if (S.DiagnoseUseOfDecl(Constructor, Loc))
@@ -6501,22 +6547,23 @@ PerformConstructorInitialization(Sema &S,
? SourceRange(LBraceLoc, RBraceLoc)
: Kind.getParenOrBraceRange();
+ CXXConstructorDecl *CalleeDecl = Constructor;
if (auto *Shadow = dyn_cast<ConstructorUsingShadowDecl>(
Step.Function.FoundDecl.getDecl())) {
- Constructor = S.findInheritingConstructor(Loc, Constructor, Shadow);
- if (S.DiagnoseUseOfDecl(Constructor, Loc))
+ CalleeDecl = S.findInheritingConstructor(Loc, Constructor, Shadow);
+ if (S.DiagnoseUseOfDecl(CalleeDecl, Loc))
return ExprError();
}
- S.MarkFunctionReferenced(Loc, Constructor);
+ S.MarkFunctionReferenced(Loc, CalleeDecl);
CurInit = S.CheckForImmediateInvocation(
CXXTemporaryObjectExpr::Create(
- S.Context, Constructor,
+ S.Context, CalleeDecl,
Entity.getType().getNonLValueExprType(S.Context), TSInfo,
ConstructorArgs, ParenOrBraceRange, HadMultipleCandidates,
IsListInitialization, IsStdInitListInitialization,
ConstructorInitRequiresZeroInit),
- Constructor);
+ CalleeDecl);
} else {
CXXConstructExpr::ConstructionKind ConstructKind =
CXXConstructExpr::CK_Complete;
@@ -7485,6 +7532,8 @@ static bool pathOnlyInitializesGslPointer(IndirectLocalPath &Path) {
continue;
if (It->Kind == IndirectLocalPathEntry::AddressOf)
continue;
+ if (It->Kind == IndirectLocalPathEntry::LifetimeBoundCall)
+ continue;
return It->Kind == IndirectLocalPathEntry::GslPointerInit ||
It->Kind == IndirectLocalPathEntry::GslReferenceInit;
}
@@ -7833,7 +7882,7 @@ static void CheckMoveOnConstruction(Sema &S, const Expr *InitExpr,
} else {
DiagID = diag::warn_pessimizing_move_on_initialization;
const Expr *ArgStripped = Arg->IgnoreImplicit()->IgnoreParens();
- if (!ArgStripped->isRValue() || !ArgStripped->getType()->isRecordType())
+ if (!ArgStripped->isPRValue() || !ArgStripped->getType()->isRecordType())
return;
}
@@ -7903,7 +7952,7 @@ ExprResult Sema::TemporaryMaterializationConversion(Expr *E) {
// FIXME: This means that AST consumers need to deal with "prvalues" that
// denote materialized temporaries. Maybe we should add another ValueKind
// for "xvalue pretending to be a prvalue" for C++98 support.
- if (!E->isRValue() || !getLangOpts().CPlusPlus11)
+ if (!E->isPRValue() || !getLangOpts().CPlusPlus11)
return E;
// C++1z [conv.rval]/1: T shall be a complete type.
@@ -7922,7 +7971,7 @@ ExprResult Sema::PerformQualificationConversion(Expr *E, QualType Ty,
CastKind CK = CK_NoOp;
- if (VK == VK_RValue) {
+ if (VK == VK_PRValue) {
auto PointeeTy = Ty->getPointeeType();
auto ExprPointeeTy = E->getType()->getPointeeType();
if (!PointeeTy.isNull() &&
@@ -8057,7 +8106,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
// initializer.
switch (Steps.front().Kind) {
case SK_ResolveAddressOfOverloadedFunction:
- case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBasePRValue:
case SK_CastDerivedToBaseXValue:
case SK_CastDerivedToBaseLValue:
case SK_BindReference:
@@ -8067,7 +8116,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_UserConversion:
case SK_QualificationConversionLValue:
case SK_QualificationConversionXValue:
- case SK_QualificationConversionRValue:
+ case SK_QualificationConversionPRValue:
case SK_FunctionReferenceConversion:
case SK_AtomicConversion:
case SK_ConversionSequence:
@@ -8142,7 +8191,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
Step->Function.Function);
break;
- case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBasePRValue:
case SK_CastDerivedToBaseXValue:
case SK_CastDerivedToBaseLValue: {
// We have a derived-to-base cast that produces either an rvalue or an
@@ -8158,11 +8207,10 @@ ExprResult InitializationSequence::Perform(Sema &S,
return ExprError();
ExprValueKind VK =
- Step->Kind == SK_CastDerivedToBaseLValue ?
- VK_LValue :
- (Step->Kind == SK_CastDerivedToBaseXValue ?
- VK_XValue :
- VK_RValue);
+ Step->Kind == SK_CastDerivedToBaseLValue
+ ? VK_LValue
+ : (Step->Kind == SK_CastDerivedToBaseXValue ? VK_XValue
+ : VK_PRValue);
CurInit = ImplicitCastExpr::Create(S.Context, Step->Type,
CK_DerivedToBase, CurInit.get(),
&BasePath, VK, FPOptionsOverride());
@@ -8194,7 +8242,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_BindReferenceToTemporary: {
// Make sure the "temporary" is actually an rvalue.
- assert(CurInit.get()->isRValue() && "not a temporary");
+ assert(CurInit.get()->isPRValue() && "not a temporary");
// Check exception specifications
if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
@@ -8261,9 +8309,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
// Determine the arguments required to actually perform the constructor
// call.
Expr *Arg = CurInit.get();
- if (S.CompleteConstructorCall(Constructor,
- MultiExprArg(&Arg, 1),
- Loc, ConstructorArgs))
+ if (S.CompleteConstructorCall(Constructor, Step->Type,
+ MultiExprArg(&Arg, 1), Loc,
+ ConstructorArgs))
return ExprError();
// Build an expression that constructs a temporary.
@@ -8335,13 +8383,13 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_QualificationConversionLValue:
case SK_QualificationConversionXValue:
- case SK_QualificationConversionRValue: {
+ case SK_QualificationConversionPRValue: {
// Perform a qualification conversion; these can never go wrong.
ExprValueKind VK =
Step->Kind == SK_QualificationConversionLValue
? VK_LValue
: (Step->Kind == SK_QualificationConversionXValue ? VK_XValue
- : VK_RValue);
+ : VK_PRValue);
CurInit = S.PerformQualificationConversion(CurInit.get(), Step->Type, VK);
break;
}
@@ -8354,9 +8402,9 @@ ExprResult InitializationSequence::Perform(Sema &S,
break;
case SK_AtomicConversion: {
- assert(CurInit.get()->isRValue() && "cannot convert glvalue to atomic");
+ assert(CurInit.get()->isPRValue() && "cannot convert glvalue to atomic");
CurInit = S.ImpCastExprToType(CurInit.get(), Step->Type,
- CK_NonAtomicToAtomic, VK_RValue);
+ CK_NonAtomicToAtomic, VK_PRValue);
break;
}
@@ -8659,7 +8707,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_ProduceObjCObject:
CurInit = ImplicitCastExpr::Create(
S.Context, Step->Type, CK_ARCProduceObject, CurInit.get(), nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
break;
case SK_StdInitializerList: {
@@ -8715,7 +8763,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
if (!Var->hasGlobalStorage()) {
CurInit = ImplicitCastExpr::Create(
S.Context, Step->Type, CK_LValueToRValue, Init,
- /*BasePath=*/nullptr, VK_RValue, FPOptionsOverride());
+ /*BasePath=*/nullptr, VK_PRValue, FPOptionsOverride());
break;
}
// Case 1a
@@ -8758,8 +8806,8 @@ ExprResult InitializationSequence::Perform(Sema &S,
unsigned AddressingMode = (0x0E & SamplerValue) >> 1;
unsigned FilterMode = (0x30 & SamplerValue) >> 4;
if (FilterMode != 1 && FilterMode != 2 &&
- !S.getOpenCLOptions().isEnabled(
- "cl_intel_device_side_avc_motion_estimation"))
+ !S.getOpenCLOptions().isAvailableOption(
+ "cl_intel_device_side_avc_motion_estimation", S.getLangOpts()))
S.Diag(Kind.getLocation(),
diag::warn_sampler_initializer_invalid_bits)
<< "Filter Mode";
@@ -9569,8 +9617,8 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "resolve address of overloaded function";
break;
- case SK_CastDerivedToBaseRValue:
- OS << "derived-to-base (rvalue)";
+ case SK_CastDerivedToBasePRValue:
+ OS << "derived-to-base (prvalue)";
break;
case SK_CastDerivedToBaseXValue:
@@ -9601,8 +9649,8 @@ void InitializationSequence::dump(raw_ostream &OS) const {
OS << "user-defined conversion via " << *S->Function.Function;
break;
- case SK_QualificationConversionRValue:
- OS << "qualification conversion (rvalue)";
+ case SK_QualificationConversionPRValue:
+ OS << "qualification conversion (prvalue)";
break;
case SK_QualificationConversionXValue:
diff --git a/clang/lib/Sema/SemaLambda.cpp b/clang/lib/Sema/SemaLambda.cpp
index af61c82c2002..eb1e9c3e5f7e 100644
--- a/clang/lib/Sema/SemaLambda.cpp
+++ b/clang/lib/Sema/SemaLambda.cpp
@@ -386,11 +386,8 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
// trailing-return-type respectively.
DeclarationName MethodName
= Context.DeclarationNames.getCXXOperatorName(OO_Call);
- DeclarationNameLoc MethodNameLoc;
- MethodNameLoc.CXXOperatorName.BeginOpNameLoc
- = IntroducerRange.getBegin().getRawEncoding();
- MethodNameLoc.CXXOperatorName.EndOpNameLoc
- = IntroducerRange.getEnd().getRawEncoding();
+ DeclarationNameLoc MethodNameLoc =
+ DeclarationNameLoc::makeCXXOperatorNameLoc(IntroducerRange);
CXXMethodDecl *Method = CXXMethodDecl::Create(
Context, Class, EndLoc,
DeclarationNameInfo(MethodName, IntroducerRange.getBegin(),
@@ -432,15 +429,16 @@ CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
void Sema::handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
- Optional<std::tuple<unsigned, bool, Decl *>> Mangling) {
+ Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling) {
if (Mangling) {
- unsigned ManglingNumber;
bool HasKnownInternalLinkage;
+ unsigned ManglingNumber, DeviceManglingNumber;
Decl *ManglingContextDecl;
- std::tie(ManglingNumber, HasKnownInternalLinkage, ManglingContextDecl) =
- Mangling.getValue();
+ std::tie(HasKnownInternalLinkage, ManglingNumber, DeviceManglingNumber,
+ ManglingContextDecl) = Mangling.getValue();
Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
HasKnownInternalLinkage);
+ Class->setDeviceLambdaManglingNumber(DeviceManglingNumber);
return;
}
@@ -463,11 +461,15 @@ void Sema::handleLambdaNumbering(
std::tie(MCtx, ManglingContextDecl) =
getCurrentMangleNumberContext(Class->getDeclContext());
bool HasKnownInternalLinkage = false;
- if (!MCtx && getLangOpts().CUDA) {
+ if (!MCtx && (getLangOpts().CUDA || getLangOpts().SYCLIsDevice ||
+ getLangOpts().SYCLIsHost)) {
// Force lambda numbering in CUDA/HIP as we need to name lambdas following
// ODR. Both device- and host-compilation need to have a consistent naming
// on kernel functions. As lambdas are potential part of these `__global__`
// function names, they needs numbering following ODR.
+ // Also force for SYCL, since we need this for the
+ // __builtin_sycl_unique_stable_name implementation, which depends on lambda
+ // mangling.
MCtx = getMangleNumberingContext(Class, ManglingContextDecl);
assert(MCtx && "Retrieving mangle numbering context failed!");
HasKnownInternalLinkage = true;
@@ -476,6 +478,7 @@ void Sema::handleLambdaNumbering(
unsigned ManglingNumber = MCtx->getManglingNumber(Method);
Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
HasKnownInternalLinkage);
+ Class->setDeviceLambdaManglingNumber(MCtx->getDeviceManglingNumber(Method));
}
}
@@ -683,7 +686,7 @@ static void adjustBlockReturnsToEnum(Sema &S, ArrayRef<ReturnStmt*> returns,
Expr *E = (cleanups ? cleanups->getSubExpr() : retValue);
E = ImplicitCastExpr::Create(S.Context, returnType, CK_IntegralCast, E,
- /*base path*/ nullptr, VK_RValue,
+ /*base path*/ nullptr, VK_PRValue,
FPOptionsOverride());
if (cleanups) {
cleanups->setSubExpr(E);
@@ -1378,7 +1381,6 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
DeclarationName ConversionName
= S.Context.DeclarationNames.getCXXConversionFunctionName(
S.Context.getCanonicalType(PtrToFunctionTy));
- DeclarationNameLoc ConvNameLoc;
// Construct a TypeSourceInfo for the conversion function, and wire
// all the parameters appropriately for the FunctionProtoTypeLoc
// so that everything works during transformation/instantiation of
@@ -1397,7 +1399,8 @@ static void addFunctionPointerConversion(Sema &S, SourceRange IntroducerRange,
// operators ParmVarDecls below.
TypeSourceInfo *ConvNamePtrToFunctionTSI =
S.Context.getTrivialTypeSourceInfo(PtrToFunctionTy, Loc);
- ConvNameLoc.NamedType.TInfo = ConvNamePtrToFunctionTSI;
+ DeclarationNameLoc ConvNameLoc =
+ DeclarationNameLoc::makeNamedTypeLoc(ConvNamePtrToFunctionTSI);
// The conversion function is a conversion to a pointer-to-function.
TypeSourceInfo *ConvTSI = S.Context.getTrivialTypeSourceInfo(ConvTy, Loc);
@@ -1548,8 +1551,8 @@ static void addBlockPointerConversion(Sema &S,
DeclarationName Name
= S.Context.DeclarationNames.getCXXConversionFunctionName(
S.Context.getCanonicalType(BlockPtrTy));
- DeclarationNameLoc NameLoc;
- NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc);
+ DeclarationNameLoc NameLoc = DeclarationNameLoc::makeNamedTypeLoc(
+ S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc));
CXXConversionDecl *Conversion = CXXConversionDecl::Create(
S.Context, Class, Loc, DeclarationNameInfo(Name, Loc, NameLoc), ConvTy,
S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 29038ab9fe1c..5e8c4de61e5d 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -638,8 +638,8 @@ void LookupResult::resolveKind() {
void LookupResult::addDeclsFromBasePaths(const CXXBasePaths &P) {
CXXBasePaths::const_paths_iterator I, E;
for (I = P.begin(), E = P.end(); I != E; ++I)
- for (DeclContext::lookup_iterator DI = I->Decls.begin(),
- DE = I->Decls.end(); DI != DE; ++DI)
+ for (DeclContext::lookup_iterator DI = I->Decls, DE = DI.end(); DI != DE;
+ ++DI)
addDecl(*DI);
}
@@ -677,9 +677,43 @@ LLVM_DUMP_METHOD void LookupResult::dump() {
D->dump();
}
+/// Diagnose a missing builtin type.
+static QualType diagOpenCLBuiltinTypeError(Sema &S, llvm::StringRef TypeClass,
+ llvm::StringRef Name) {
+ S.Diag(SourceLocation(), diag::err_opencl_type_not_found)
+ << TypeClass << Name;
+ return S.Context.VoidTy;
+}
+
+/// Lookup an OpenCL enum type.
+static QualType getOpenCLEnumType(Sema &S, llvm::StringRef Name) {
+ LookupResult Result(S, &S.Context.Idents.get(Name), SourceLocation(),
+ Sema::LookupTagName);
+ S.LookupName(Result, S.TUScope);
+ if (Result.empty())
+ return diagOpenCLBuiltinTypeError(S, "enum", Name);
+ EnumDecl *Decl = Result.getAsSingle<EnumDecl>();
+ if (!Decl)
+ return diagOpenCLBuiltinTypeError(S, "enum", Name);
+ return S.Context.getEnumType(Decl);
+}
+
+/// Lookup an OpenCL typedef type.
+static QualType getOpenCLTypedefType(Sema &S, llvm::StringRef Name) {
+ LookupResult Result(S, &S.Context.Idents.get(Name), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ S.LookupName(Result, S.TUScope);
+ if (Result.empty())
+ return diagOpenCLBuiltinTypeError(S, "typedef", Name);
+ TypedefNameDecl *Decl = Result.getAsSingle<TypedefNameDecl>();
+ if (!Decl)
+ return diagOpenCLBuiltinTypeError(S, "typedef", Name);
+ return S.Context.getTypedefType(Decl);
+}
+
/// Get the QualType instances of the return type and arguments for an OpenCL
/// builtin function signature.
-/// \param Context (in) The Context instance.
+/// \param S (in) The Sema instance.
/// \param OpenCLBuiltin (in) The signature currently handled.
/// \param GenTypeMaxCnt (out) Maximum number of types contained in a generic
/// type used as return type or as argument.
@@ -689,20 +723,20 @@ LLVM_DUMP_METHOD void LookupResult::dump() {
/// argument, ArgTypes contains QualTypes for the Cartesian product
/// of (vector sizes) x (types) .
static void GetQualTypesForOpenCLBuiltin(
- ASTContext &Context, const OpenCLBuiltinStruct &OpenCLBuiltin,
- unsigned &GenTypeMaxCnt, SmallVector<QualType, 1> &RetTypes,
+ Sema &S, const OpenCLBuiltinStruct &OpenCLBuiltin, unsigned &GenTypeMaxCnt,
+ SmallVector<QualType, 1> &RetTypes,
SmallVector<SmallVector<QualType, 1>, 5> &ArgTypes) {
// Get the QualType instances of the return types.
unsigned Sig = SignatureTable[OpenCLBuiltin.SigTableIndex];
- OCL2Qual(Context, TypeTable[Sig], RetTypes);
+ OCL2Qual(S, TypeTable[Sig], RetTypes);
GenTypeMaxCnt = RetTypes.size();
// Get the QualType instances of the arguments.
// First type is the return type, skip it.
for (unsigned Index = 1; Index < OpenCLBuiltin.NumTypes; Index++) {
SmallVector<QualType, 1> Ty;
- OCL2Qual(Context,
- TypeTable[SignatureTable[OpenCLBuiltin.SigTableIndex + Index]], Ty);
+ OCL2Qual(S, TypeTable[SignatureTable[OpenCLBuiltin.SigTableIndex + Index]],
+ Ty);
GenTypeMaxCnt = (Ty.size() > GenTypeMaxCnt) ? Ty.size() : GenTypeMaxCnt;
ArgTypes.push_back(std::move(Ty));
}
@@ -721,14 +755,24 @@ static void GetOpenCLBuiltinFctOverloads(
ASTContext &Context, unsigned GenTypeMaxCnt,
std::vector<QualType> &FunctionList, SmallVector<QualType, 1> &RetTypes,
SmallVector<SmallVector<QualType, 1>, 5> &ArgTypes) {
- FunctionProtoType::ExtProtoInfo PI;
+ FunctionProtoType::ExtProtoInfo PI(
+ Context.getDefaultCallingConvention(false, false, true));
PI.Variadic = false;
+ // Do not attempt to create any FunctionTypes if there are no return types,
+ // which happens when a type belongs to a disabled extension.
+ if (RetTypes.size() == 0)
+ return;
+
// Create FunctionTypes for each (gen)type.
for (unsigned IGenType = 0; IGenType < GenTypeMaxCnt; IGenType++) {
SmallVector<QualType, 5> ArgList;
for (unsigned A = 0; A < ArgTypes.size(); A++) {
+ // Bail out if there is an argument that has no available types.
+ if (ArgTypes[A].size() == 0)
+ return;
+
// Builtins such as "max" have an "sgentype" argument that represents
// the corresponding scalar type of a gentype. The number of gentypes
// must be a multiple of the number of sgentypes.
@@ -743,18 +787,6 @@ static void GetOpenCLBuiltinFctOverloads(
}
}
-/// Add extensions to the function declaration.
-/// \param S (in/out) The Sema instance.
-/// \param BIDecl (in) Description of the builtin.
-/// \param FDecl (in/out) FunctionDecl instance.
-static void AddOpenCLExtensions(Sema &S, const OpenCLBuiltinStruct &BIDecl,
- FunctionDecl *FDecl) {
- // Fetch extension associated with a function prototype.
- StringRef E = FunctionExtensionTable[BIDecl.Extension];
- if (E != "")
- S.setOpenCLExtensionForDecl(FDecl, E);
-}
-
/// When trying to resolve a function name, if isOpenCLBuiltin() returns a
/// non-null <Index, Len> pair, then the name is referencing an OpenCL
/// builtin function. Add all candidate signatures to the LookUpResult.
@@ -775,27 +807,42 @@ static void InsertOCLBuiltinDeclarationsFromTable(Sema &S, LookupResult &LR,
// as argument. Only meaningful for generic types, otherwise equals 1.
unsigned GenTypeMaxCnt;
+ ASTContext &Context = S.Context;
+
for (unsigned SignatureIndex = 0; SignatureIndex < Len; SignatureIndex++) {
const OpenCLBuiltinStruct &OpenCLBuiltin =
BuiltinTable[FctIndex + SignatureIndex];
- ASTContext &Context = S.Context;
- // Ignore this BIF if its version does not match the language options.
- unsigned OpenCLVersion = Context.getLangOpts().OpenCLVersion;
- if (Context.getLangOpts().OpenCLCPlusPlus)
- OpenCLVersion = 200;
- if (OpenCLVersion < OpenCLBuiltin.MinVersion)
- continue;
- if ((OpenCLBuiltin.MaxVersion != 0) &&
- (OpenCLVersion >= OpenCLBuiltin.MaxVersion))
+ // Ignore this builtin function if it is not available in the currently
+ // selected language version.
+ if (!isOpenCLVersionContainedInMask(Context.getLangOpts(),
+ OpenCLBuiltin.Versions))
continue;
+ // Ignore this builtin function if it carries an extension macro that is
+ // not defined. This indicates that the extension is not supported by the
+ // target, so the builtin function should not be available.
+ StringRef Extensions = FunctionExtensionTable[OpenCLBuiltin.Extension];
+ if (!Extensions.empty()) {
+ SmallVector<StringRef, 2> ExtVec;
+ Extensions.split(ExtVec, " ");
+ bool AllExtensionsDefined = true;
+ for (StringRef Ext : ExtVec) {
+ if (!S.getPreprocessor().isMacroDefined(Ext)) {
+ AllExtensionsDefined = false;
+ break;
+ }
+ }
+ if (!AllExtensionsDefined)
+ continue;
+ }
+
SmallVector<QualType, 1> RetTypes;
SmallVector<SmallVector<QualType, 1>, 5> ArgTypes;
// Obtain QualType lists for the function signature.
- GetQualTypesForOpenCLBuiltin(Context, OpenCLBuiltin, GenTypeMaxCnt,
- RetTypes, ArgTypes);
+ GetQualTypesForOpenCLBuiltin(S, OpenCLBuiltin, GenTypeMaxCnt, RetTypes,
+ ArgTypes);
if (GenTypeMaxCnt > 1) {
HasGenType = true;
}
@@ -809,28 +856,24 @@ static void InsertOCLBuiltinDeclarationsFromTable(Sema &S, LookupResult &LR,
DeclContext *Parent = Context.getTranslationUnitDecl();
FunctionDecl *NewOpenCLBuiltin;
- for (unsigned Index = 0; Index < GenTypeMaxCnt; Index++) {
+ for (const auto &FTy : FunctionList) {
NewOpenCLBuiltin = FunctionDecl::Create(
- Context, Parent, Loc, Loc, II, FunctionList[Index],
- /*TInfo=*/nullptr, SC_Extern, false,
- FunctionList[Index]->isFunctionProtoType());
+ Context, Parent, Loc, Loc, II, FTy, /*TInfo=*/nullptr, SC_Extern,
+ false, FTy->isFunctionProtoType());
NewOpenCLBuiltin->setImplicit();
// Create Decl objects for each parameter, adding them to the
// FunctionDecl.
- if (const FunctionProtoType *FP =
- dyn_cast<FunctionProtoType>(FunctionList[Index])) {
- SmallVector<ParmVarDecl *, 16> ParmList;
- for (unsigned IParm = 0, e = FP->getNumParams(); IParm != e; ++IParm) {
- ParmVarDecl *Parm = ParmVarDecl::Create(
- Context, NewOpenCLBuiltin, SourceLocation(), SourceLocation(),
- nullptr, FP->getParamType(IParm),
- /*TInfo=*/nullptr, SC_None, nullptr);
- Parm->setScopeInfo(0, IParm);
- ParmList.push_back(Parm);
- }
- NewOpenCLBuiltin->setParams(ParmList);
+ const auto *FP = cast<FunctionProtoType>(FTy);
+ SmallVector<ParmVarDecl *, 4> ParmList;
+ for (unsigned IParm = 0, e = FP->getNumParams(); IParm != e; ++IParm) {
+ ParmVarDecl *Parm = ParmVarDecl::Create(
+ Context, NewOpenCLBuiltin, SourceLocation(), SourceLocation(),
+ nullptr, FP->getParamType(IParm), nullptr, SC_None, nullptr);
+ Parm->setScopeInfo(0, IParm);
+ ParmList.push_back(Parm);
}
+ NewOpenCLBuiltin->setParams(ParmList);
// Add function attributes.
if (OpenCLBuiltin.IsPure)
@@ -843,8 +886,6 @@ static void InsertOCLBuiltinDeclarationsFromTable(Sema &S, LookupResult &LR,
if (!S.getLangOpts().OpenCLCPlusPlus)
NewOpenCLBuiltin->addAttr(OverloadableAttr::CreateImplicit(Context));
- AddOpenCLExtensions(S, OpenCLBuiltin, NewOpenCLBuiltin);
-
LR.addDecl(NewOpenCLBuiltin);
}
}
@@ -2191,9 +2232,9 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXRecordDecl *BaseRecord = Specifier->getType()->getAsCXXRecordDecl();
// Drop leading non-matching lookup results from the declaration list so
// we don't need to consider them again below.
- for (Path.Decls = BaseRecord->lookup(Name); !Path.Decls.empty();
- Path.Decls = Path.Decls.slice(1)) {
- if (Path.Decls.front()->isInIdentifierNamespace(IDNS))
+ for (Path.Decls = BaseRecord->lookup(Name).begin();
+ Path.Decls != Path.Decls.end(); ++Path.Decls) {
+ if ((*Path.Decls)->isInIdentifierNamespace(IDNS))
return true;
}
return false;
@@ -2217,9 +2258,9 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
AccessSpecifier SubobjectAccess = AS_none;
// Check whether the given lookup result contains only static members.
- auto HasOnlyStaticMembers = [&](DeclContextLookupResult Result) {
- for (NamedDecl *ND : Result)
- if (ND->isInIdentifierNamespace(IDNS) && ND->isCXXInstanceMember())
+ auto HasOnlyStaticMembers = [&](DeclContext::lookup_iterator Result) {
+ for (DeclContext::lookup_iterator I = Result, E = I.end(); I != E; ++I)
+ if ((*I)->isInIdentifierNamespace(IDNS) && (*I)->isCXXInstanceMember())
return false;
return true;
};
@@ -2228,8 +2269,8 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// Determine whether two sets of members contain the same members, as
// required by C++ [class.member.lookup]p6.
- auto HasSameDeclarations = [&](DeclContextLookupResult A,
- DeclContextLookupResult B) {
+ auto HasSameDeclarations = [&](DeclContext::lookup_iterator A,
+ DeclContext::lookup_iterator B) {
using Iterator = DeclContextLookupResult::iterator;
using Result = const void *;
@@ -2266,7 +2307,7 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// We'll often find the declarations are in the same order. Handle this
// case (and the special case of only one declaration) efficiently.
- Iterator AIt = A.begin(), BIt = B.begin(), AEnd = A.end(), BEnd = B.end();
+ Iterator AIt = A, BIt = B, AEnd, BEnd;
while (true) {
Result AResult = Next(AIt, AEnd);
Result BResult = Next(BIt, BEnd);
@@ -2349,10 +2390,11 @@ bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
// Lookup in a base class succeeded; return these results.
- for (auto *D : Paths.front().Decls) {
+ for (DeclContext::lookup_iterator I = Paths.front().Decls, E = I.end();
+ I != E; ++I) {
AccessSpecifier AS = CXXRecordDecl::MergeAccess(SubobjectAccess,
- D->getAccess());
- if (NamedDecl *ND = R.getAcceptableDecl(D))
+ (*I)->getAccess());
+ if (NamedDecl *ND = R.getAcceptableDecl(*I))
R.addDecl(ND, AS);
}
R.resolveKind();
@@ -2495,7 +2537,7 @@ void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
<< Name << SubobjectType << getAmbiguousPathsDisplayString(*Paths)
<< LookupRange;
- DeclContext::lookup_iterator Found = Paths->front().Decls.begin();
+ DeclContext::lookup_iterator Found = Paths->front().Decls;
while (isa<CXXMethodDecl>(*Found) &&
cast<CXXMethodDecl>(*Found)->isStatic())
++Found;
@@ -2513,7 +2555,7 @@ void Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
for (CXXBasePaths::paths_iterator Path = Paths->begin(),
PathEnd = Paths->end();
Path != PathEnd; ++Path) {
- const NamedDecl *D = Path->Decls.front();
+ const NamedDecl *D = *Path->Decls;
if (!D->isInIdentifierNamespace(Result.getIdentifierNamespace()))
continue;
if (DeclsPrinted.insert(D).second) {
@@ -3117,7 +3159,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
ArgType.addVolatile();
// This isn't /really/ specified by the standard, but it's implied
- // we should be working from an RValue in the case of move to ensure
+ // we should be working from a PRValue in the case of move to ensure
// that we prefer to bind to rvalue references, and an LValue in the
// case of copy to ensure we don't bind to rvalue references.
// Possibly an XValue is actually correct in the case of move, but
@@ -3126,7 +3168,7 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
if (SM == CXXCopyConstructor || SM == CXXCopyAssignment)
VK = VK_LValue;
else
- VK = VK_RValue;
+ VK = VK_PRValue;
}
OpaqueValueExpr FakeArg(LookupLoc, ArgType, VK);
@@ -3143,8 +3185,8 @@ Sema::SpecialMemberOverloadResult Sema::LookupSpecialMember(CXXRecordDecl *RD,
if (VolatileThis)
ThisTy.addVolatile();
Expr::Classification Classification =
- OpaqueValueExpr(LookupLoc, ThisTy,
- RValueThis ? VK_RValue : VK_LValue).Classify(Context);
+ OpaqueValueExpr(LookupLoc, ThisTy, RValueThis ? VK_PRValue : VK_LValue)
+ .Classify(Context);
// Now we perform lookup on the name we computed earlier and do overload
// resolution. Lookup is only performed directly into the class since there
@@ -3690,7 +3732,7 @@ NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
// A shadow declaration that's created by a resolved using declaration
// is not hidden by the same using declaration.
if (isa<UsingShadowDecl>(ND) && isa<UsingDecl>(D) &&
- cast<UsingShadowDecl>(ND)->getUsingDecl() == D)
+ cast<UsingShadowDecl>(ND)->getIntroducer() == D)
continue;
// We've found a declaration that hides this one.
@@ -3793,6 +3835,7 @@ private:
if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
Result.getSema().ForceDeclarationOfImplicitMembers(Class);
+ llvm::SmallVector<NamedDecl *, 4> DeclsToVisit;
// We sometimes skip loading namespace-level results (they tend to be huge).
bool Load = LoadExternal ||
!(isa<TranslationUnitDecl>(Ctx) || isa<NamespaceDecl>(Ctx));
@@ -3802,12 +3845,21 @@ private:
: Ctx->noload_lookups(/*PreserveInternalState=*/false)) {
for (auto *D : R) {
if (auto *ND = Result.getAcceptableDecl(D)) {
- Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
- Visited.add(ND);
+ // Rather than visit immediately, we put ND into a vector and visit
+ // all decls, in order, outside of this loop. The reason is that
+ // Consumer.FoundDecl() may invalidate the iterators used in the two
+ // loops above.
+ DeclsToVisit.push_back(ND);
}
}
}
+ for (auto *ND : DeclsToVisit) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
+ Visited.add(ND);
+ }
+ DeclsToVisit.clear();
+
// Traverse using directives for qualified name lookup.
if (QualifiedNameLookup) {
ShadowContextRAII Shadow(Visited);
diff --git a/clang/lib/Sema/SemaObjCProperty.cpp b/clang/lib/Sema/SemaObjCProperty.cpp
index fdc30fe6f657..a329d0f22b03 100644
--- a/clang/lib/Sema/SemaObjCProperty.cpp
+++ b/clang/lib/Sema/SemaObjCProperty.cpp
@@ -112,12 +112,10 @@ CheckPropertyAgainstProtocol(Sema &S, ObjCPropertyDecl *Prop,
return;
// Look for a property with the same name.
- DeclContext::lookup_result R = Proto->lookup(Prop->getDeclName());
- for (unsigned I = 0, N = R.size(); I != N; ++I) {
- if (ObjCPropertyDecl *ProtoProp = dyn_cast<ObjCPropertyDecl>(R[I])) {
- S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true);
- return;
- }
+ if (ObjCPropertyDecl *ProtoProp =
+ Proto->lookup(Prop->getDeclName()).find_first<ObjCPropertyDecl>()) {
+ S.DiagnosePropertyMismatch(Prop, ProtoProp, Proto->getIdentifier(), true);
+ return;
}
// Check this property against any protocols we inherit.
@@ -233,18 +231,13 @@ Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
bool FoundInSuper = false;
ObjCInterfaceDecl *CurrentInterfaceDecl = IFace;
while (ObjCInterfaceDecl *Super = CurrentInterfaceDecl->getSuperClass()) {
- DeclContext::lookup_result R = Super->lookup(Res->getDeclName());
- for (unsigned I = 0, N = R.size(); I != N; ++I) {
- if (ObjCPropertyDecl *SuperProp = dyn_cast<ObjCPropertyDecl>(R[I])) {
- DiagnosePropertyMismatch(Res, SuperProp, Super->getIdentifier(), false);
- FoundInSuper = true;
- break;
- }
- }
- if (FoundInSuper)
+ if (ObjCPropertyDecl *SuperProp =
+ Super->lookup(Res->getDeclName()).find_first<ObjCPropertyDecl>()) {
+ DiagnosePropertyMismatch(Res, SuperProp, Super->getIdentifier(), false);
+ FoundInSuper = true;
break;
- else
- CurrentInterfaceDecl = Super;
+ }
+ CurrentInterfaceDecl = Super;
}
if (FoundInSuper) {
@@ -1149,14 +1142,13 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
// redeclared 'readwrite', then no warning is to be issued.
for (auto *Ext : IDecl->known_extensions()) {
DeclContext::lookup_result R = Ext->lookup(property->getDeclName());
- if (!R.empty())
- if (ObjCPropertyDecl *ExtProp = dyn_cast<ObjCPropertyDecl>(R[0])) {
- PIkind = ExtProp->getPropertyAttributesAsWritten();
- if (PIkind & ObjCPropertyAttribute::kind_readwrite) {
- ReadWriteProperty = true;
- break;
- }
+ if (auto *ExtProp = R.find_first<ObjCPropertyDecl>()) {
+ PIkind = ExtProp->getPropertyAttributesAsWritten();
+ if (PIkind & ObjCPropertyAttribute::kind_readwrite) {
+ ReadWriteProperty = true;
+ break;
}
+ }
}
if (!ReadWriteProperty) {
@@ -1466,7 +1458,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
MarkDeclRefReferenced(SelfExpr);
Expr *LoadSelfExpr = ImplicitCastExpr::Create(
Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
Expr *IvarRefExpr =
new (Context) ObjCIvarRefExpr(Ivar,
Ivar->getUsageType(SelfDecl->getType()),
@@ -1529,7 +1521,7 @@ Decl *Sema::ActOnPropertyImplDecl(Scope *S,
MarkDeclRefReferenced(SelfExpr);
Expr *LoadSelfExpr = ImplicitCastExpr::Create(
Context, SelfDecl->getType(), CK_LValueToRValue, SelfExpr, nullptr,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
Expr *lhs =
new (Context) ObjCIvarRefExpr(Ivar,
Ivar->getUsageType(SelfDecl->getType()),
diff --git a/clang/lib/Sema/SemaOpenMP.cpp b/clang/lib/Sema/SemaOpenMP.cpp
index 78707484f588..c0cd2bf18a77 100644
--- a/clang/lib/Sema/SemaOpenMP.cpp
+++ b/clang/lib/Sema/SemaOpenMP.cpp
@@ -1884,8 +1884,7 @@ void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
static bool isOpenMPDeviceDelayedContext(Sema &S) {
assert(S.LangOpts.OpenMP && S.LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
- return !S.isInOpenMPTargetExecutionDirective() &&
- !S.isInOpenMPDeclareTargetContext();
+ return !S.isInOpenMPTargetExecutionDirective();
}
namespace {
@@ -1898,11 +1897,11 @@ enum class FunctionEmissionStatus {
} // anonymous namespace
Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
- unsigned DiagID) {
+ unsigned DiagID,
+ FunctionDecl *FD) {
assert(LangOpts.OpenMP && LangOpts.OpenMPIsDevice &&
"Expected OpenMP device compilation.");
- FunctionDecl *FD = getCurFunctionDecl();
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
if (FD) {
FunctionEmissionStatus FES = getEmissionStatus(FD);
@@ -1911,6 +1910,13 @@ Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
Kind = SemaDiagnosticBuilder::K_Immediate;
break;
case FunctionEmissionStatus::Unknown:
+ // TODO: We should always delay diagnostics here in case a target
+ // region is in a function we do not emit. However, as the
+ // current diagnostics are associated with the function containing
+ // the target region and we do not emit that one, we would miss out
+ // on diagnostics for the target region itself. We need to anchor
+ // the diagnostics with the new generated function *or* ensure we
+ // emit diagnostics associated with the surrounding function.
Kind = isOpenMPDeviceDelayedContext(*this)
? SemaDiagnosticBuilder::K_Deferred
: SemaDiagnosticBuilder::K_Immediate;
@@ -1925,30 +1931,34 @@ Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPDeviceCode(SourceLocation Loc,
}
}
- return SemaDiagnosticBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
+ return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
}
Sema::SemaDiagnosticBuilder Sema::diagIfOpenMPHostCode(SourceLocation Loc,
- unsigned DiagID) {
+ unsigned DiagID,
+ FunctionDecl *FD) {
assert(LangOpts.OpenMP && !LangOpts.OpenMPIsDevice &&
"Expected OpenMP host compilation.");
- FunctionEmissionStatus FES = getEmissionStatus(getCurFunctionDecl());
+
SemaDiagnosticBuilder::Kind Kind = SemaDiagnosticBuilder::K_Nop;
- switch (FES) {
- case FunctionEmissionStatus::Emitted:
- Kind = SemaDiagnosticBuilder::K_Immediate;
- break;
- case FunctionEmissionStatus::Unknown:
- Kind = SemaDiagnosticBuilder::K_Deferred;
- break;
- case FunctionEmissionStatus::TemplateDiscarded:
- case FunctionEmissionStatus::OMPDiscarded:
- case FunctionEmissionStatus::CUDADiscarded:
- Kind = SemaDiagnosticBuilder::K_Nop;
- break;
+ if (FD) {
+ FunctionEmissionStatus FES = getEmissionStatus(FD);
+ switch (FES) {
+ case FunctionEmissionStatus::Emitted:
+ Kind = SemaDiagnosticBuilder::K_Immediate;
+ break;
+ case FunctionEmissionStatus::Unknown:
+ Kind = SemaDiagnosticBuilder::K_Deferred;
+ break;
+ case FunctionEmissionStatus::TemplateDiscarded:
+ case FunctionEmissionStatus::OMPDiscarded:
+ case FunctionEmissionStatus::CUDADiscarded:
+ Kind = SemaDiagnosticBuilder::K_Nop;
+ break;
+ }
}
- return SemaDiagnosticBuilder(Kind, Loc, DiagID, getCurFunctionDecl(), *this);
+ return SemaDiagnosticBuilder(Kind, Loc, DiagID, FD, *this);
}
static OpenMPDefaultmapClauseKind
@@ -2177,15 +2187,11 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
//
if (VD && !VD->hasLocalStorage() &&
(getCurCapturedRegion() || getCurBlock() || getCurLambda())) {
- if (isInOpenMPDeclareTargetContext()) {
- // Try to mark variable as declare target if it is used in capturing
- // regions.
- if (LangOpts.OpenMP <= 45 &&
- !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
- checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
- return nullptr;
- }
if (isInOpenMPTargetExecutionDirective()) {
+ DSAStackTy::DSAVarData DVarTop =
+ DSAStack->getTopDSA(D, DSAStack->isClauseParsingMode());
+ if (DVarTop.CKind != OMPC_unknown && DVarTop.RefExpr)
+ return VD;
// If the declaration is enclosed in a 'declare target' directive,
// then it should not be captured.
//
@@ -2210,6 +2216,14 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
if (Regions[CSI->OpenMPCaptureLevel] != OMPD_task)
return VD;
}
+ if (isInOpenMPDeclareTargetContext()) {
+ // Try to mark variable as declare target if it is used in capturing
+ // regions.
+ if (LangOpts.OpenMP <= 45 &&
+ !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
+ checkDeclIsAllowedInOpenMPTarget(nullptr, VD);
+ return nullptr;
+ }
}
if (CheckScopeInfo) {
@@ -2467,8 +2481,8 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(Caller->getMostRecentDecl());
// Ignore host functions during device analyzis.
- if (LangOpts.OpenMPIsDevice && DevTy &&
- *DevTy == OMPDeclareTargetDeclAttr::DT_Host)
+ if (LangOpts.OpenMPIsDevice &&
+ (!DevTy || *DevTy == OMPDeclareTargetDeclAttr::DT_Host))
return;
// Ignore nohost functions during host analyzis.
if (!LangOpts.OpenMPIsDevice && DevTy &&
@@ -2513,6 +2527,7 @@ void Sema::StartOpenMPClause(OpenMPClauseKind K) {
void Sema::EndOpenMPClause() {
DSAStack->setClauseParsingMode(/*K=*/OMPC_unknown);
+ CleanupVarDeclMarking();
}
static std::pair<ValueDecl *, bool>
@@ -3414,7 +3429,9 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
if (S->getDirectiveKind() == OMPD_atomic ||
S->getDirectiveKind() == OMPD_critical ||
S->getDirectiveKind() == OMPD_section ||
- S->getDirectiveKind() == OMPD_master) {
+ S->getDirectiveKind() == OMPD_master ||
+ S->getDirectiveKind() == OMPD_masked ||
+ isOpenMPLoopTransformationDirective(S->getDirectiveKind())) {
Visit(S->getAssociatedStmt());
return;
}
@@ -3556,9 +3573,11 @@ public:
!Stack->isLoopControlVariable(VD).first) {
if (!Stack->checkMappableExprComponentListsForDecl(
VD, /*CurrentRegionOnly=*/true,
- [](OMPClauseMappableExprCommon::MappableExprComponentListRef
- StackComponents,
- OpenMPClauseKind) {
+ [this](OMPClauseMappableExprCommon::MappableExprComponentListRef
+ StackComponents,
+ OpenMPClauseKind) {
+ if (SemaRef.LangOpts.OpenMP >= 50)
+ return !StackComponents.empty();
// Variable is used if it has been marked as an array, array
// section, array shaping or the variable iself.
return StackComponents.size() == 1 ||
@@ -3785,6 +3804,17 @@ public:
// Check implicitly captured variables.
VisitSubCaptures(S);
}
+
+ void VisitOMPTileDirective(OMPTileDirective *S) {
+ // #pragma omp tile does not introduce data sharing.
+ VisitStmt(S);
+ }
+
+ void VisitOMPUnrollDirective(OMPUnrollDirective *S) {
+ // #pragma omp unroll does not introduce data sharing.
+ VisitStmt(S);
+ }
+
void VisitStmt(Stmt *S) {
for (Stmt *C : S->children()) {
if (C) {
@@ -3949,6 +3979,9 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_critical:
case OMPD_section:
case OMPD_master:
+ case OMPD_masked:
+ case OMPD_tile:
+ case OMPD_unroll:
break;
case OMPD_simd:
case OMPD_for:
@@ -3959,7 +3992,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_distribute:
case OMPD_distribute_simd:
case OMPD_ordered:
- case OMPD_target_data: {
+ case OMPD_target_data:
+ case OMPD_dispatch: {
Sema::CapturedParamNameType Params[] = {
std::make_pair(StringRef(), QualType()) // __context with shared vars
};
@@ -4429,7 +4463,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
if (DSAStack->getCurrentDirective() == OMPD_atomic ||
DSAStack->getCurrentDirective() == OMPD_critical ||
DSAStack->getCurrentDirective() == OMPD_section ||
- DSAStack->getCurrentDirective() == OMPD_master)
+ DSAStack->getCurrentDirective() == OMPD_master ||
+ DSAStack->getCurrentDirective() == OMPD_masked)
return S;
bool ErrorFound = false;
@@ -4471,6 +4506,10 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
}
DSAStack->setForceVarCapturing(/*V=*/false);
+ } else if (isOpenMPLoopTransformationDirective(
+ DSAStack->getCurrentDirective())) {
+ assert(CaptureRegions.empty() &&
+ "No captured regions in loop transformation directives.");
} else if (CaptureRegions.size() > 1 ||
CaptureRegions.back() != OMPD_unknown) {
if (auto *C = OMPClauseWithPreInit::get(Clause))
@@ -4568,6 +4607,22 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
}
}
}
+ if (ThisCaptureRegion == OMPD_parallel) {
+ // Capture temp arrays for inscan reductions and locals in aligned
+ // clauses.
+ for (OMPClause *C : Clauses) {
+ if (auto *RC = dyn_cast<OMPReductionClause>(C)) {
+ if (RC->getModifier() != OMPC_REDUCTION_inscan)
+ continue;
+ for (Expr *E : RC->copy_array_temps())
+ MarkDeclarationsReferencedInExpr(E);
+ }
+ if (auto *AC = dyn_cast<OMPAlignedClause>(C)) {
+ for (Expr *E : AC->varlists())
+ MarkDeclarationsReferencedInExpr(E);
+ }
+ }
+ }
if (++CompletedRegions == CaptureRegions.size())
DSAStack->setBodyComplete();
SR = ActOnCapturedRegionEnd(SR.get());
@@ -4694,10 +4749,10 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
(ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
ParentRegion == OMPD_parallel_sections)));
OrphanSeen = ParentRegion == OMPD_unknown;
- } else if (CurrentRegion == OMPD_master) {
- // OpenMP [2.16, Nesting of Regions]
- // A master region may not be closely nested inside a worksharing,
- // atomic, or explicit task region.
+ } else if (CurrentRegion == OMPD_master || CurrentRegion == OMPD_masked) {
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A masked region may not be closely nested inside a worksharing, loop,
+ // atomic, task, or taskloop region.
NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
isOpenMPTaskingDirective(ParentRegion);
} else if (CurrentRegion == OMPD_critical && CurrentName.getName()) {
@@ -4727,27 +4782,28 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
return true;
}
} else if (CurrentRegion == OMPD_barrier) {
- // OpenMP [2.16, Nesting of Regions]
- // A barrier region may not be closely nested inside a worksharing,
- // explicit task, critical, ordered, atomic, or master region.
- NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
- isOpenMPTaskingDirective(ParentRegion) ||
- ParentRegion == OMPD_master ||
- ParentRegion == OMPD_parallel_master ||
- ParentRegion == OMPD_critical ||
- ParentRegion == OMPD_ordered;
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A barrier region may not be closely nested inside a worksharing, loop,
+ // task, taskloop, critical, ordered, atomic, or masked region.
+ NestingProhibited =
+ isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPTaskingDirective(ParentRegion) ||
+ ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
+ ParentRegion == OMPD_parallel_master ||
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
!isOpenMPParallelDirective(CurrentRegion) &&
!isOpenMPTeamsDirective(CurrentRegion)) {
- // OpenMP [2.16, Nesting of Regions]
- // A worksharing region may not be closely nested inside a worksharing,
- // explicit task, critical, ordered, atomic, or master region.
- NestingProhibited = isOpenMPWorksharingDirective(ParentRegion) ||
- isOpenMPTaskingDirective(ParentRegion) ||
- ParentRegion == OMPD_master ||
- ParentRegion == OMPD_parallel_master ||
- ParentRegion == OMPD_critical ||
- ParentRegion == OMPD_ordered;
+ // OpenMP 5.1 [2.22, Nesting of Regions]
+ // A loop region that binds to a parallel region or a worksharing region
+ // may not be closely nested inside a worksharing, loop, task, taskloop,
+ // critical, ordered, atomic, or masked region.
+ NestingProhibited =
+ isOpenMPWorksharingDirective(ParentRegion) ||
+ isOpenMPTaskingDirective(ParentRegion) ||
+ ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
+ ParentRegion == OMPD_parallel_master ||
+ ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
Recommend = ShouldBeInParallelRegion;
} else if (CurrentRegion == OMPD_ordered) {
// OpenMP [2.16, Nesting of Regions]
@@ -5130,6 +5186,534 @@ static void checkAllocateClauses(Sema &S, DSAStackTy *Stack,
}
}
+namespace {
+/// Rewrite statements and expressions for Sema \p Actions CurContext.
+///
+/// Used to wrap already parsed statements/expressions into a new CapturedStmt
+/// context. DeclRefExpr used inside the new context are changed to refer to the
+/// captured variable instead.
+class CaptureVars : public TreeTransform<CaptureVars> {
+ using BaseTransform = TreeTransform<CaptureVars>;
+
+public:
+ CaptureVars(Sema &Actions) : BaseTransform(Actions) {}
+
+ bool AlwaysRebuild() { return true; }
+};
+} // namespace
+
+static VarDecl *precomputeExpr(Sema &Actions,
+ SmallVectorImpl<Stmt *> &BodyStmts, Expr *E,
+ StringRef Name) {
+ Expr *NewE = AssertSuccess(CaptureVars(Actions).TransformExpr(E));
+ VarDecl *NewVar = buildVarDecl(Actions, {}, NewE->getType(), Name, nullptr,
+ dyn_cast<DeclRefExpr>(E->IgnoreImplicit()));
+ auto *NewDeclStmt = cast<DeclStmt>(AssertSuccess(
+ Actions.ActOnDeclStmt(Actions.ConvertDeclToDeclGroup(NewVar), {}, {})));
+ Actions.AddInitializerToDecl(NewDeclStmt->getSingleDecl(), NewE, false);
+ BodyStmts.push_back(NewDeclStmt);
+ return NewVar;
+}
+
+/// Create a closure that computes the number of iterations of a loop.
+///
+/// \param Actions The Sema object.
+/// \param LogicalTy Type for the logical iteration number.
+/// \param Rel Comparison operator of the loop condition.
+/// \param StartExpr Value of the loop counter at the first iteration.
+/// \param StopExpr Bound the loop counter is compared against in the condition.
+/// \param StepExpr Amount of increment after each iteration.
+///
+/// \return Closure (CapturedStmt) of the distance calculation.
+static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
+ BinaryOperator::Opcode Rel,
+ Expr *StartExpr, Expr *StopExpr,
+ Expr *StepExpr) {
+ ASTContext &Ctx = Actions.getASTContext();
+ TypeSourceInfo *LogicalTSI = Ctx.getTrivialTypeSourceInfo(LogicalTy);
+
+ // Captured regions currently don't support return values, we use an
+ // out-parameter instead. All inputs are implicit captures.
+ // TODO: Instead of capturing each DeclRefExpr occurring in
+ // StartExpr/StopExpr/Step, these could also be passed as a value capture.
+ QualType ResultTy = Ctx.getLValueReferenceType(LogicalTy);
+ Sema::CapturedParamNameType Params[] = {{"Distance", ResultTy},
+ {StringRef(), QualType()}};
+ Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
+
+ Stmt *Body;
+ {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ CapturedDecl *CS = cast<CapturedDecl>(Actions.CurContext);
+
+ // Get the LValue expression for the result.
+ ImplicitParamDecl *DistParam = CS->getParam(0);
+ DeclRefExpr *DistRef = Actions.BuildDeclRefExpr(
+ DistParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
+
+ SmallVector<Stmt *, 4> BodyStmts;
+
+ // Capture all referenced variable references.
+ // TODO: Instead of computing NewStart/NewStop/NewStep inside the
+ // CapturedStmt, we could compute them before and capture the result, to be
+ // used jointly with the LoopVar function.
+ VarDecl *NewStart = precomputeExpr(Actions, BodyStmts, StartExpr, ".start");
+ VarDecl *NewStop = precomputeExpr(Actions, BodyStmts, StopExpr, ".stop");
+ VarDecl *NewStep = precomputeExpr(Actions, BodyStmts, StepExpr, ".step");
+ auto BuildVarRef = [&](VarDecl *VD) {
+ return buildDeclRefExpr(Actions, VD, VD->getType(), {});
+ };
+
+ IntegerLiteral *Zero = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
+ Expr *Dist;
+ if (Rel == BO_NE) {
+ // When using a != comparison, the increment can be +1 or -1. This can be
+ // dynamic at runtime, so we need to check for the direction.
+ Expr *IsNegStep = AssertSuccess(
+ Actions.BuildBinOp(nullptr, {}, BO_LT, BuildVarRef(NewStep), Zero));
+
+ // Positive increment.
+ Expr *ForwardRange = AssertSuccess(Actions.BuildBinOp(
+ nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
+ ForwardRange = AssertSuccess(
+ Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, ForwardRange));
+ Expr *ForwardDist = AssertSuccess(Actions.BuildBinOp(
+ nullptr, {}, BO_Div, ForwardRange, BuildVarRef(NewStep)));
+
+ // Negative increment.
+ Expr *BackwardRange = AssertSuccess(Actions.BuildBinOp(
+ nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
+ BackwardRange = AssertSuccess(
+ Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, BackwardRange));
+ Expr *NegIncAmount = AssertSuccess(
+ Actions.BuildUnaryOp(nullptr, {}, UO_Minus, BuildVarRef(NewStep)));
+ Expr *BackwardDist = AssertSuccess(
+ Actions.BuildBinOp(nullptr, {}, BO_Div, BackwardRange, NegIncAmount));
+
+ // Use the appropriate case.
+ Dist = AssertSuccess(Actions.ActOnConditionalOp(
+ {}, {}, IsNegStep, BackwardDist, ForwardDist));
+ } else {
+ assert((Rel == BO_LT || Rel == BO_LE || Rel == BO_GE || Rel == BO_GT) &&
+ "Expected one of these relational operators");
+
+ // We can derive the direction from any other comparison operator. It is
+ // not well-formed OpenMP if Step increments/decrements in the other
+ // direction. The expression below checks whether at least the first
+ // iteration passes the loop condition.
+ Expr *HasAnyIteration = AssertSuccess(Actions.BuildBinOp(
+ nullptr, {}, Rel, BuildVarRef(NewStart), BuildVarRef(NewStop)));
+
+ // Compute the range between first and last counter value.
+ Expr *Range;
+ if (Rel == BO_GE || Rel == BO_GT)
+ Range = AssertSuccess(Actions.BuildBinOp(
+ nullptr, {}, BO_Sub, BuildVarRef(NewStart), BuildVarRef(NewStop)));
+ else
+ Range = AssertSuccess(Actions.BuildBinOp(
+ nullptr, {}, BO_Sub, BuildVarRef(NewStop), BuildVarRef(NewStart)));
+
+ // Ensure unsigned range space.
+ Range =
+ AssertSuccess(Actions.BuildCStyleCastExpr({}, LogicalTSI, {}, Range));
+
+ if (Rel == BO_LE || Rel == BO_GE) {
+ // Add one to the range if the relational operator is inclusive.
+ Range =
+ AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_PreInc, Range));
+ }
+
+ // Divide by the absolute step amount.
+ Expr *Divisor = BuildVarRef(NewStep);
+ if (Rel == BO_GE || Rel == BO_GT)
+ Divisor =
+ AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Minus, Divisor));
+ Dist = AssertSuccess(
+ Actions.BuildBinOp(nullptr, {}, BO_Div, Range, Divisor));
+
+ // If there is not at least one iteration, the range contains garbage. Fix
+ // to zero in this case.
+ Dist = AssertSuccess(
+ Actions.ActOnConditionalOp({}, {}, HasAnyIteration, Dist, Zero));
+ }
+
+ // Assign the result to the out-parameter.
+ Stmt *ResultAssign = AssertSuccess(Actions.BuildBinOp(
+ Actions.getCurScope(), {}, BO_Assign, DistRef, Dist));
+ BodyStmts.push_back(ResultAssign);
+
+ Body = AssertSuccess(Actions.ActOnCompoundStmt({}, {}, BodyStmts, false));
+ }
+
+ return cast<CapturedStmt>(
+ AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
+}
+
+/// Create a closure that computes the loop variable from the logical iteration
+/// number.
+///
+/// \param Actions The Sema object.
+/// \param LoopVarTy Type for the loop variable used for result value.
+/// \param LogicalTy Type for the logical iteration number.
+/// \param StartExpr Value of the loop counter at the first iteration.
+/// \param Step Amount of increment after each iteration.
+/// \param Deref Whether the loop variable is a dereference of the loop
+/// counter variable.
+///
+/// \return Closure (CapturedStmt) of the loop value calculation.
+static CapturedStmt *buildLoopVarFunc(Sema &Actions, QualType LoopVarTy,
+ QualType LogicalTy,
+ DeclRefExpr *StartExpr, Expr *Step,
+ bool Deref) {
+ ASTContext &Ctx = Actions.getASTContext();
+
+ // Pass the result as an out-parameter. Passing as return value would require
+ // the OpenMPIRBuilder to know additional C/C++ semantics, such as how to
+ // invoke a copy constructor.
+ QualType TargetParamTy = Ctx.getLValueReferenceType(LoopVarTy);
+ Sema::CapturedParamNameType Params[] = {{"LoopVar", TargetParamTy},
+ {"Logical", LogicalTy},
+ {StringRef(), QualType()}};
+ Actions.ActOnCapturedRegionStart({}, nullptr, CR_Default, Params);
+
+ // Capture the initial iterator which represents the LoopVar value at the
+  // zeroth logical iteration. Since the original ForStmt/CXXForRangeStmt update
+ // it in every iteration, capture it by value before it is modified.
+ VarDecl *StartVar = cast<VarDecl>(StartExpr->getDecl());
+ bool Invalid = Actions.tryCaptureVariable(StartVar, {},
+ Sema::TryCapture_ExplicitByVal, {});
+ (void)Invalid;
+ assert(!Invalid && "Expecting capture-by-value to work.");
+
+ Expr *Body;
+ {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ auto *CS = cast<CapturedDecl>(Actions.CurContext);
+
+ ImplicitParamDecl *TargetParam = CS->getParam(0);
+ DeclRefExpr *TargetRef = Actions.BuildDeclRefExpr(
+ TargetParam, LoopVarTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
+ ImplicitParamDecl *IndvarParam = CS->getParam(1);
+ DeclRefExpr *LogicalRef = Actions.BuildDeclRefExpr(
+ IndvarParam, LogicalTy, VK_LValue, {}, nullptr, nullptr, {}, nullptr);
+
+ // Capture the Start expression.
+ CaptureVars Recap(Actions);
+ Expr *NewStart = AssertSuccess(Recap.TransformExpr(StartExpr));
+ Expr *NewStep = AssertSuccess(Recap.TransformExpr(Step));
+
+ Expr *Skip = AssertSuccess(
+ Actions.BuildBinOp(nullptr, {}, BO_Mul, NewStep, LogicalRef));
+ // TODO: Explicitly cast to the iterator's difference_type instead of
+ // relying on implicit conversion.
+ Expr *Advanced =
+ AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, NewStart, Skip));
+
+ if (Deref) {
+ // For range-based for-loops convert the loop counter value to a concrete
+ // loop variable value by dereferencing the iterator.
+ Advanced =
+ AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Deref, Advanced));
+ }
+
+ // Assign the result to the output parameter.
+ Body = AssertSuccess(Actions.BuildBinOp(Actions.getCurScope(), {},
+ BO_Assign, TargetRef, Advanced));
+ }
+ return cast<CapturedStmt>(
+ AssertSuccess(Actions.ActOnCapturedRegionEnd(Body)));
+}
+
+StmtResult Sema::ActOnOpenMPCanonicalLoop(Stmt *AStmt) {
+ ASTContext &Ctx = getASTContext();
+
+ // Extract the common elements of ForStmt and CXXForRangeStmt:
+ // Loop variable, repeat condition, increment
+ Expr *Cond, *Inc;
+ VarDecl *LIVDecl, *LUVDecl;
+ if (auto *For = dyn_cast<ForStmt>(AStmt)) {
+ Stmt *Init = For->getInit();
+ if (auto *LCVarDeclStmt = dyn_cast<DeclStmt>(Init)) {
+ // For statement declares loop variable.
+ LIVDecl = cast<VarDecl>(LCVarDeclStmt->getSingleDecl());
+ } else if (auto *LCAssign = dyn_cast<BinaryOperator>(Init)) {
+ // For statement reuses variable.
+ assert(LCAssign->getOpcode() == BO_Assign &&
+ "init part must be a loop variable assignment");
+ auto *CounterRef = cast<DeclRefExpr>(LCAssign->getLHS());
+ LIVDecl = cast<VarDecl>(CounterRef->getDecl());
+ } else
+ llvm_unreachable("Cannot determine loop variable");
+ LUVDecl = LIVDecl;
+
+ Cond = For->getCond();
+ Inc = For->getInc();
+ } else if (auto *RangeFor = dyn_cast<CXXForRangeStmt>(AStmt)) {
+ DeclStmt *BeginStmt = RangeFor->getBeginStmt();
+ LIVDecl = cast<VarDecl>(BeginStmt->getSingleDecl());
+ LUVDecl = RangeFor->getLoopVariable();
+
+ Cond = RangeFor->getCond();
+ Inc = RangeFor->getInc();
+ } else
+ llvm_unreachable("unhandled kind of loop");
+
+ QualType CounterTy = LIVDecl->getType();
+ QualType LVTy = LUVDecl->getType();
+
+ // Analyze the loop condition.
+ Expr *LHS, *RHS;
+ BinaryOperator::Opcode CondRel;
+ Cond = Cond->IgnoreImplicit();
+ if (auto *CondBinExpr = dyn_cast<BinaryOperator>(Cond)) {
+ LHS = CondBinExpr->getLHS();
+ RHS = CondBinExpr->getRHS();
+ CondRel = CondBinExpr->getOpcode();
+ } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Cond)) {
+ assert(CondCXXOp->getNumArgs() == 2 && "Comparison should have 2 operands");
+ LHS = CondCXXOp->getArg(0);
+ RHS = CondCXXOp->getArg(1);
+ switch (CondCXXOp->getOperator()) {
+ case OO_ExclaimEqual:
+ CondRel = BO_NE;
+ break;
+ case OO_Less:
+ CondRel = BO_LT;
+ break;
+ case OO_LessEqual:
+ CondRel = BO_LE;
+ break;
+ case OO_Greater:
+ CondRel = BO_GT;
+ break;
+ case OO_GreaterEqual:
+ CondRel = BO_GE;
+ break;
+ default:
+ llvm_unreachable("unexpected iterator operator");
+ }
+ } else
+ llvm_unreachable("unexpected loop condition");
+
+ // Normalize such that the loop counter is on the LHS.
+ if (!isa<DeclRefExpr>(LHS->IgnoreImplicit()) ||
+ cast<DeclRefExpr>(LHS->IgnoreImplicit())->getDecl() != LIVDecl) {
+ std::swap(LHS, RHS);
+ CondRel = BinaryOperator::reverseComparisonOp(CondRel);
+ }
+ auto *CounterRef = cast<DeclRefExpr>(LHS->IgnoreImplicit());
+
+ // Decide the bit width for the logical iteration counter. By default use the
+ // unsigned ptrdiff_t integer size (for iterators and pointers).
+ // TODO: For iterators, use iterator::difference_type,
+ // std::iterator_traits<>::difference_type or decltype(it - end).
+ QualType LogicalTy = Ctx.getUnsignedPointerDiffType();
+ if (CounterTy->isIntegerType()) {
+ unsigned BitWidth = Ctx.getIntWidth(CounterTy);
+ LogicalTy = Ctx.getIntTypeForBitwidth(BitWidth, false);
+ }
+
+ // Analyze the loop increment.
+ Expr *Step;
+ if (auto *IncUn = dyn_cast<UnaryOperator>(Inc)) {
+ int Direction;
+ switch (IncUn->getOpcode()) {
+ case UO_PreInc:
+ case UO_PostInc:
+ Direction = 1;
+ break;
+ case UO_PreDec:
+ case UO_PostDec:
+ Direction = -1;
+ break;
+ default:
+ llvm_unreachable("unhandled unary increment operator");
+ }
+ Step = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), Direction), LogicalTy, {});
+ } else if (auto *IncBin = dyn_cast<BinaryOperator>(Inc)) {
+ if (IncBin->getOpcode() == BO_AddAssign) {
+ Step = IncBin->getRHS();
+ } else if (IncBin->getOpcode() == BO_SubAssign) {
+ Step =
+ AssertSuccess(BuildUnaryOp(nullptr, {}, UO_Minus, IncBin->getRHS()));
+ } else
+ llvm_unreachable("unhandled binary increment operator");
+ } else if (auto *CondCXXOp = dyn_cast<CXXOperatorCallExpr>(Inc)) {
+ switch (CondCXXOp->getOperator()) {
+ case OO_PlusPlus:
+ Step = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
+ break;
+ case OO_MinusMinus:
+ Step = IntegerLiteral::Create(
+ Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), -1), LogicalTy, {});
+ break;
+ case OO_PlusEqual:
+ Step = CondCXXOp->getArg(1);
+ break;
+ case OO_MinusEqual:
+ Step = AssertSuccess(
+ BuildUnaryOp(nullptr, {}, UO_Minus, CondCXXOp->getArg(1)));
+ break;
+ default:
+ llvm_unreachable("unhandled overloaded increment operator");
+ }
+ } else
+ llvm_unreachable("unknown increment expression");
+
+ CapturedStmt *DistanceFunc =
+ buildDistanceFunc(*this, LogicalTy, CondRel, LHS, RHS, Step);
+ CapturedStmt *LoopVarFunc = buildLoopVarFunc(
+ *this, LVTy, LogicalTy, CounterRef, Step, isa<CXXForRangeStmt>(AStmt));
+ DeclRefExpr *LVRef = BuildDeclRefExpr(LUVDecl, LUVDecl->getType(), VK_LValue,
+ {}, nullptr, nullptr, {}, nullptr);
+ return OMPCanonicalLoop::create(getASTContext(), AStmt, DistanceFunc,
+ LoopVarFunc, LVRef);
+}
+
+static ExprResult buildUserDefinedMapperRef(Sema &SemaRef, Scope *S,
+ CXXScopeSpec &MapperIdScopeSpec,
+ const DeclarationNameInfo &MapperId,
+ QualType Type,
+ Expr *UnresolvedMapper);
+
+/// Perform DFS through the structure/class data members trying to find
+/// member(s) with user-defined 'default' mapper and generate implicit map
+/// clauses for such members with the found 'default' mapper.
+static void
+processImplicitMapsWithDefaultMappers(Sema &S, DSAStackTy *Stack,
+ SmallVectorImpl<OMPClause *> &Clauses) {
+  // Check for the default mapper for data members.
+ if (S.getLangOpts().OpenMP < 50)
+ return;
+ SmallVector<OMPClause *, 4> ImplicitMaps;
+ for (int Cnt = 0, EndCnt = Clauses.size(); Cnt < EndCnt; ++Cnt) {
+ auto *C = dyn_cast<OMPMapClause>(Clauses[Cnt]);
+ if (!C)
+ continue;
+ SmallVector<Expr *, 4> SubExprs;
+ auto *MI = C->mapperlist_begin();
+ for (auto I = C->varlist_begin(), End = C->varlist_end(); I != End;
+ ++I, ++MI) {
+ // Expression is mapped using mapper - skip it.
+ if (*MI)
+ continue;
+ Expr *E = *I;
+ // Expression is dependent - skip it, build the mapper when it gets
+ // instantiated.
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ E->containsUnexpandedParameterPack())
+ continue;
+ // Array section - need to check for the mapping of the array section
+ // element.
+ QualType CanonType = E->getType().getCanonicalType();
+ if (CanonType->isSpecificBuiltinType(BuiltinType::OMPArraySection)) {
+ const auto *OASE = cast<OMPArraySectionExpr>(E->IgnoreParenImpCasts());
+ QualType BaseType =
+ OMPArraySectionExpr::getBaseOriginalType(OASE->getBase());
+ QualType ElemType;
+ if (const auto *ATy = BaseType->getAsArrayTypeUnsafe())
+ ElemType = ATy->getElementType();
+ else
+ ElemType = BaseType->getPointeeType();
+ CanonType = ElemType;
+ }
+
+ // DFS over data members in structures/classes.
+ SmallVector<std::pair<QualType, FieldDecl *>, 4> Types(
+ 1, {CanonType, nullptr});
+ llvm::DenseMap<const Type *, Expr *> Visited;
+ SmallVector<std::pair<FieldDecl *, unsigned>, 4> ParentChain(
+ 1, {nullptr, 1});
+ while (!Types.empty()) {
+ QualType BaseType;
+ FieldDecl *CurFD;
+ std::tie(BaseType, CurFD) = Types.pop_back_val();
+ while (ParentChain.back().second == 0)
+ ParentChain.pop_back();
+ --ParentChain.back().second;
+ if (BaseType.isNull())
+ continue;
+ // Only structs/classes are allowed to have mappers.
+ const RecordDecl *RD = BaseType.getCanonicalType()->getAsRecordDecl();
+ if (!RD)
+ continue;
+ auto It = Visited.find(BaseType.getTypePtr());
+ if (It == Visited.end()) {
+ // Try to find the associated user-defined mapper.
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo DefaultMapperId;
+ DefaultMapperId.setName(S.Context.DeclarationNames.getIdentifier(
+ &S.Context.Idents.get("default")));
+ DefaultMapperId.setLoc(E->getExprLoc());
+ ExprResult ER = buildUserDefinedMapperRef(
+ S, Stack->getCurScope(), MapperIdScopeSpec, DefaultMapperId,
+ BaseType, /*UnresolvedMapper=*/nullptr);
+ if (ER.isInvalid())
+ continue;
+ It = Visited.try_emplace(BaseType.getTypePtr(), ER.get()).first;
+ }
+ // Found default mapper.
+ if (It->second) {
+ auto *OE = new (S.Context) OpaqueValueExpr(E->getExprLoc(), CanonType,
+ VK_LValue, OK_Ordinary, E);
+ OE->setIsUnique(/*V=*/true);
+ Expr *BaseExpr = OE;
+ for (const auto &P : ParentChain) {
+ if (P.first) {
+ BaseExpr = S.BuildMemberExpr(
+ BaseExpr, /*IsArrow=*/false, E->getExprLoc(),
+ NestedNameSpecifierLoc(), SourceLocation(), P.first,
+ DeclAccessPair::make(P.first, P.first->getAccess()),
+ /*HadMultipleCandidates=*/false, DeclarationNameInfo(),
+ P.first->getType(), VK_LValue, OK_Ordinary);
+ BaseExpr = S.DefaultLvalueConversion(BaseExpr).get();
+ }
+ }
+ if (CurFD)
+ BaseExpr = S.BuildMemberExpr(
+ BaseExpr, /*IsArrow=*/false, E->getExprLoc(),
+ NestedNameSpecifierLoc(), SourceLocation(), CurFD,
+ DeclAccessPair::make(CurFD, CurFD->getAccess()),
+ /*HadMultipleCandidates=*/false, DeclarationNameInfo(),
+ CurFD->getType(), VK_LValue, OK_Ordinary);
+ SubExprs.push_back(BaseExpr);
+ continue;
+ }
+      // Check for the "default" mapper for data members.
+ bool FirstIter = true;
+ for (FieldDecl *FD : RD->fields()) {
+ if (!FD)
+ continue;
+ QualType FieldTy = FD->getType();
+ if (FieldTy.isNull() ||
+ !(FieldTy->isStructureOrClassType() || FieldTy->isUnionType()))
+ continue;
+ if (FirstIter) {
+ FirstIter = false;
+ ParentChain.emplace_back(CurFD, 1);
+ } else {
+ ++ParentChain.back().second;
+ }
+ Types.emplace_back(FieldTy, FD);
+ }
+ }
+ }
+ if (SubExprs.empty())
+ continue;
+ CXXScopeSpec MapperIdScopeSpec;
+ DeclarationNameInfo MapperId;
+ if (OMPClause *NewClause = S.ActOnOpenMPMapClause(
+ C->getMapTypeModifiers(), C->getMapTypeModifiersLoc(),
+ MapperIdScopeSpec, MapperId, C->getMapType(),
+ /*IsMapTypeImplicit=*/true, SourceLocation(), SourceLocation(),
+ SubExprs, OMPVarListLocTy()))
+ Clauses.push_back(NewClause);
+ }
+}
+
StmtResult Sema::ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
@@ -5146,7 +5730,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
bool ErrorFound = false;
ClausesWithImplicit.append(Clauses.begin(), Clauses.end());
if (AStmt && !CurContext->isDependentContext() && Kind != OMPD_atomic &&
- Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master) {
+ Kind != OMPD_critical && Kind != OMPD_section && Kind != OMPD_master &&
+ Kind != OMPD_masked && !isOpenMPLoopTransformationDirective(Kind)) {
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
// Check default data sharing attributes for referenced variables.
@@ -5249,6 +5834,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
}
}
}
+ // Build expressions for implicit maps of data members with 'default'
+ // mappers.
+ if (LangOpts.OpenMP >= 50)
+ processImplicitMapsWithDefaultMappers(*this, DSAStack,
+ ClausesWithImplicit);
}
llvm::SmallVector<OpenMPDirectiveKind, 4> AllowedNameModifiers;
@@ -5264,6 +5854,14 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_tile:
+ Res =
+ ActOnOpenMPTileDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc);
+ break;
+ case OMPD_unroll:
+ Res = ActOnOpenMPUnrollDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
case OMPD_for:
Res = ActOnOpenMPForDirective(ClausesWithImplicit, AStmt, StartLoc, EndLoc,
VarsWithInheritedDSA);
@@ -5292,6 +5890,10 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
"No clauses are allowed for 'omp master' directive");
Res = ActOnOpenMPMasterDirective(AStmt, StartLoc, EndLoc);
break;
+ case OMPD_masked:
+ Res = ActOnOpenMPMaskedDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
case OMPD_critical:
Res = ActOnOpenMPCriticalDirective(DirName, ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
@@ -5554,6 +6156,15 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_interop:
+ assert(AStmt == nullptr &&
+ "No associated statement allowed for 'omp interop' directive");
+ Res = ActOnOpenMPInteropDirective(ClausesWithImplicit, StartLoc, EndLoc);
+ break;
+ case OMPD_dispatch:
+ Res = ActOnOpenMPDispatchDirective(ClausesWithImplicit, AStmt, StartLoc,
+ EndLoc);
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -5602,6 +6213,8 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_num_tasks:
case OMPC_final:
case OMPC_priority:
+ case OMPC_novariants:
+ case OMPC_nocontext:
// Do not analyze if no parent parallel directive.
if (isOpenMPParallelDirective(Kind))
break;
@@ -5614,6 +6227,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_collapse:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_default:
case OMPC_proc_bind:
case OMPC_private:
@@ -6115,7 +6729,8 @@ void Sema::ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
auto *VariantFuncRef = DeclRefExpr::Create(
Context, NestedNameSpecifierLoc(), SourceLocation(), FD,
/* RefersToEnclosingVariableOrCapture */ false,
- /* NameLoc */ FD->getLocation(), FD->getType(), ExprValueKind::VK_RValue);
+ /* NameLoc */ FD->getLocation(), FD->getType(),
+ ExprValueKind::VK_PRValue);
OMPDeclareVariantScope &DVScope = OMPDeclareVariantScopes.back();
auto *OMPDeclareVariantA = OMPDeclareVariantAttr::CreateImplicit(
@@ -6578,6 +7193,8 @@ struct LoopIterationSpace final {
class OpenMPIterationSpaceChecker {
/// Reference to Sema.
Sema &SemaRef;
+ /// Does the loop associated directive support non-rectangular loops?
+ bool SupportsNonRectangular;
/// Data-sharing stack.
DSAStackTy &Stack;
/// A location for diagnostics (when there is no some better location).
@@ -6626,10 +7243,10 @@ class OpenMPIterationSpaceChecker {
Expr *Condition = nullptr;
public:
- OpenMPIterationSpaceChecker(Sema &SemaRef, DSAStackTy &Stack,
- SourceLocation DefaultLoc)
- : SemaRef(SemaRef), Stack(Stack), DefaultLoc(DefaultLoc),
- ConditionLoc(DefaultLoc) {}
+ OpenMPIterationSpaceChecker(Sema &SemaRef, bool SupportsNonRectangular,
+ DSAStackTy &Stack, SourceLocation DefaultLoc)
+ : SemaRef(SemaRef), SupportsNonRectangular(SupportsNonRectangular),
+ Stack(Stack), DefaultLoc(DefaultLoc), ConditionLoc(DefaultLoc) {}
/// Check init-expr for canonical loop form and save loop counter
/// variable - #Var and its initialization value - #LB.
bool checkAndSetInit(Stmt *S, bool EmitDiags = true);
@@ -6833,6 +7450,7 @@ class LoopCounterRefChecker final
const ValueDecl *DepDecl = nullptr;
const ValueDecl *PrevDepDecl = nullptr;
bool IsInitializer = true;
+ bool SupportsNonRectangular;
unsigned BaseLoopId = 0;
bool checkDecl(const Expr *E, const ValueDecl *VD) {
if (getCanonicalDecl(VD) == getCanonicalDecl(CurLCDecl)) {
@@ -6855,6 +7473,10 @@ class LoopCounterRefChecker final
SemaRef.Diag(VD->getLocation(), diag::note_previous_decl) << VD;
return false;
}
+ if (Data.first && !SupportsNonRectangular) {
+ SemaRef.Diag(E->getExprLoc(), diag::err_omp_invariant_dependency);
+ return false;
+ }
if (Data.first &&
(DepDecl || (PrevDepDecl &&
getCanonicalDecl(VD) != getCanonicalDecl(PrevDepDecl)))) {
@@ -6899,9 +7521,11 @@ public:
}
explicit LoopCounterRefChecker(Sema &SemaRef, DSAStackTy &Stack,
const ValueDecl *CurLCDecl, bool IsInitializer,
- const ValueDecl *PrevDepDecl = nullptr)
+ const ValueDecl *PrevDepDecl = nullptr,
+ bool SupportsNonRectangular = true)
: SemaRef(SemaRef), Stack(Stack), CurLCDecl(CurLCDecl),
- PrevDepDecl(PrevDepDecl), IsInitializer(IsInitializer) {}
+ PrevDepDecl(PrevDepDecl), IsInitializer(IsInitializer),
+ SupportsNonRectangular(SupportsNonRectangular) {}
unsigned getBaseLoopId() const {
assert(CurLCDecl && "Expected loop dependency.");
return BaseLoopId;
@@ -6918,7 +7542,7 @@ OpenMPIterationSpaceChecker::doesDependOnLoopCounter(const Stmt *S,
bool IsInitializer) {
// Check for the non-rectangular loops.
LoopCounterRefChecker LoopStmtChecker(SemaRef, Stack, LCDecl, IsInitializer,
- DepDecl);
+ DepDecl, SupportsNonRectangular);
if (LoopStmtChecker.Visit(S)) {
DepDecl = LoopStmtChecker.getDepDecl();
return LoopStmtChecker.getBaseLoopId();
@@ -7049,53 +7673,43 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
Condition = S;
S = getExprAsWritten(S);
SourceLocation CondLoc = S->getBeginLoc();
- if (auto *BO = dyn_cast<BinaryOperator>(S)) {
- if (BO->isRelationalOp()) {
- if (getInitLCDecl(BO->getLHS()) == LCDecl)
- return setUB(BO->getRHS(),
- (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_LE),
- (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
- BO->getSourceRange(), BO->getOperatorLoc());
- if (getInitLCDecl(BO->getRHS()) == LCDecl)
- return setUB(BO->getLHS(),
- (BO->getOpcode() == BO_GT || BO->getOpcode() == BO_GE),
- (BO->getOpcode() == BO_LT || BO->getOpcode() == BO_GT),
- BO->getSourceRange(), BO->getOperatorLoc());
- } else if (IneqCondIsCanonical && BO->getOpcode() == BO_NE)
- return setUB(
- getInitLCDecl(BO->getLHS()) == LCDecl ? BO->getRHS() : BO->getLHS(),
- /*LessOp=*/llvm::None,
- /*StrictOp=*/true, BO->getSourceRange(), BO->getOperatorLoc());
+ auto &&CheckAndSetCond = [this, IneqCondIsCanonical](
+ BinaryOperatorKind Opcode, const Expr *LHS,
+ const Expr *RHS, SourceRange SR,
+ SourceLocation OpLoc) -> llvm::Optional<bool> {
+ if (BinaryOperator::isRelationalOp(Opcode)) {
+ if (getInitLCDecl(LHS) == LCDecl)
+ return setUB(const_cast<Expr *>(RHS),
+ (Opcode == BO_LT || Opcode == BO_LE),
+ (Opcode == BO_LT || Opcode == BO_GT), SR, OpLoc);
+ if (getInitLCDecl(RHS) == LCDecl)
+ return setUB(const_cast<Expr *>(LHS),
+ (Opcode == BO_GT || Opcode == BO_GE),
+ (Opcode == BO_LT || Opcode == BO_GT), SR, OpLoc);
+ } else if (IneqCondIsCanonical && Opcode == BO_NE) {
+ return setUB(const_cast<Expr *>(getInitLCDecl(LHS) == LCDecl ? RHS : LHS),
+ /*LessOp=*/llvm::None,
+ /*StrictOp=*/true, SR, OpLoc);
+ }
+ return llvm::None;
+ };
+ llvm::Optional<bool> Res;
+ if (auto *RBO = dyn_cast<CXXRewrittenBinaryOperator>(S)) {
+ CXXRewrittenBinaryOperator::DecomposedForm DF = RBO->getDecomposedForm();
+ Res = CheckAndSetCond(DF.Opcode, DF.LHS, DF.RHS, RBO->getSourceRange(),
+ RBO->getOperatorLoc());
+ } else if (auto *BO = dyn_cast<BinaryOperator>(S)) {
+ Res = CheckAndSetCond(BO->getOpcode(), BO->getLHS(), BO->getRHS(),
+ BO->getSourceRange(), BO->getOperatorLoc());
} else if (auto *CE = dyn_cast<CXXOperatorCallExpr>(S)) {
if (CE->getNumArgs() == 2) {
- auto Op = CE->getOperator();
- switch (Op) {
- case OO_Greater:
- case OO_GreaterEqual:
- case OO_Less:
- case OO_LessEqual:
- if (getInitLCDecl(CE->getArg(0)) == LCDecl)
- return setUB(CE->getArg(1), Op == OO_Less || Op == OO_LessEqual,
- Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
- CE->getOperatorLoc());
- if (getInitLCDecl(CE->getArg(1)) == LCDecl)
- return setUB(CE->getArg(0), Op == OO_Greater || Op == OO_GreaterEqual,
- Op == OO_Less || Op == OO_Greater, CE->getSourceRange(),
- CE->getOperatorLoc());
- break;
- case OO_ExclaimEqual:
- if (IneqCondIsCanonical)
- return setUB(getInitLCDecl(CE->getArg(0)) == LCDecl ? CE->getArg(1)
- : CE->getArg(0),
- /*LessOp=*/llvm::None,
- /*StrictOp=*/true, CE->getSourceRange(),
- CE->getOperatorLoc());
- break;
- default:
- break;
- }
+ Res = CheckAndSetCond(
+ BinaryOperator::getOverloadedOpcode(CE->getOperator()), CE->getArg(0),
+ CE->getArg(1), CE->getSourceRange(), CE->getOperatorLoc());
}
}
+ if (Res.hasValue())
+ return *Res;
if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
SemaRef.Diag(CondLoc, diag::err_omp_loop_not_canonical_cond)
@@ -7413,10 +8027,7 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
// LB = TestIsLessOp.getValue() ? min(LB(MinVal), LB(MaxVal)) :
// max(LB(MinVal), LB(MaxVal))
if (InitDependOnLC) {
- const LoopIterationSpace &IS =
- ResultIterSpaces[ResultIterSpaces.size() - 1 -
- InitDependOnLC.getValueOr(
- CondDependOnLC.getValueOr(0))];
+ const LoopIterationSpace &IS = ResultIterSpaces[*InitDependOnLC - 1];
if (!IS.MinValue || !IS.MaxValue)
return nullptr;
// OuterVar = Min
@@ -7493,10 +8104,7 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
// UB = TestIsLessOp.getValue() ? max(UB(MinVal), UB(MaxVal)) :
// min(UB(MinVal), UB(MaxVal))
if (CondDependOnLC) {
- const LoopIterationSpace &IS =
- ResultIterSpaces[ResultIterSpaces.size() - 1 -
- InitDependOnLC.getValueOr(
- CondDependOnLC.getValueOr(0))];
+ const LoopIterationSpace &IS = ResultIterSpaces[*CondDependOnLC - 1];
if (!IS.MinValue || !IS.MaxValue)
return nullptr;
// OuterVar = Min
@@ -7577,9 +8185,9 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
if (!Upper || !Lower)
return nullptr;
- ExprResult Diff =
- calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
- TestIsStrictOp, /*RoundToStep=*/true, Captures);
+ ExprResult Diff = calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper,
+ Step, VarType, TestIsStrictOp,
+ /*RoundToStep=*/true, Captures);
if (!Diff.isUsable())
return nullptr;
@@ -7655,9 +8263,9 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
// Build minimum/maximum value based on number of iterations.
QualType VarType = LCDecl->getType().getNonReferenceType();
- ExprResult Diff =
- calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
- TestIsStrictOp, /*RoundToStep=*/false, Captures);
+ ExprResult Diff = calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper,
+ Step, VarType, TestIsStrictOp,
+ /*RoundToStep=*/false, Captures);
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
@@ -7848,9 +8456,9 @@ Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
if (!Upper || !Lower)
return nullptr;
- ExprResult Diff = calculateNumIters(SemaRef, S, DefaultLoc, Lower, Upper,
- Step, VarType, /*TestIsStrictOp=*/false,
- /*RoundToStep=*/false, Captures);
+ ExprResult Diff = calculateNumIters(
+ SemaRef, S, DefaultLoc, Lower, Upper, Step, VarType,
+ /*TestIsStrictOp=*/false, /*RoundToStep=*/false, Captures);
if (!Diff.isUsable())
return nullptr;
@@ -7865,7 +8473,8 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
if (AssociatedLoops > 0 &&
isOpenMPLoopDirective(DSAStack->getCurrentDirective())) {
DSAStack->loopStart();
- OpenMPIterationSpaceChecker ISC(*this, *DSAStack, ForLoc);
+ OpenMPIterationSpaceChecker ISC(*this, /*SupportsNonRectangular=*/true,
+ *DSAStack, ForLoc);
if (!ISC.checkAndSetInit(Init, /*EmitDiags=*/false)) {
if (ValueDecl *D = ISC.getLoopDecl()) {
auto *VD = dyn_cast<VarDecl>(D);
@@ -7950,9 +8559,12 @@ static bool checkOpenMPIterationSpace(
Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
llvm::MutableArrayRef<LoopIterationSpace> ResultIterSpaces,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
+ bool SupportsNonRectangular = !isOpenMPLoopTransformationDirective(DKind);
// OpenMP [2.9.1, Canonical Loop Form]
// for (init-expr; test-expr; incr-expr) structured-block
// for (range-decl: range-expr) structured-block
+ if (auto *CanonLoop = dyn_cast_or_null<OMPCanonicalLoop>(S))
+ S = CanonLoop->getLoopStmt();
auto *For = dyn_cast_or_null<ForStmt>(S);
auto *CXXFor = dyn_cast_or_null<CXXForRangeStmt>(S);
// Ranged for is supported only in OpenMP 5.0.
@@ -7981,7 +8593,7 @@ static bool checkOpenMPIterationSpace(
assert(((For && For->getBody()) || (CXXFor && CXXFor->getBody())) &&
"No loop body.");
- OpenMPIterationSpaceChecker ISC(SemaRef, DSA,
+ OpenMPIterationSpaceChecker ISC(SemaRef, SupportsNonRectangular, DSA,
For ? For->getForLoc() : CXXFor->getForLoc());
// Check init.
@@ -8037,7 +8649,8 @@ static bool checkOpenMPIterationSpace(
ISC.buildNumIterations(DSA.getCurScope(), ResultIterSpaces,
(isOpenMPWorksharingDirective(DKind) ||
isOpenMPTaskLoopDirective(DKind) ||
- isOpenMPDistributeDirective(DKind)),
+ isOpenMPDistributeDirective(DKind) ||
+ isOpenMPLoopTransformationDirective(DKind)),
Captures);
ResultIterSpaces[CurrentNestedLoopCount].CounterVar =
ISC.buildCounterVar(Captures, DSA);
@@ -8294,8 +8907,11 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Expr *OrderedLoopCountExpr, Stmt *AStmt, Sema &SemaRef,
DSAStackTy &DSA,
Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
- OMPLoopDirective::HelperExprs &Built) {
+ OMPLoopBasedDirective::HelperExprs &Built) {
unsigned NestedLoopCount = 1;
+ bool SupportsNonPerfectlyNested = (SemaRef.LangOpts.OpenMP >= 50) &&
+ !isOpenMPLoopTransformationDirective(DKind);
+
if (CollapseLoopCountExpr) {
// Found 'collapse' clause - calculate collapse number.
Expr::EvalResult Result;
@@ -8332,58 +8948,48 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// This is helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
- SmallVector<LoopIterationSpace, 4> IterSpaces(
- std::max(OrderedLoopCount, NestedLoopCount));
- Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
- for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
- if (checkOpenMPIterationSpace(
- DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
- std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
- OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces, Captures))
- return 0;
- // Move on to the next nested for loop, or to the loop body.
- // OpenMP [2.8.1, simd construct, Restrictions]
- // All loops associated with the construct must be perfectly nested; that
- // is, there must be no intervening code nor any OpenMP directive between
- // any two loops.
- if (auto *For = dyn_cast<ForStmt>(CurStmt)) {
- CurStmt = For->getBody();
- } else {
- assert(isa<CXXForRangeStmt>(CurStmt) &&
- "Expected canonical for or range-based for loops.");
- CurStmt = cast<CXXForRangeStmt>(CurStmt)->getBody();
- }
- CurStmt = OMPLoopDirective::tryToFindNextInnerLoop(
- CurStmt, SemaRef.LangOpts.OpenMP >= 50);
- }
- for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
- if (checkOpenMPIterationSpace(
- DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
- std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
- OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces, Captures))
- return 0;
- if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
- // Handle initialization of captured loop iterator variables.
- auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
- if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
- Captures[DRE] = DRE;
- }
- }
- // Move on to the next nested for loop, or to the loop body.
- // OpenMP [2.8.1, simd construct, Restrictions]
- // All loops associated with the construct must be perfectly nested; that
- // is, there must be no intervening code nor any OpenMP directive between
- // any two loops.
- if (auto *For = dyn_cast<ForStmt>(CurStmt)) {
- CurStmt = For->getBody();
- } else {
- assert(isa<CXXForRangeStmt>(CurStmt) &&
- "Expected canonical for or range-based for loops.");
- CurStmt = cast<CXXForRangeStmt>(CurStmt)->getBody();
- }
- CurStmt = OMPLoopDirective::tryToFindNextInnerLoop(
- CurStmt, SemaRef.LangOpts.OpenMP >= 50);
- }
+ unsigned NumLoops = std::max(OrderedLoopCount, NestedLoopCount);
+ SmallVector<LoopIterationSpace, 4> IterSpaces(NumLoops);
+ if (!OMPLoopBasedDirective::doForAllLoops(
+ AStmt->IgnoreContainers(!isOpenMPLoopTransformationDirective(DKind)),
+ SupportsNonPerfectlyNested, NumLoops,
+ [DKind, &SemaRef, &DSA, NumLoops, NestedLoopCount,
+ CollapseLoopCountExpr, OrderedLoopCountExpr, &VarsWithImplicitDSA,
+ &IterSpaces, &Captures](unsigned Cnt, Stmt *CurStmt) {
+ if (checkOpenMPIterationSpace(
+ DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
+ NumLoops, CollapseLoopCountExpr, OrderedLoopCountExpr,
+ VarsWithImplicitDSA, IterSpaces, Captures))
+ return true;
+ if (Cnt > 0 && Cnt >= NestedLoopCount &&
+ IterSpaces[Cnt].CounterVar) {
+ // Handle initialization of captured loop iterator variables.
+ auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
+ if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
+ Captures[DRE] = DRE;
+ }
+ }
+ return false;
+ },
+ [&SemaRef, &Captures](OMPLoopBasedDirective *Transform) {
+ Stmt *DependentPreInits;
+ if (auto *Dir = dyn_cast<OMPTileDirective>(Transform)) {
+ DependentPreInits = Dir->getPreInits();
+ } else if (auto *Dir = dyn_cast<OMPUnrollDirective>(Transform)) {
+ DependentPreInits = Dir->getPreInits();
+ } else {
+ llvm_unreachable("Unexpected loop transformation");
+ }
+ if (!DependentPreInits)
+ return;
+ for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup()) {
+ auto *D = cast<VarDecl>(C);
+ DeclRefExpr *Ref = buildDeclRefExpr(SemaRef, D, D->getType(),
+ Transform->getBeginLoc());
+ Captures[Ref] = Ref;
+ }
+ }))
+ return 0;
Built.clear(/* size */ NestedLoopCount);
@@ -8531,7 +9137,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Build variables passed into runtime, necessary for worksharing directives.
ExprResult LB, UB, IL, ST, EUB, CombLB, CombUB, PrevLB, PrevUB, CombEUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
- isOpenMPDistributeDirective(DKind)) {
+ isOpenMPDistributeDirective(DKind) ||
+ isOpenMPLoopTransformationDirective(DKind)) {
// Lower bound variable, initialized with zero.
VarDecl *LBDecl = buildVarDecl(SemaRef, InitLoc, VType, ".omp.lb");
LB = buildDeclRefExpr(SemaRef, LBDecl, VType, InitLoc);
@@ -8629,11 +9236,12 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
{
VarDecl *IVDecl = buildVarDecl(SemaRef, InitLoc, RealVType, ".omp.iv");
IV = buildDeclRefExpr(SemaRef, IVDecl, RealVType, InitLoc);
- Expr *RHS =
- (isOpenMPWorksharingDirective(DKind) ||
- isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
- ? LB.get()
- : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
+ Expr *RHS = (isOpenMPWorksharingDirective(DKind) ||
+ isOpenMPTaskLoopDirective(DKind) ||
+ isOpenMPDistributeDirective(DKind) ||
+ isOpenMPLoopTransformationDirective(DKind))
+ ? LB.get()
+ : SemaRef.ActOnIntegerConstant(SourceLocation(), 0).get();
Init = SemaRef.BuildBinOp(CurScope, InitLoc, BO_Assign, IV.get(), RHS);
Init = SemaRef.ActOnFinishFullExpr(Init.get(), /*DiscardedValue*/ false);
@@ -8671,7 +9279,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
}
ExprResult Cond =
(isOpenMPWorksharingDirective(DKind) ||
- isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind))
+ isOpenMPTaskLoopDirective(DKind) || isOpenMPDistributeDirective(DKind) ||
+ isOpenMPLoopTransformationDirective(DKind))
? SemaRef.BuildBinOp(CurScope, CondLoc,
UseStrictCompare ? BO_LT : BO_LE, IV.get(),
BoundUB)
@@ -8719,7 +9328,8 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// base variables for the update
ExprResult NextLB, NextUB, CombNextLB, CombNextUB;
if (isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
- isOpenMPDistributeDirective(DKind)) {
+ isOpenMPDistributeDirective(DKind) ||
+ isOpenMPLoopTransformationDirective(DKind)) {
// LB + ST
NextLB = SemaRef.BuildBinOp(CurScope, IncLoc, BO_Add, LB.get(), ST.get());
if (!NextLB.isUsable())
@@ -8791,11 +9401,21 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// Build expression: UB = min(UB, prevUB) for #for in composite or combined
// construct
+ ExprResult NewPrevUB = PrevUB;
SourceLocation DistEUBLoc = AStmt->getBeginLoc();
- ExprResult IsUBGreater =
- SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT, UB.get(), PrevUB.get());
+ if (!SemaRef.Context.hasSameType(UB.get()->getType(),
+ PrevUB.get()->getType())) {
+ NewPrevUB = SemaRef.BuildCStyleCastExpr(
+ DistEUBLoc,
+ SemaRef.Context.getTrivialTypeSourceInfo(UB.get()->getType()),
+ DistEUBLoc, NewPrevUB.get());
+ if (!NewPrevUB.isUsable())
+ return 0;
+ }
+ ExprResult IsUBGreater = SemaRef.BuildBinOp(CurScope, DistEUBLoc, BO_GT,
+ UB.get(), NewPrevUB.get());
ExprResult CondOp = SemaRef.ActOnConditionalOp(
- DistEUBLoc, DistEUBLoc, IsUBGreater.get(), PrevUB.get(), UB.get());
+ DistEUBLoc, DistEUBLoc, IsUBGreater.get(), NewPrevUB.get(), UB.get());
PrevEUB = SemaRef.BuildBinOp(CurScope, DistIncLoc, BO_Assign, UB.get(),
CondOp.get());
PrevEUB =
@@ -9045,7 +9665,7 @@ Sema::ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -9084,7 +9704,7 @@ Sema::ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -9120,7 +9740,7 @@ StmtResult Sema::ActOnOpenMPForSimdDirective(
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -9204,6 +9824,64 @@ StmtResult Sema::ActOnOpenMPSectionDirective(Stmt *AStmt,
DSAStack->isCancelRegion());
}
+/// Look through parentheses, casts and implicit nodes and return \p E if it is
+/// a call expression with a statically known (direct) callee; otherwise return
+/// nullptr. Used below to locate the target-call of a 'dispatch' construct.
+static Expr *getDirectCallExpr(Expr *E) {
+  E = E->IgnoreParenCasts()->IgnoreImplicit();
+  if (auto *CE = dyn_cast<CallExpr>(E))
+    // Only calls whose callee is resolvable at compile time qualify.
+    if (CE->getDirectCallee())
+      return E;
+  return nullptr;
+}
+
+/// Semantic analysis for '#pragma omp dispatch'. Verifies that the associated
+/// statement is an expression statement of one of the forms allowed by
+/// OpenMP 5.1 (a direct call, optionally assigned to an lvalue), records the
+/// location of the target call, and builds the OMPDispatchDirective AST node.
+StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
+                                              Stmt *AStmt,
+                                              SourceLocation StartLoc,
+                                              SourceLocation EndLoc) {
+  if (!AStmt)
+    return StmtError();
+
+  // The associated statement arrives wrapped in a CapturedStmt; inspect the
+  // statement actually written by the user.
+  Stmt *S = cast<CapturedStmt>(AStmt)->getCapturedStmt();
+
+  // 5.1 OpenMP
+  // expression-stmt : an expression statement with one of the following forms:
+  //   expression = target-call ( [expression-list] );
+  //   target-call ( [expression-list] );
+
+  SourceLocation TargetCallLoc;
+
+  // In a dependent context the form of the statement cannot be checked yet;
+  // validation is deferred to instantiation.
+  if (!CurContext->isDependentContext()) {
+    Expr *TargetCall = nullptr;
+
+    auto *E = dyn_cast<Expr>(S);
+    if (!E) {
+      Diag(S->getBeginLoc(), diag::err_omp_dispatch_statement_call);
+      return StmtError();
+    }
+
+    E = E->IgnoreParenCasts()->IgnoreImplicit();
+
+    if (auto *BO = dyn_cast<BinaryOperator>(E)) {
+      // Plain assignment: the RHS must be the direct target call.
+      if (BO->getOpcode() == BO_Assign)
+        TargetCall = getDirectCallExpr(BO->getRHS());
+    } else {
+      // C++ overloaded operator=: the second argument is the assigned value.
+      if (auto *COCE = dyn_cast<CXXOperatorCallExpr>(E))
+        if (COCE->getOperator() == OO_Equal)
+          TargetCall = getDirectCallExpr(COCE->getArg(1));
+      // Otherwise the whole expression must itself be the target call.
+      if (!TargetCall)
+        TargetCall = getDirectCallExpr(E);
+    }
+    if (!TargetCall) {
+      Diag(E->getBeginLoc(), diag::err_omp_dispatch_statement_call);
+      return StmtError();
+    }
+    TargetCallLoc = TargetCall->getExprLoc();
+  }
+
+  setFunctionHasBranchProtectedScope();
+
+  return OMPDispatchDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+                                      TargetCallLoc);
+}
+
StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -9246,6 +9924,18 @@ StmtResult Sema::ActOnOpenMPMasterDirective(Stmt *AStmt,
return OMPMasterDirective::Create(Context, StartLoc, EndLoc, AStmt);
}
+/// Semantic analysis for '#pragma omp masked': builds the OMPMaskedDirective
+/// node over the (captured) associated statement. Clause checking is done
+/// elsewhere; here only the presence of the statement is required.
+StmtResult Sema::ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
+                                            Stmt *AStmt,
+                                            SourceLocation StartLoc,
+                                            SourceLocation EndLoc) {
+  if (!AStmt)
+    return StmtError();
+
+  setFunctionHasBranchProtectedScope();
+
+  return OMPMaskedDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt);
+}
+
StmtResult Sema::ActOnOpenMPCriticalDirective(
const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc) {
@@ -9280,14 +9970,14 @@ StmtResult Sema::ActOnOpenMPCriticalDirective(
Diag(StartLoc, diag::err_omp_critical_with_hint);
if (HintLoc.isValid())
Diag(HintLoc, diag::note_omp_critical_hint_here)
- << 0 << Hint.toString(/*Radix=*/10, /*Signed=*/false);
+ << 0 << toString(Hint, /*Radix=*/10, /*Signed=*/false);
else
Diag(StartLoc, diag::note_omp_critical_no_hint) << 0;
if (const auto *C = Pair.first->getSingleClause<OMPHintClause>()) {
Diag(C->getBeginLoc(), diag::note_omp_critical_hint_here)
<< 1
- << C->getHint()->EvaluateKnownConstInt(Context).toString(
- /*Radix=*/10, /*Signed=*/false);
+ << toString(C->getHint()->EvaluateKnownConstInt(Context),
+ /*Radix=*/10, /*Signed=*/false);
} else {
Diag(Pair.first->getBeginLoc(), diag::note_omp_critical_no_hint) << 1;
}
@@ -9317,7 +10007,7 @@ StmtResult Sema::ActOnOpenMPParallelForDirective(
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -9361,7 +10051,7 @@ StmtResult Sema::ActOnOpenMPParallelForSimdDirective(
// longjmp() and throw() must not violate the entry/exit criteria.
CS->getCapturedDecl()->setNothrow();
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -9453,14 +10143,14 @@ Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
DSAStack->getTaskgroupReductionRef(), DSAStack->isCancelRegion());
}
-/// detach and mergeable clauses are mutially exclusive, check for it.
-static bool checkDetachMergeableClauses(Sema &S,
- ArrayRef<OMPClause *> Clauses) {
+/// Find and diagnose mutually exclusive clause kinds.
+static bool checkMutuallyExclusiveClauses(
+ Sema &S, ArrayRef<OMPClause *> Clauses,
+ ArrayRef<OpenMPClauseKind> MutuallyExclusiveClauses) {
const OMPClause *PrevClause = nullptr;
bool ErrorFound = false;
for (const OMPClause *C : Clauses) {
- if (C->getClauseKind() == OMPC_detach ||
- C->getClauseKind() == OMPC_mergeable) {
+ if (llvm::is_contained(MutuallyExclusiveClauses, C->getClauseKind())) {
if (!PrevClause) {
PrevClause = C;
} else if (PrevClause->getClauseKind() != C->getClauseKind()) {
@@ -9485,7 +10175,8 @@ StmtResult Sema::ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
// OpenMP 5.0, 2.10.1 task Construct
// If a detach clause appears on the directive, then a mergeable clause cannot
// appear on the same directive.
- if (checkDetachMergeableClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_detach, OMPC_mergeable}))
return StmtError();
auto *CS = cast<CapturedStmt>(AStmt);
@@ -9943,9 +10634,9 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
// OpaqueValueExpr(expr)' or 'OpaqueValueExpr(expr) binop
// OpaqueValueExpr(x)' and then cast it to the type of the 'x' expression.
auto *OVEX = new (SemaRef.getASTContext())
- OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_RValue);
+ OpaqueValueExpr(X->getExprLoc(), X->getType(), VK_PRValue);
auto *OVEExpr = new (SemaRef.getASTContext())
- OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_RValue);
+ OpaqueValueExpr(E->getExprLoc(), E->getType(), VK_PRValue);
ExprResult Update =
SemaRef.CreateBuiltinBinOp(OpLoc, Op, IsXLHSInRHSPart ? OVEX : OVEExpr,
IsXLHSInRHSPart ? OVEExpr : OVEX);
@@ -10555,7 +11246,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -10789,28 +11480,6 @@ StmtResult Sema::ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
CancelRegion);
}
-static bool checkGrainsizeNumTasksClauses(Sema &S,
- ArrayRef<OMPClause *> Clauses) {
- const OMPClause *PrevClause = nullptr;
- bool ErrorFound = false;
- for (const OMPClause *C : Clauses) {
- if (C->getClauseKind() == OMPC_grainsize ||
- C->getClauseKind() == OMPC_num_tasks) {
- if (!PrevClause)
- PrevClause = C;
- else if (PrevClause->getClauseKind() != C->getClauseKind()) {
- S.Diag(C->getBeginLoc(), diag::err_omp_clauses_mutually_exclusive)
- << getOpenMPClauseName(C->getClauseKind())
- << getOpenMPClauseName(PrevClause->getClauseKind());
- S.Diag(PrevClause->getBeginLoc(), diag::note_omp_previous_clause)
- << getOpenMPClauseName(PrevClause->getClauseKind());
- ErrorFound = true;
- }
- }
- }
- return ErrorFound;
-}
-
static bool checkReductionClauseWithNogroup(Sema &S,
ArrayRef<OMPClause *> Clauses) {
const OMPClause *ReductionClause = nullptr;
@@ -10845,7 +11514,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -10861,7 +11530,8 @@ StmtResult Sema::ActOnOpenMPTaskLoopDirective(
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
@@ -10882,7 +11552,7 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -10909,7 +11579,8 @@ StmtResult Sema::ActOnOpenMPTaskLoopSimdDirective(
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
@@ -10931,7 +11602,7 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -10947,7 +11618,8 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
@@ -10968,7 +11640,7 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -10995,7 +11667,8 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
@@ -11036,7 +11709,7 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11052,7 +11725,8 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
@@ -11092,7 +11766,7 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11119,7 +11793,8 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// The grainsize clause and num_tasks clause are mutually exclusive and may
// not appear on the same taskloop directive.
- if (checkGrainsizeNumTasksClauses(*this, Clauses))
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
return StmtError();
// OpenMP, [2.9.2 taskloop Construct, Restrictions]
// If a reduction clause is present on the taskloop directive, the nogroup
@@ -11141,7 +11816,7 @@ StmtResult Sema::ActOnOpenMPDistributeDirective(
return StmtError();
assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -11184,7 +11859,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11228,7 +11903,7 @@ StmtResult Sema::ActOnOpenMPDistributeParallelForSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11284,7 +11959,7 @@ StmtResult Sema::ActOnOpenMPDistributeSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -11340,7 +12015,7 @@ StmtResult Sema::ActOnOpenMPTargetParallelForSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' or 'ordered' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11395,7 +12070,7 @@ StmtResult Sema::ActOnOpenMPTargetSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will define the
// nested loops number.
unsigned NestedLoopCount =
@@ -11451,7 +12126,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -11497,7 +12172,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11559,7 +12234,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11621,7 +12296,7 @@ StmtResult Sema::ActOnOpenMPTeamsDistributeParallelForDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11700,7 +12375,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11743,7 +12418,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11798,7 +12473,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount =
@@ -11857,7 +12532,7 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
CS->getCapturedDecl()->setNothrow();
}
- OMPLoopDirective::HelperExprs B;
+ OMPLoopBasedDirective::HelperExprs B;
// In presence of clause 'collapse' with number of loops, it will
// define the nested loops number.
unsigned NestedLoopCount = checkOpenMPLoop(
@@ -11889,6 +12564,542 @@ StmtResult Sema::ActOnOpenMPTargetTeamsDistributeSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+/// Analyze the loop nest associated with a loop-transformation directive
+/// (e.g. 'tile', 'unroll').
+///
+/// Walks \p NumLoops perfectly nested canonical loops under \p AStmt via
+/// OMPLoopBasedDirective::doForAllLoops, running checkOpenMPLoop on each to
+/// fill \p LoopHelpers[Cnt]. On success \p Body is set to the innermost loop
+/// body and \p OriginalInits collects, per loop level, the original
+/// init-statements (plus pre-init declarations contributed by nested loop
+/// transformations) that must be re-emitted before the generated loops.
+/// Returns false if any nested loop fails analysis.
+bool Sema::checkTransformableLoopNest(
+    OpenMPDirectiveKind Kind, Stmt *AStmt, int NumLoops,
+    SmallVectorImpl<OMPLoopBasedDirective::HelperExprs> &LoopHelpers,
+    Stmt *&Body,
+    SmallVectorImpl<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>>
+        &OriginalInits) {
+  // Slot for pre-inits preceding the outermost loop.
+  OriginalInits.emplace_back();
+  bool Result = OMPLoopBasedDirective::doForAllLoops(
+      AStmt->IgnoreContainers(), /*TryImperfectlyNestedLoops=*/false, NumLoops,
+      [this, &LoopHelpers, &Body, &OriginalInits, Kind](unsigned Cnt,
+                                                        Stmt *CurStmt) {
+        VarsWithInheritedDSAType TmpDSA;
+        unsigned SingleNumLoops =
+            checkOpenMPLoop(Kind, nullptr, nullptr, CurStmt, *this, *DSAStack,
+                            TmpDSA, LoopHelpers[Cnt]);
+        // Returning true aborts doForAllLoops (analysis failed).
+        if (SingleNumLoops == 0)
+          return true;
+        assert(SingleNumLoops == 1 && "Expect single loop iteration space");
+        if (auto *For = dyn_cast<ForStmt>(CurStmt)) {
+          OriginalInits.back().push_back(For->getInit());
+          Body = For->getBody();
+        } else {
+          assert(isa<CXXForRangeStmt>(CurStmt) &&
+                 "Expected canonical for or range-based for loops.");
+          auto *CXXFor = cast<CXXForRangeStmt>(CurStmt);
+          OriginalInits.back().push_back(CXXFor->getBeginStmt());
+          Body = CXXFor->getBody();
+        }
+        // Open the slot for the next (inner) loop level.
+        OriginalInits.emplace_back();
+        return false;
+      },
+      [&OriginalInits](OMPLoopBasedDirective *Transform) {
+        // A nested loop transformation contributes its pre-init declarations
+        // to the current loop level so they are emitted before its loops.
+        Stmt *DependentPreInits;
+        if (auto *Dir = dyn_cast<OMPTileDirective>(Transform))
+          DependentPreInits = Dir->getPreInits();
+        else if (auto *Dir = dyn_cast<OMPUnrollDirective>(Transform))
+          DependentPreInits = Dir->getPreInits();
+        else
+          llvm_unreachable("Unhandled loop transformation");
+        if (!DependentPreInits)
+          return;
+        for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup())
+          OriginalInits.back().push_back(C);
+      });
+  // The innermost loop has no loop nested inside, hence no pre-inits may have
+  // been added to the trailing slot; drop it.
+  assert(OriginalInits.back().empty() && "No preinit after innermost loop");
+  OriginalInits.pop_back();
+  return Result;
+}
+
+/// Semantic analysis for '#pragma omp tile'. Validates the transformable loop
+/// nest (one canonical loop per entry of the 'sizes' clause) and, outside of
+/// dependent contexts, de-sugars the nest into NumLoops "floor" loops that
+/// step over tiles enclosing NumLoops "tile" loops that iterate within a tile,
+/// so the result can itself be consumed by other loop-associated directives.
+StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
+                                          Stmt *AStmt, SourceLocation StartLoc,
+                                          SourceLocation EndLoc) {
+  auto SizesClauses =
+      OMPExecutableDirective::getClausesOfKind<OMPSizesClause>(Clauses);
+  if (SizesClauses.empty()) {
+    // A missing 'sizes' clause is already reported by the parser.
+    return StmtError();
+  }
+  const OMPSizesClause *SizesClause = *SizesClauses.begin();
+  // The 'sizes' clause determines how many nested loops are tiled.
+  unsigned NumLoops = SizesClause->getNumSizes();
+
+  // Empty statement should only be possible if there already was an error.
+  if (!AStmt)
+    return StmtError();
+
+  // Verify and diagnose loop nest.
+  SmallVector<OMPLoopBasedDirective::HelperExprs, 4> LoopHelpers(NumLoops);
+  Stmt *Body = nullptr;
+  SmallVector<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>, 4>
+      OriginalInits;
+  if (!checkTransformableLoopNest(OMPD_tile, AStmt, NumLoops, LoopHelpers, Body,
+                                  OriginalInits))
+    return StmtError();
+
+  // Delay tiling to when template is completely instantiated.
+  if (CurContext->isDependentContext())
+    return OMPTileDirective::Create(Context, StartLoc, EndLoc, Clauses,
+                                    NumLoops, AStmt, nullptr, nullptr);
+
+  // Declarations that must be emitted before the generated loop nest.
+  SmallVector<Decl *, 4> PreInits;
+
+  // Create iteration variables for the generated loops.
+  SmallVector<VarDecl *, 4> FloorIndVars;
+  SmallVector<VarDecl *, 4> TileIndVars;
+  FloorIndVars.resize(NumLoops);
+  TileIndVars.resize(NumLoops);
+  for (unsigned I = 0; I < NumLoops; ++I) {
+    OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
+
+    assert(LoopHelper.Counters.size() == 1 &&
+           "Expect single-dimensional loop iteration space");
+    auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters.front());
+    std::string OrigVarName = OrigCntVar->getNameInfo().getAsString();
+    DeclRefExpr *IterVarRef = cast<DeclRefExpr>(LoopHelper.IterationVarRef);
+    QualType CntTy = IterVarRef->getType();
+
+    // Iteration variable for the floor (i.e. outer) loop.
+    {
+      std::string FloorCntName =
+          (Twine(".floor_") + llvm::utostr(I) + ".iv." + OrigVarName).str();
+      VarDecl *FloorCntDecl =
+          buildVarDecl(*this, {}, CntTy, FloorCntName, nullptr, OrigCntVar);
+      FloorIndVars[I] = FloorCntDecl;
+    }
+
+    // Iteration variable for the tile (i.e. inner) loop.
+    {
+      std::string TileCntName =
+          (Twine(".tile_") + llvm::utostr(I) + ".iv." + OrigVarName).str();
+
+      // Reuse the iteration variable created by checkOpenMPLoop. It is also
+      // used by the expressions to derive the original iteration variable's
+      // value from the logical iteration number.
+      auto *TileCntDecl = cast<VarDecl>(IterVarRef->getDecl());
+      TileCntDecl->setDeclName(&PP.getIdentifierTable().get(TileCntName));
+      TileIndVars[I] = TileCntDecl;
+    }
+    // Hoist the original init-statements (and decls from nested transforms)
+    // gathered by checkTransformableLoopNest into the pre-inits.
+    for (auto &P : OriginalInits[I]) {
+      if (auto *D = P.dyn_cast<Decl *>())
+        PreInits.push_back(D);
+      else if (auto *PI = dyn_cast_or_null<DeclStmt>(P.dyn_cast<Stmt *>()))
+        PreInits.append(PI->decl_begin(), PI->decl_end());
+    }
+    if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits))
+      PreInits.append(PI->decl_begin(), PI->decl_end());
+    // Gather declarations for the data members used as counters.
+    for (Expr *CounterRef : LoopHelper.Counters) {
+      auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
+      if (isa<OMPCapturedExprDecl>(CounterDecl))
+        PreInits.push_back(CounterDecl);
+    }
+  }
+
+  // Once the original iteration values are set, append the innermost body.
+  Stmt *Inner = Body;
+
+  // Create tile loops from the inside to the outside.
+  for (int I = NumLoops - 1; I >= 0; --I) {
+    OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers[I];
+    Expr *NumIterations = LoopHelper.NumIterations;
+    auto *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
+    QualType CntTy = OrigCntVar->getType();
+    Expr *DimTileSize = SizesClause->getSizesRefs()[I];
+    Scope *CurScope = getCurScope();
+
+    // Commonly used variables.
+    DeclRefExpr *TileIV = buildDeclRefExpr(*this, TileIndVars[I], CntTy,
+                                           OrigCntVar->getExprLoc());
+    DeclRefExpr *FloorIV = buildDeclRefExpr(*this, FloorIndVars[I], CntTy,
+                                            OrigCntVar->getExprLoc());
+
+    // For init-statement: auto .tile.iv = .floor.iv
+    AddInitializerToDecl(TileIndVars[I], DefaultLvalueConversion(FloorIV).get(),
+                         /*DirectInit=*/false);
+    Decl *CounterDecl = TileIndVars[I];
+    StmtResult InitStmt = new (Context)
+        DeclStmt(DeclGroupRef::Create(Context, &CounterDecl, 1),
+                 OrigCntVar->getBeginLoc(), OrigCntVar->getEndLoc());
+    if (!InitStmt.isUsable())
+      return StmtError();
+
+    // For cond-expression: .tile.iv < min(.floor.iv + DimTileSize,
+    // NumIterations)
+    ExprResult EndOfTile = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
+                                      BO_Add, FloorIV, DimTileSize);
+    if (!EndOfTile.isUsable())
+      return StmtError();
+    ExprResult IsPartialTile =
+        BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT,
+                   NumIterations, EndOfTile.get());
+    if (!IsPartialTile.isUsable())
+      return StmtError();
+    ExprResult MinTileAndIterSpace = ActOnConditionalOp(
+        LoopHelper.Cond->getBeginLoc(), LoopHelper.Cond->getEndLoc(),
+        IsPartialTile.get(), NumIterations, EndOfTile.get());
+    if (!MinTileAndIterSpace.isUsable())
+      return StmtError();
+    ExprResult CondExpr = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
+                                     BO_LT, TileIV, MinTileAndIterSpace.get());
+    if (!CondExpr.isUsable())
+      return StmtError();
+
+    // For incr-statement: ++.tile.iv
+    ExprResult IncrStmt =
+        BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(), UO_PreInc, TileIV);
+    if (!IncrStmt.isUsable())
+      return StmtError();
+
+    // Statements to set the original iteration variable's value from the
+    // logical iteration number.
+    // Generated for loop is:
+    // Original_for_init;
+    // for (auto .tile.iv = .floor.iv; .tile.iv < min(.floor.iv + DimTileSize,
+    // NumIterations); ++.tile.iv) {
+    //   Original_Body;
+    //   Original_counter_update;
+    // }
+    // FIXME: If the innermost body is an loop itself, inserting these
+    // statements stops it being recognized as a perfectly nested loop (e.g.
+    // for applying tiling again). If this is the case, sink the expressions
+    // further into the inner loop.
+    SmallVector<Stmt *, 4> BodyParts;
+    BodyParts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+    BodyParts.push_back(Inner);
+    Inner = CompoundStmt::Create(Context, BodyParts, Inner->getBeginLoc(),
+                                 Inner->getEndLoc());
+    Inner = new (Context)
+        ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr,
+                IncrStmt.get(), Inner, LoopHelper.Init->getBeginLoc(),
+                LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+  }
+
+  // Create floor loops from the inside to the outside.
+  for (int I = NumLoops - 1; I >= 0; --I) {
+    auto &LoopHelper = LoopHelpers[I];
+    Expr *NumIterations = LoopHelper.NumIterations;
+    DeclRefExpr *OrigCntVar = cast<DeclRefExpr>(LoopHelper.Counters[0]);
+    QualType CntTy = OrigCntVar->getType();
+    Expr *DimTileSize = SizesClause->getSizesRefs()[I];
+    Scope *CurScope = getCurScope();
+
+    // Commonly used variables.
+    DeclRefExpr *FloorIV = buildDeclRefExpr(*this, FloorIndVars[I], CntTy,
+                                            OrigCntVar->getExprLoc());
+
+    // For init-statement: auto .floor.iv = 0
+    AddInitializerToDecl(
+        FloorIndVars[I],
+        ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
+        /*DirectInit=*/false);
+    Decl *CounterDecl = FloorIndVars[I];
+    StmtResult InitStmt = new (Context)
+        DeclStmt(DeclGroupRef::Create(Context, &CounterDecl, 1),
+                 OrigCntVar->getBeginLoc(), OrigCntVar->getEndLoc());
+    if (!InitStmt.isUsable())
+      return StmtError();
+
+    // For cond-expression: .floor.iv < NumIterations
+    ExprResult CondExpr = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
+                                     BO_LT, FloorIV, NumIterations);
+    if (!CondExpr.isUsable())
+      return StmtError();
+
+    // For incr-statement: .floor.iv += DimTileSize
+    ExprResult IncrStmt = BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(),
+                                     BO_AddAssign, FloorIV, DimTileSize);
+    if (!IncrStmt.isUsable())
+      return StmtError();
+
+    Inner = new (Context)
+        ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr,
+                IncrStmt.get(), Inner, LoopHelper.Init->getBeginLoc(),
+                LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+  }
+
+  // Keep the original AStmt for printing/serialization; Inner is the
+  // de-sugared replacement used by consumers of the transformed loop.
+  return OMPTileDirective::Create(Context, StartLoc, EndLoc, Clauses, NumLoops,
+                                  AStmt, Inner,
+                                  buildPreInits(Context, PreInits));
+}
+
+/// Called on a well-formed '#pragma omp unroll' directive.
+///
+/// With a 'partial' clause the associated loop is rewritten here into an
+/// outer canonical loop (so the result can be associated with another
+/// loop-associated directive) whose body is the partially unrolled chunk.
+/// With 'full', no clause, or in a dependent context, the transformation is
+/// deferred (to instantiation or to LLVM's LoopUnroll via metadata).
+StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
+                                            Stmt *AStmt,
+                                            SourceLocation StartLoc,
+                                            SourceLocation EndLoc) {
+  // Empty statement should only be possible if there already was an error.
+  if (!AStmt)
+    return StmtError();
+
+  // 'full' and 'partial' are mutually exclusive on the same directive.
+  if (checkMutuallyExclusiveClauses(*this, Clauses, {OMPC_partial, OMPC_full}))
+    return StmtError();
+
+  const OMPFullClause *FullClause =
+      OMPExecutableDirective::getSingleClause<OMPFullClause>(Clauses);
+  const OMPPartialClause *PartialClause =
+      OMPExecutableDirective::getSingleClause<OMPPartialClause>(Clauses);
+  assert(!(FullClause && PartialClause) &&
+         "mutual exclusivity must have been checked before");
+
+  constexpr unsigned NumLoops = 1;
+  Stmt *Body = nullptr;
+  SmallVector<OMPLoopBasedDirective::HelperExprs, NumLoops> LoopHelpers(
+      NumLoops);
+  SmallVector<SmallVector<llvm::PointerUnion<Stmt *, Decl *>, 0>, NumLoops + 1>
+      OriginalInits;
+  if (!checkTransformableLoopNest(OMPD_unroll, AStmt, NumLoops, LoopHelpers,
+                                  Body, OriginalInits))
+    return StmtError();
+
+  // Delay unrolling to when template is completely instantiated.
+  if (CurContext->isDependentContext())
+    return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+                                      nullptr, nullptr);
+
+  OMPLoopBasedDirective::HelperExprs &LoopHelper = LoopHelpers.front();
+
+  if (FullClause) {
+    // 'full' unrolling requires a constant trip count. Suppress the generic
+    // not-an-ICE notes; we emit a dedicated diagnostic instead.
+    if (!VerifyPositiveIntegerConstantInClause(
+             LoopHelper.NumIterations, OMPC_full, /*StrictlyPositive=*/false,
+             /*SuppressExprDiags=*/true)
+             .isUsable()) {
+      Diag(AStmt->getBeginLoc(), diag::err_omp_unroll_full_variable_trip_count);
+      Diag(FullClause->getBeginLoc(), diag::note_omp_directive_here)
+          << "#pragma omp unroll full";
+      return StmtError();
+    }
+  }
+
+  // The generated loop may only be passed to other loop-associated directive
+  // when a partial clause is specified. Without the requirement it is
+  // sufficient to generate loop unroll metadata at code-generation.
+  if (!PartialClause)
+    return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+                                      nullptr, nullptr);
+
+  // Otherwise, we need to provide a de-sugared/transformed AST that can be
+  // associated with another loop directive.
+  //
+  // The canonical loop analysis return by checkTransformableLoopNest assumes
+  // the following structure to be the same loop without transformations or
+  // directives applied: \code OriginalInits; LoopHelper.PreInits;
+  // LoopHelper.Counters;
+  // for (; IV < LoopHelper.NumIterations; ++IV) {
+  //   LoopHelper.Updates;
+  //   Body;
+  // }
+  // \endcode
+  // where IV is a variable declared and initialized to 0 in LoopHelper.PreInits
+  // and referenced by LoopHelper.IterationVarRef.
+  //
+  // The unrolling directive transforms this into the following loop:
+  // \code
+  // OriginalInits;         \
+  // LoopHelper.PreInits;    > NewPreInits
+  // LoopHelper.Counters;   /
+  // for (auto UIV = 0; UIV < LoopHelper.NumIterations; UIV+=Factor) {
+  //   #pragma clang loop unroll_count(Factor)
+  //   for (IV = UIV; IV < UIV + Factor && UIV < LoopHelper.NumIterations; ++IV)
+  //   {
+  //     LoopHelper.Updates;
+  //     Body;
+  //   }
+  // }
+  // \endcode
+  // where UIV is a new logical iteration counter. IV must be the same VarDecl
+  // as the original LoopHelper.IterationVarRef because LoopHelper.Updates
+  // references it. If the partially unrolled loop is associated with another
+  // loop directive (like an OMPForDirective), it will use checkOpenMPLoop to
+  // analyze this loop, i.e. the outer loop must fulfill the constraints of an
+  // OpenMP canonical loop. The inner loop is not an associable canonical loop
+  // and only exists to defer its unrolling to LLVM's LoopUnroll instead of
+  // doing it in the frontend (by adding loop metadata). NewPreInits becomes a
+  // property of the OMPLoopBasedDirective instead of statements in
+  // CompoundStatement. This is to allow the loop to become a non-outermost loop
+  // of a canonical loop nest where these PreInits are emitted before the
+  // outermost directive.
+
+  // Determine the PreInit declarations.
+  SmallVector<Decl *, 4> PreInits;
+  assert(OriginalInits.size() == 1 &&
+         "Expecting a single-dimensional loop iteration space");
+  for (auto &P : OriginalInits[0]) {
+    if (auto *D = P.dyn_cast<Decl *>())
+      PreInits.push_back(D);
+    else if (auto *PI = dyn_cast_or_null<DeclStmt>(P.dyn_cast<Stmt *>()))
+      PreInits.append(PI->decl_begin(), PI->decl_end());
+  }
+  if (auto *PI = cast_or_null<DeclStmt>(LoopHelper.PreInits))
+    PreInits.append(PI->decl_begin(), PI->decl_end());
+  // Gather declarations for the data members used as counters.
+  for (Expr *CounterRef : LoopHelper.Counters) {
+    auto *CounterDecl = cast<DeclRefExpr>(CounterRef)->getDecl();
+    if (isa<OMPCapturedExprDecl>(CounterDecl))
+      PreInits.push_back(CounterDecl);
+  }
+
+  auto *IterationVarRef = cast<DeclRefExpr>(LoopHelper.IterationVarRef);
+  QualType IVTy = IterationVarRef->getType();
+  assert(LoopHelper.Counters.size() == 1 &&
+         "Expecting a single-dimensional loop iteration space");
+  auto *OrigVar = cast<DeclRefExpr>(LoopHelper.Counters.front());
+
+  // Determine the unroll factor.
+  uint64_t Factor;
+  SourceLocation FactorLoc;
+  if (Expr *FactorVal = PartialClause->getFactor()) {
+    Factor =
+        FactorVal->getIntegerConstantExpr(Context).getValue().getZExtValue();
+    FactorLoc = FactorVal->getExprLoc();
+  } else {
+    // TODO: Use a better profitability model.
+    Factor = 2;
+  }
+  assert(Factor > 0 && "Expected positive unroll factor");
+  auto MakeFactorExpr = [this, Factor, IVTy, FactorLoc]() {
+    return IntegerLiteral::Create(
+        Context, llvm::APInt(Context.getIntWidth(IVTy), Factor), IVTy,
+        FactorLoc);
+  };
+
+  // Iteration variable SourceLocations.
+  SourceLocation OrigVarLoc = OrigVar->getExprLoc();
+  SourceLocation OrigVarLocBegin = OrigVar->getBeginLoc();
+  SourceLocation OrigVarLocEnd = OrigVar->getEndLoc();
+
+  // Internal variable names.
+  std::string OrigVarName = OrigVar->getNameInfo().getAsString();
+  std::string OuterIVName = (Twine(".unrolled.iv.") + OrigVarName).str();
+  std::string InnerIVName = (Twine(".unroll_inner.iv.") + OrigVarName).str();
+  std::string InnerTripCountName =
+      (Twine(".unroll_inner.tripcount.") + OrigVarName).str();
+
+  // Create the iteration variable for the unrolled loop.
+  VarDecl *OuterIVDecl =
+      buildVarDecl(*this, {}, IVTy, OuterIVName, nullptr, OrigVar);
+  auto MakeOuterRef = [this, OuterIVDecl, IVTy, OrigVarLoc]() {
+    return buildDeclRefExpr(*this, OuterIVDecl, IVTy, OrigVarLoc);
+  };
+
+  // Iteration variable for the inner loop: Reuse the iteration variable created
+  // by checkOpenMPLoop.
+  auto *InnerIVDecl = cast<VarDecl>(IterationVarRef->getDecl());
+  InnerIVDecl->setDeclName(&PP.getIdentifierTable().get(InnerIVName));
+  auto MakeInnerRef = [this, InnerIVDecl, IVTy, OrigVarLoc]() {
+    return buildDeclRefExpr(*this, InnerIVDecl, IVTy, OrigVarLoc);
+  };
+
+  // Make a copy of the NumIterations expression for each use: By the AST
+  // constraints, every expression object in a DeclContext must be unique.
+  CaptureVars CopyTransformer(*this);
+  auto MakeNumIterations = [&CopyTransformer, &LoopHelper]() -> Expr * {
+    return AssertSuccess(
+        CopyTransformer.TransformExpr(LoopHelper.NumIterations));
+  };
+
+  // Inner For init-statement: auto .unroll_inner.iv = .unrolled.iv
+  ExprResult LValueConv = DefaultLvalueConversion(MakeOuterRef());
+  AddInitializerToDecl(InnerIVDecl, LValueConv.get(), /*DirectInit=*/false);
+  StmtResult InnerInit = new (Context)
+      DeclStmt(DeclGroupRef(InnerIVDecl), OrigVarLocBegin, OrigVarLocEnd);
+  if (!InnerInit.isUsable())
+    return StmtError();
+
+  // Inner For cond-expression:
+  // \code
+  //   .unroll_inner.iv < .unrolled.iv + Factor &&
+  //   .unroll_inner.iv < NumIterations
+  // \endcode
+  // This conjunction of two conditions allows ScalarEvolution to derive the
+  // maximum trip count of the inner loop.
+  // Both comparisons must be strict: the iteration space is
+  // [0, NumIterations) and each chunk covers [UIV, UIV + Factor); using '<='
+  // would execute one extra (out-of-range or duplicated) iteration.
+  ExprResult EndOfTile = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
+                                    BO_Add, MakeOuterRef(), MakeFactorExpr());
+  if (!EndOfTile.isUsable())
+    return StmtError();
+  ExprResult InnerCond1 = BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(),
+                                     BO_LT, MakeInnerRef(), EndOfTile.get());
+  if (!InnerCond1.isUsable())
+    return StmtError();
+  ExprResult InnerCond2 =
+      BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeInnerRef(),
+                 MakeNumIterations());
+  if (!InnerCond2.isUsable())
+    return StmtError();
+  ExprResult InnerCond =
+      BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LAnd,
+                 InnerCond1.get(), InnerCond2.get());
+  if (!InnerCond.isUsable())
+    return StmtError();
+
+  // Inner For incr-statement: ++.unroll_inner.iv
+  ExprResult InnerIncr = BuildUnaryOp(CurScope, LoopHelper.Inc->getExprLoc(),
+                                      UO_PreInc, MakeInnerRef());
+  if (!InnerIncr.isUsable())
+    return StmtError();
+
+  // Inner For statement.
+  SmallVector<Stmt *> InnerBodyStmts;
+  InnerBodyStmts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
+  InnerBodyStmts.push_back(Body);
+  CompoundStmt *InnerBody = CompoundStmt::Create(
+      Context, InnerBodyStmts, Body->getBeginLoc(), Body->getEndLoc());
+  ForStmt *InnerFor = new (Context)
+      ForStmt(Context, InnerInit.get(), InnerCond.get(), nullptr,
+              InnerIncr.get(), InnerBody, LoopHelper.Init->getBeginLoc(),
+              LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+
+  // Unroll metadata for the inner loop.
+  // This needs to take into account the remainder portion of the unrolled loop,
+  // hence `unroll(full)` does not apply here, even though the LoopUnroll pass
+  // supports multiple loop exits. Instead, unroll using a factor equivalent to
+  // the maximum trip count, which will also generate a remainder loop. Just
+  // `unroll(enable)` (which could have been useful if the user has not
+  // specified a concrete factor; even though the outer loop cannot be
+  // influenced anymore, would avoid more code bloat than necessary) will refuse
+  // the loop because "Won't unroll; remainder loop could not be generated when
+  // assuming runtime trip count". Even if it did work, it must not choose a
+  // larger unroll factor than the maximum loop length, or it would always just
+  // execute the remainder loop.
+  LoopHintAttr *UnrollHintAttr =
+      LoopHintAttr::CreateImplicit(Context, LoopHintAttr::UnrollCount,
+                                   LoopHintAttr::Numeric, MakeFactorExpr());
+  AttributedStmt *InnerUnrolled =
+      AttributedStmt::Create(Context, StartLoc, {UnrollHintAttr}, InnerFor);
+
+  // Outer For init-statement: auto .unrolled.iv = 0
+  AddInitializerToDecl(
+      OuterIVDecl, ActOnIntegerConstant(LoopHelper.Init->getExprLoc(), 0).get(),
+      /*DirectInit=*/false);
+  StmtResult OuterInit = new (Context)
+      DeclStmt(DeclGroupRef(OuterIVDecl), OrigVarLocBegin, OrigVarLocEnd);
+  if (!OuterInit.isUsable())
+    return StmtError();
+
+  // Outer For cond-expression: .unrolled.iv < NumIterations
+  ExprResult OuterCond =
+      BuildBinOp(CurScope, LoopHelper.Cond->getExprLoc(), BO_LT, MakeOuterRef(),
+                 MakeNumIterations());
+  if (!OuterCond.isUsable())
+    return StmtError();
+
+  // Outer For incr-statement: .unrolled.iv += Factor
+  ExprResult OuterIncr =
+      BuildBinOp(CurScope, LoopHelper.Inc->getExprLoc(), BO_AddAssign,
+                 MakeOuterRef(), MakeFactorExpr());
+  if (!OuterIncr.isUsable())
+    return StmtError();
+
+  // Outer For statement.
+  ForStmt *OuterFor = new (Context)
+      ForStmt(Context, OuterInit.get(), OuterCond.get(), nullptr,
+              OuterIncr.get(), InnerUnrolled, LoopHelper.Init->getBeginLoc(),
+              LoopHelper.Init->getBeginLoc(), LoopHelper.Inc->getEndLoc());
+
+  return OMPUnrollDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
+                                    OuterFor, buildPreInits(Context, PreInits));
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -11940,6 +13151,18 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_detach:
Res = ActOnOpenMPDetachClause(Expr, StartLoc, LParenLoc, EndLoc);
break;
+ case OMPC_novariants:
+ Res = ActOnOpenMPNovariantsClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_nocontext:
+ Res = ActOnOpenMPNocontextClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_filter:
+ Res = ActOnOpenMPFilterClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
+ case OMPC_partial:
+ Res = ActOnOpenMPPartialClause(Expr, StartLoc, LParenLoc, EndLoc);
+ break;
case OMPC_device:
case OMPC_if:
case OMPC_default:
@@ -11960,6 +13183,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
case OMPC_untied:
case OMPC_mergeable:
case OMPC_threadprivate:
+ case OMPC_sizes:
case OMPC_allocate:
case OMPC_flush:
case OMPC_read:
@@ -12140,11 +13364,14 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_teams:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12216,12 +13443,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_end_declare_target:
case OMPD_teams:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12295,12 +13525,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12372,12 +13605,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12450,10 +13686,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12484,7 +13723,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_distribute_parallel_for_simd:
case OMPD_distribute:
case OMPD_distribute_simd:
- // Do not capture thread_limit-clause expressions.
+ // Do not capture dist_schedule-clause expressions.
break;
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
@@ -12527,19 +13766,22 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_ordered:
case OMPD_atomic:
case OMPD_target_teams:
case OMPD_requires:
- llvm_unreachable("Unexpected OpenMP directive with schedule clause");
+ llvm_unreachable("Unexpected OpenMP directive with dist_schedule clause");
case OMPD_unknown:
default:
llvm_unreachable("Unknown OpenMP directive");
@@ -12560,9 +13802,11 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel_for_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_dispatch:
CaptureRegion = OMPD_task;
break;
case OMPD_target_data:
+ case OMPD_interop:
// Do not capture device-clause expressions.
break;
case OMPD_teams_distribute_parallel_for:
@@ -12603,12 +13847,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12616,7 +13863,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_atomic:
case OMPD_distribute_simd:
case OMPD_requires:
- llvm_unreachable("Unexpected OpenMP directive with num_teams-clause");
+ llvm_unreachable("Unexpected OpenMP directive with device-clause");
case OMPD_unknown:
default:
llvm_unreachable("Unknown OpenMP directive");
@@ -12682,12 +13929,15 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_simd:
+ case OMPD_tile:
+ case OMPD_unroll:
case OMPD_for:
case OMPD_for_simd:
case OMPD_sections:
case OMPD_section:
case OMPD_single:
case OMPD_master:
+ case OMPD_masked:
case OMPD_critical:
case OMPD_taskgroup:
case OMPD_distribute:
@@ -12701,6 +13951,19 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
llvm_unreachable("Unknown OpenMP directive");
}
break;
+ case OMPC_novariants:
+ case OMPC_nocontext:
+ switch (DKind) {
+ case OMPD_dispatch:
+ CaptureRegion = OMPD_task;
+ break;
+ default:
+ llvm_unreachable("Unexpected OpenMP directive");
+ }
+ break;
+ case OMPC_filter:
+ // Do not capture filter-clause expressions.
+ break;
case OMPC_firstprivate:
case OMPC_lastprivate:
case OMPC_reduction:
@@ -12711,6 +13974,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPC_proc_bind:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_collapse:
case OMPC_private:
@@ -12950,17 +14214,33 @@ OMPClause *Sema::ActOnOpenMPNumThreadsClause(Expr *NumThreads,
ExprResult Sema::VerifyPositiveIntegerConstantInClause(Expr *E,
OpenMPClauseKind CKind,
- bool StrictlyPositive) {
+ bool StrictlyPositive,
+ bool SuppressExprDiags) {
if (!E)
return ExprError();
if (E->isValueDependent() || E->isTypeDependent() ||
E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
return E;
+
llvm::APSInt Result;
- ExprResult ICE =
- VerifyIntegerConstantExpression(E, &Result, /*FIXME*/ AllowFold);
+ ExprResult ICE;
+ if (SuppressExprDiags) {
+ // Use a custom diagnoser that suppresses 'note' diagnostics about the
+ // expression.
+ struct SuppressedDiagnoser : public Sema::VerifyICEDiagnoser {
+ SuppressedDiagnoser() : VerifyICEDiagnoser(/*Suppress=*/true) {}
+ Sema::SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
+ SourceLocation Loc) override {
+ llvm_unreachable("Diagnostic suppressed");
+ }
+ } Diagnoser;
+ ICE = VerifyIntegerConstantExpression(E, &Result, Diagnoser, AllowFold);
+ } else {
+ ICE = VerifyIntegerConstantExpression(E, &Result, /*FIXME*/ AllowFold);
+ }
if (ICE.isInvalid())
return ExprError();
+
if ((StrictlyPositive && !Result.isStrictlyPositive()) ||
(!StrictlyPositive && !Result.isNonNegative())) {
Diag(E->getExprLoc(), diag::err_omp_negative_expression_in_clause)
@@ -13144,6 +14424,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_collapse:
case OMPC_schedule:
@@ -13203,6 +14484,8 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_match:
case OMPC_nontemporal:
case OMPC_destroy:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_detach:
case OMPC_inclusive:
case OMPC_exclusive:
@@ -13275,10 +14558,21 @@ OMPClause *Sema::ActOnOpenMPProcBindClause(ProcBindKind Kind,
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_proc_bind,
/*First=*/unsigned(OMP_PROC_BIND_master),
- /*Last=*/5)
+ /*Last=*/
+ unsigned(LangOpts.OpenMP > 50
+ ? OMP_PROC_BIND_primary
+ : OMP_PROC_BIND_spread) +
+ 1)
<< getOpenMPClauseName(OMPC_proc_bind);
return nullptr;
}
+ if (Kind == OMP_PROC_BIND_primary && LangOpts.OpenMP < 51)
+ Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
+ << getListOfPossibleValues(OMPC_proc_bind,
+ /*First=*/unsigned(OMP_PROC_BIND_master),
+ /*Last=*/
+ unsigned(OMP_PROC_BIND_spread) + 1)
+ << getOpenMPClauseName(OMPC_proc_bind);
return new (Context)
OMPProcBindClause(Kind, KindKwLoc, StartLoc, LParenLoc, EndLoc);
}
@@ -13335,6 +14629,45 @@ OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
EndLoc);
}
+/// Called on a well-formed 'sizes' clause. Each size expression must be a
+/// strictly positive integer constant (dependent expressions are allowed
+/// through by VerifyPositiveIntegerConstantInClause).
+OMPClause *Sema::ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ for (Expr *SizeExpr : SizeExprs) {
+ ExprResult NumForLoopsResult = VerifyPositiveIntegerConstantInClause(
+ SizeExpr, OMPC_sizes, /*StrictlyPositive=*/true);
+ if (!NumForLoopsResult.isUsable())
+ return nullptr;
+ }
+
+ // The number of size expressions determines how many loops associate with
+ // the directive carrying this clause.
+ DSAStack->setAssociatedLoops(SizeExprs.size());
+ return OMPSizesClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ SizeExprs);
+}
+
+/// Called on a well-formed 'full' clause; no semantic checks are needed here
+/// (mutual exclusivity with 'partial' is checked by the directive handler).
+OMPClause *Sema::ActOnOpenMPFullClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return OMPFullClause::Create(Context, StartLoc, EndLoc);
+}
+
+/// Called on a well-formed 'partial' clause. \p FactorExpr may be null, in
+/// which case the unroll factor is left to the directive handler to choose.
+OMPClause *Sema::ActOnOpenMPPartialClause(Expr *FactorExpr,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ if (FactorExpr) {
+ // If an argument is specified, it must be a constant (or an unevaluated
+ // template expression).
+ ExprResult FactorResult = VerifyPositiveIntegerConstantInClause(
+ FactorExpr, OMPC_partial, /*StrictlyPositive=*/true);
+ if (FactorResult.isInvalid())
+ return nullptr;
+ FactorExpr = FactorResult.get();
+ }
+
+ return OMPPartialClause::Create(Context, StartLoc, LParenLoc, EndLoc,
+ FactorExpr);
+}
+
OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Argument, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -13382,6 +14715,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_collapse:
case OMPC_default:
@@ -13442,6 +14776,8 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_nontemporal:
case OMPC_order:
case OMPC_destroy:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_detach:
case OMPC_inclusive:
case OMPC_exclusive:
@@ -13630,13 +14966,22 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
Res = ActOnOpenMPDynamicAllocatorsClause(StartLoc, EndLoc);
break;
case OMPC_destroy:
- Res = ActOnOpenMPDestroyClause(StartLoc, EndLoc);
+ Res = ActOnOpenMPDestroyClause(/*InteropVar=*/nullptr, StartLoc,
+ /*LParenLoc=*/SourceLocation(),
+ /*VarLoc=*/SourceLocation(), EndLoc);
+ break;
+ case OMPC_full:
+ Res = ActOnOpenMPFullClause(StartLoc, EndLoc);
+ break;
+ case OMPC_partial:
+ Res = ActOnOpenMPPartialClause(nullptr, StartLoc, /*LParenLoc=*/{}, EndLoc);
break;
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_collapse:
case OMPC_schedule:
@@ -13680,6 +15025,8 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_match:
case OMPC_nontemporal:
case OMPC_order:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_detach:
case OMPC_inclusive:
case OMPC_exclusive:
@@ -13787,9 +15134,270 @@ OMPClause *Sema::ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
return new (Context) OMPDynamicAllocatorsClause(StartLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPDestroyClause(SourceLocation StartLoc,
+/// Called on a well-formed '#pragma omp interop' directive. Enforces the
+/// OpenMP 5.1 restrictions checkable at this point: at least one
+/// action-clause is present, a 'depend' clause requires a targetsync 'init'
+/// clause (when any 'init' clause exists), and each interop-var appears in
+/// at most one action-clause.
+StmtResult Sema::ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+
+ // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
+ // At least one action-clause must appear on a directive.
+ if (!hasClauses(Clauses, OMPC_init, OMPC_use, OMPC_destroy, OMPC_nowait)) {
+ StringRef Expected = "'init', 'use', 'destroy', or 'nowait'";
+ Diag(StartLoc, diag::err_omp_no_clause_for_directive)
+ << Expected << getOpenMPDirectiveName(OMPD_interop);
+ return StmtError();
+ }
+
+ // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
+ // A depend clause can only appear on the directive if a targetsync
+ // interop-type is present or the interop-var was initialized with
+ // the targetsync interop-type.
+
+ // If there is any 'init' clause diagnose if there is no 'init' clause with
+ // interop-type of 'targetsync'. Cases involving other directives cannot be
+ // diagnosed.
+ const OMPDependClause *DependClause = nullptr;
+ bool HasInitClause = false;
+ bool IsTargetSync = false;
+ for (const OMPClause *C : Clauses) {
+ // Stop scanning once a targetsync init clause is found; the depend
+ // restriction can no longer be violated.
+ if (IsTargetSync)
+ break;
+ if (const auto *InitClause = dyn_cast<OMPInitClause>(C)) {
+ HasInitClause = true;
+ if (InitClause->getIsTargetSync())
+ IsTargetSync = true;
+ } else if (const auto *DC = dyn_cast<OMPDependClause>(C)) {
+ DependClause = DC;
+ }
+ }
+ if (DependClause && HasInitClause && !IsTargetSync) {
+ Diag(DependClause->getBeginLoc(), diag::err_omp_interop_bad_depend_clause);
+ return StmtError();
+ }
+
+ // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
+ // Each interop-var may be specified for at most one action-clause of each
+ // interop construct.
+ llvm::SmallPtrSet<const VarDecl *, 4> InteropVars;
+ for (const OMPClause *C : Clauses) {
+ OpenMPClauseKind ClauseKind = C->getClauseKind();
+ const DeclRefExpr *DRE = nullptr;
+ SourceLocation VarLoc;
+
+ if (ClauseKind == OMPC_init) {
+ const auto *IC = cast<OMPInitClause>(C);
+ VarLoc = IC->getVarLoc();
+ DRE = dyn_cast_or_null<DeclRefExpr>(IC->getInteropVar());
+ } else if (ClauseKind == OMPC_use) {
+ const auto *UC = cast<OMPUseClause>(C);
+ VarLoc = UC->getVarLoc();
+ DRE = dyn_cast_or_null<DeclRefExpr>(UC->getInteropVar());
+ } else if (ClauseKind == OMPC_destroy) {
+ const auto *DC = cast<OMPDestroyClause>(C);
+ VarLoc = DC->getVarLoc();
+ DRE = dyn_cast_or_null<DeclRefExpr>(DC->getInteropVar());
+ }
+
+ // Non-action clauses (e.g. 'nowait') and dependent interop-vars have no
+ // DeclRefExpr to check here.
+ if (!DRE)
+ continue;
+
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // insert() returns false on the second use of the same canonical decl.
+ if (!InteropVars.insert(VD->getCanonicalDecl()).second) {
+ Diag(VarLoc, diag::err_omp_interop_var_multiple_actions) << VD;
+ return StmtError();
+ }
+ }
+ }
+
+ return OMPInteropDirective::Create(Context, StartLoc, EndLoc, Clauses);
+}
+
+/// Returns true if \p InteropVarExpr is a valid interop-var for clause
+/// \p Kind: a reference to a VarDecl whose unqualified type matches the
+/// 'omp_interop_t' type found by name lookup in the current scope, and
+/// non-const when used with 'init' or 'destroy'. Dependent expressions are
+/// accepted unchecked (re-checked after instantiation). Emits a diagnostic
+/// and returns false otherwise.
+static bool isValidInteropVariable(Sema &SemaRef, Expr *InteropVarExpr,
+ SourceLocation VarLoc,
+ OpenMPClauseKind Kind) {
+ if (InteropVarExpr->isValueDependent() || InteropVarExpr->isTypeDependent() ||
+ InteropVarExpr->isInstantiationDependent() ||
+ InteropVarExpr->containsUnexpandedParameterPack())
+ return true;
+
+ const auto *DRE = dyn_cast<DeclRefExpr>(InteropVarExpr);
+ if (!DRE || !isa<VarDecl>(DRE->getDecl())) {
+ SemaRef.Diag(VarLoc, diag::err_omp_interop_variable_expected) << 0;
+ return false;
+ }
+
+ // Interop variable should be of type omp_interop_t.
+ bool HasError = false;
+ QualType InteropType;
+ LookupResult Result(SemaRef, &SemaRef.Context.Idents.get("omp_interop_t"),
+ VarLoc, Sema::LookupOrdinaryName);
+ if (SemaRef.LookupName(Result, SemaRef.getCurScope())) {
+ NamedDecl *ND = Result.getFoundDecl();
+ if (const auto *TD = dyn_cast<TypeDecl>(ND)) {
+ InteropType = QualType(TD->getTypeForDecl(), 0);
+ } else {
+ // Found a non-type entity named omp_interop_t.
+ HasError = true;
+ }
+ } else {
+ // The name is not declared at all (e.g. omp.h not included).
+ HasError = true;
+ }
+
+ if (HasError) {
+ SemaRef.Diag(VarLoc, diag::err_omp_implied_type_not_found)
+ << "omp_interop_t";
+ return false;
+ }
+
+ QualType VarType = InteropVarExpr->getType().getUnqualifiedType();
+ if (!SemaRef.Context.hasSameType(InteropType, VarType)) {
+ SemaRef.Diag(VarLoc, diag::err_omp_interop_variable_wrong_type);
+ return false;
+ }
+
+ // OpenMP 5.1 [2.15.1, interop Construct, Restrictions]
+ // The interop-var passed to init or destroy must be non-const.
+ if ((Kind == OMPC_init || Kind == OMPC_destroy) &&
+ isConstNotMutableType(SemaRef, InteropVarExpr->getType())) {
+ SemaRef.Diag(VarLoc, diag::err_omp_interop_variable_expected)
+ << /*non-const*/ 1;
+ return false;
+ }
+ return true;
+}
+
+/// Called on a well-formed 'init' clause. Validates the interop variable and
+/// checks that each prefer_type foreign-runtime-id is either a string literal
+/// or an integer constant expression (dependent expressions pass through).
+OMPClause *
+Sema::ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
+ bool IsTarget, bool IsTargetSync,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation VarLoc, SourceLocation EndLoc) {
+
+ if (!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_init))
+ return nullptr;
+
+ // Check prefer_type values. These foreign-runtime-id values are either
+ // string literals or constant integral expressions.
+ for (const Expr *E : PrefExprs) {
+ if (E->isValueDependent() || E->isTypeDependent() ||
+ E->isInstantiationDependent() || E->containsUnexpandedParameterPack())
+ continue;
+ if (E->isIntegerConstantExpr(Context))
+ continue;
+ if (isa<StringLiteral>(E))
+ continue;
+ Diag(E->getExprLoc(), diag::err_omp_interop_prefer_type);
+ return nullptr;
+ }
+
+ return OMPInitClause::Create(Context, InteropVar, PrefExprs, IsTarget,
+ IsTargetSync, StartLoc, LParenLoc, VarLoc,
+ EndLoc);
+}
+
+/// Called on a well-formed 'use' clause; only the interop variable itself
+/// needs validation (const vars are allowed for 'use').
+OMPClause *Sema::ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc) {
+
+ if (!isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_use))
+ return nullptr;
+
+ return new (Context)
+ OMPUseClause(InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc);
+}
+
+/// Called on a well-formed 'destroy' clause. \p InteropVar may be null (the
+/// argument-less form used on directives other than 'interop'); when present
+/// it must be a valid, non-const interop variable.
+OMPClause *Sema::ActOnOpenMPDestroyClause(Expr *InteropVar,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
 SourceLocation EndLoc) {
- return new (Context) OMPDestroyClause(StartLoc, EndLoc);
+ if (InteropVar &&
+ !isValidInteropVariable(*this, InteropVar, VarLoc, OMPC_destroy))
+ return nullptr;
+
+ return new (Context)
+ OMPDestroyClause(InteropVar, StartLoc, LParenLoc, VarLoc, EndLoc);
+}
+
+/// Called on a well-formed 'novariants' clause. For non-dependent conditions
+/// the expression is checked as a boolean condition and wrapped in a full
+/// expression; when the current directive defines a capture region for this
+/// clause, referenced variables are captured and the generated pre-init
+/// statement is stored alongside the clause.
+OMPClause *Sema::ActOnOpenMPNovariantsClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Condition;
+ Stmt *HelperValStmt = nullptr;
+ OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
+ // Dependent conditions are kept as-is and re-analyzed at instantiation.
+ if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
+ !Condition->isInstantiationDependent() &&
+ !Condition->containsUnexpandedParameterPack()) {
+ ExprResult Val = CheckBooleanCondition(StartLoc, Condition);
+ if (Val.isInvalid())
+ return nullptr;
+
+ ValExpr = MakeFullExpr(Val.get()).get();
+
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ CaptureRegion = getOpenMPCaptureRegionForClause(DKind, OMPC_novariants,
+ LangOpts.OpenMP);
+ if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
+ ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(Context, Captures);
+ }
+ }
+
+ return new (Context) OMPNovariantsClause(
+ ValExpr, HelperValStmt, CaptureRegion, StartLoc, LParenLoc, EndLoc);
+}
+
+/// Called on a well-formed 'nocontext' clause. Mirrors the handling of
+/// 'novariants': boolean-check and full-expression-wrap non-dependent
+/// conditions, and capture referenced variables when the current directive
+/// defines a capture region for this clause.
+OMPClause *Sema::ActOnOpenMPNocontextClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = Condition;
+ Stmt *HelperValStmt = nullptr;
+ OpenMPDirectiveKind CaptureRegion = OMPD_unknown;
+ // Dependent conditions are kept as-is and re-analyzed at instantiation.
+ if (!Condition->isValueDependent() && !Condition->isTypeDependent() &&
+ !Condition->isInstantiationDependent() &&
+ !Condition->containsUnexpandedParameterPack()) {
+ ExprResult Val = CheckBooleanCondition(StartLoc, Condition);
+ if (Val.isInvalid())
+ return nullptr;
+
+ ValExpr = MakeFullExpr(Val.get()).get();
+
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ CaptureRegion =
+ getOpenMPCaptureRegionForClause(DKind, OMPC_nocontext, LangOpts.OpenMP);
+ if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
+ ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(Context, Captures);
+ }
+ }
+
+ return new (Context) OMPNocontextClause(ValExpr, HelperValStmt, CaptureRegion,
+ StartLoc, LParenLoc, EndLoc);
+}
+
+/// Called on a well-formed 'filter' clause. The ThreadID expression is
+/// captured into the enclosing region when the current directive defines a
+/// capture region for this clause; no boolean/ICE check is performed here.
+OMPClause *Sema::ActOnOpenMPFilterClause(Expr *ThreadID,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ Expr *ValExpr = ThreadID;
+ Stmt *HelperValStmt = nullptr;
+
+ OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
+ OpenMPDirectiveKind CaptureRegion =
+ getOpenMPCaptureRegionForClause(DKind, OMPC_filter, LangOpts.OpenMP);
+ if (CaptureRegion != OMPD_unknown && !CurContext->isDependentContext()) {
+ ValExpr = MakeFullExpr(ValExpr).get();
+ llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
+ ValExpr = tryBuildCapture(*this, ValExpr, Captures).get();
+ HelperValStmt = buildPreInits(Context, Captures);
+ }
+
+ return new (Context) OMPFilterClause(ValExpr, HelperValStmt, CaptureRegion,
+ StartLoc, LParenLoc, EndLoc);
}
OMPClause *Sema::ActOnOpenMPVarListClause(
@@ -13919,6 +15527,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
+ case OMPC_sizes:
case OMPC_allocator:
case OMPC_collapse:
case OMPC_default:
@@ -13961,6 +15570,8 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_match:
case OMPC_order:
case OMPC_destroy:
+ case OMPC_novariants:
+ case OMPC_nocontext:
case OMPC_detach:
case OMPC_uses_allocators:
default:
@@ -15087,6 +16698,21 @@ static bool checkOMPArraySectionConstantForReduction(
return true;
}
+static BinaryOperatorKind
+getRelatedCompoundReductionOp(BinaryOperatorKind BOK) {
+ if (BOK == BO_Add)
+ return BO_AddAssign;
+ if (BOK == BO_Mul)
+ return BO_MulAssign;
+ if (BOK == BO_And)
+ return BO_AndAssign;
+ if (BOK == BO_Or)
+ return BO_OrAssign;
+ if (BOK == BO_Xor)
+ return BO_XorAssign;
+ return BOK;
+}
+
static bool actOnOMPReductionKindClause(
Sema &S, DSAStackTy *Stack, OpenMPClauseKind ClauseKind,
ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc,
@@ -15433,7 +17059,8 @@ static bool actOnOMPReductionKindClause(
// (type of the variable or single array element).
PrivateTy = Context.getVariableArrayType(
Type,
- new (Context) OpaqueValueExpr(ELoc, Context.getSizeType(), VK_RValue),
+ new (Context)
+ OpaqueValueExpr(ELoc, Context.getSizeType(), VK_PRValue),
ArrayType::Normal, /*IndexTypeQuals=*/0, SourceRange());
} else if (!ASE && !OASE &&
Context.getAsArrayType(D->getType().getNonReferenceType())) {
@@ -15452,7 +17079,6 @@ static bool actOnOMPReductionKindClause(
auto *DRDRef = DeclareReductionRef.getAs<DeclRefExpr>();
auto *DRD = cast<OMPDeclareReductionDecl>(DRDRef->getDecl());
if (DRD->getInitializer()) {
- S.ActOnUninitializedDecl(PrivateVD);
Init = DRDRef;
RHSVD->setInit(DRDRef);
RHSVD->setInitStyle(VarDecl::CallInit);
@@ -15574,8 +17200,7 @@ static bool actOnOMPReductionKindClause(
}
if (RHSVD->isInvalidDecl())
continue;
- if (!RHSVD->hasInit() &&
- (DeclareReductionRef.isUnset() || !S.LangOpts.CPlusPlus)) {
+ if (!RHSVD->hasInit() && DeclareReductionRef.isUnset()) {
S.Diag(ELoc, diag::err_omp_reduction_id_not_compatible)
<< Type << ReductionIdRange;
bool IsDecl = !VD || VD->isThisDeclarationADefinition(Context) ==
@@ -15606,32 +17231,42 @@ static bool actOnOMPReductionKindClause(
QualType Params[] = {PtrRedTy, PtrRedTy};
QualType FnTy = Context.getFunctionType(Context.VoidTy, Params, EPI);
auto *OVE = new (Context) OpaqueValueExpr(
- ELoc, Context.getPointerType(FnTy), VK_RValue, OK_Ordinary,
+ ELoc, Context.getPointerType(FnTy), VK_PRValue, OK_Ordinary,
S.DefaultLvalueConversion(DeclareReductionRef.get()).get());
Expr *Args[] = {LHS.get(), RHS.get()};
ReductionOp =
- CallExpr::Create(Context, OVE, Args, Context.VoidTy, VK_RValue, ELoc,
+ CallExpr::Create(Context, OVE, Args, Context.VoidTy, VK_PRValue, ELoc,
S.CurFPFeatureOverrides());
} else {
- ReductionOp = S.BuildBinOp(
- Stack->getCurScope(), ReductionId.getBeginLoc(), BOK, LHSDRE, RHSDRE);
- if (ReductionOp.isUsable()) {
- if (BOK != BO_LT && BOK != BO_GT) {
- ReductionOp =
- S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
- BO_Assign, LHSDRE, ReductionOp.get());
- } else {
- auto *ConditionalOp = new (Context)
- ConditionalOperator(ReductionOp.get(), ELoc, LHSDRE, ELoc, RHSDRE,
- Type, VK_LValue, OK_Ordinary);
- ReductionOp =
- S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
- BO_Assign, LHSDRE, ConditionalOp);
+ BinaryOperatorKind CombBOK = getRelatedCompoundReductionOp(BOK);
+ if (Type->isRecordType() && CombBOK != BOK) {
+ Sema::TentativeAnalysisScope Trap(S);
+ ReductionOp =
+ S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
+ CombBOK, LHSDRE, RHSDRE);
+ }
+ if (!ReductionOp.isUsable()) {
+ ReductionOp =
+ S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(), BOK,
+ LHSDRE, RHSDRE);
+ if (ReductionOp.isUsable()) {
+ if (BOK != BO_LT && BOK != BO_GT) {
+ ReductionOp =
+ S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
+ BO_Assign, LHSDRE, ReductionOp.get());
+ } else {
+ auto *ConditionalOp = new (Context)
+ ConditionalOperator(ReductionOp.get(), ELoc, LHSDRE, ELoc,
+ RHSDRE, Type, VK_LValue, OK_Ordinary);
+ ReductionOp =
+ S.BuildBinOp(Stack->getCurScope(), ReductionId.getBeginLoc(),
+ BO_Assign, LHSDRE, ConditionalOp);
+ }
}
- if (ReductionOp.isUsable())
- ReductionOp = S.ActOnFinishFullExpr(ReductionOp.get(),
- /*DiscardedValue*/ false);
}
+ if (ReductionOp.isUsable())
+ ReductionOp = S.ActOnFinishFullExpr(ReductionOp.get(),
+ /*DiscardedValue*/ false);
if (!ReductionOp.isUsable())
continue;
}
@@ -15664,7 +17299,7 @@ static bool actOnOMPReductionKindClause(
} else {
// Build temp array for prefix sum.
auto *Dim = new (S.Context)
- OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_RValue);
+ OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_PRValue);
QualType ArrayTy =
S.Context.getVariableArrayType(PrivateTy, Dim, ArrayType::Normal,
/*IndexTypeQuals=*/0, {ELoc, ELoc});
@@ -15677,7 +17312,7 @@ static bool actOnOMPReductionKindClause(
TempArrayElem =
S.DefaultFunctionArrayLvalueConversion(TempArrayRes.get());
auto *Idx = new (S.Context)
- OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_RValue);
+ OpaqueValueExpr(ELoc, S.Context.getSizeType(), VK_PRValue);
TempArrayElem = S.CreateBuiltinArraySubscriptExpr(TempArrayElem.get(),
ELoc, Idx, ELoc);
}
@@ -17232,6 +18867,14 @@ public:
Components.emplace_back(COCE, nullptr, IsNonContiguous);
return true;
}
+ bool VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ Expr *Source = E->getSourceExpr();
+ if (!Source) {
+ emitErrorMsg();
+ return false;
+ }
+ return Visit(Source);
+ }
bool VisitStmt(Stmt *) {
emitErrorMsg();
return false;
@@ -17311,7 +18954,9 @@ static bool checkMapConflicts(
ERange, CKind, &EnclosingExpr,
CurComponents](OMPClauseMappableExprCommon::MappableExprComponentListRef
StackComponents,
- OpenMPClauseKind) {
+ OpenMPClauseKind Kind) {
+ if (CKind == Kind && SemaRef.LangOpts.OpenMP >= 50)
+ return false;
assert(!StackComponents.empty() &&
"Map clause expression with no components!");
assert(StackComponents.back().getAssociatedDeclaration() == VD &&
@@ -17837,6 +19482,7 @@ static void checkMappableExpressionList(
return MC.getAssociatedDeclaration();
});
assert(I != CurComponents.end() && "Null decl on map clause.");
+ (void)I;
QualType Type;
auto *ASE = dyn_cast<ArraySubscriptExpr>(VE->IgnoreParens());
auto *OASE = dyn_cast<OMPArraySectionExpr>(VE->IgnoreParens());
@@ -17865,8 +19511,6 @@ static void checkMappableExpressionList(
DSAS, Type))
continue;
- Type = I->getAssociatedDeclaration()->getType().getNonReferenceType();
-
if (CKind == OMPC_map) {
// target enter data
// OpenMP [2.10.2, Restrictions, p. 99]
@@ -18351,8 +19995,15 @@ Sema::DeclGroupPtrTy Sema::ActOnOpenMPDeclareMapperDirective(
Diag(I->second, diag::note_previous_definition);
Invalid = true;
}
- auto *DMD = OMPDeclareMapperDecl::Create(Context, DC, StartLoc, Name,
- MapperType, VN, Clauses, PrevDMD);
+ // Build expressions for implicit maps of data members with 'default'
+ // mappers.
+ SmallVector<OMPClause *, 4> ClausesWithImplicit(Clauses.begin(),
+ Clauses.end());
+ if (LangOpts.OpenMP >= 50)
+ processImplicitMapsWithDefaultMappers(*this, DSAStack, ClausesWithImplicit);
+ auto *DMD =
+ OMPDeclareMapperDecl::Create(Context, DC, StartLoc, Name, MapperType, VN,
+ ClausesWithImplicit, PrevDMD);
if (S)
PushOnScopeChains(DMD, S);
else
@@ -18389,8 +20040,13 @@ Sema::ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S, QualType MapperType,
bool Sema::isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const {
assert(LangOpts.OpenMP && "Expected OpenMP mode.");
const Expr *Ref = DSAStack->getDeclareMapperVarRef();
- if (const auto *DRE = cast_or_null<DeclRefExpr>(Ref))
- return VD->getCanonicalDecl() == DRE->getDecl()->getCanonicalDecl();
+ if (const auto *DRE = cast_or_null<DeclRefExpr>(Ref)) {
+ if (VD->getCanonicalDecl() == DRE->getDecl()->getCanonicalDecl())
+ return true;
+ if (VD->isUsableInConstantExpressions(Context))
+ return true;
+ return false;
+ }
return true;
}
@@ -18731,7 +20387,8 @@ OMPClause *Sema::ActOnOpenMPDefaultmapClause(
OMPDefaultmapClause(StartLoc, LParenLoc, MLoc, KindLoc, EndLoc, Kind, M);
}
-bool Sema::ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc) {
+bool Sema::ActOnStartOpenMPDeclareTargetContext(
+ DeclareTargetContextInfo &DTCI) {
DeclContext *CurLexicalContext = getCurLexicalContext();
if (!CurLexicalContext->isFileContext() &&
!CurLexicalContext->isExternCContext() &&
@@ -18740,23 +20397,30 @@ bool Sema::ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc) {
!isa<ClassTemplateDecl>(CurLexicalContext) &&
!isa<ClassTemplatePartialSpecializationDecl>(CurLexicalContext) &&
!isa<ClassTemplateSpecializationDecl>(CurLexicalContext)) {
- Diag(Loc, diag::err_omp_region_not_file_context);
+ Diag(DTCI.Loc, diag::err_omp_region_not_file_context);
return false;
}
- DeclareTargetNesting.push_back(Loc);
+ DeclareTargetNesting.push_back(DTCI);
return true;
}
-void Sema::ActOnFinishOpenMPDeclareTargetDirective() {
+const Sema::DeclareTargetContextInfo
+Sema::ActOnOpenMPEndDeclareTargetDirective() {
assert(!DeclareTargetNesting.empty() &&
- "Unexpected ActOnFinishOpenMPDeclareTargetDirective");
- DeclareTargetNesting.pop_back();
+ "check isInOpenMPDeclareTargetContext() first!");
+ return DeclareTargetNesting.pop_back_val();
+}
+
+void Sema::ActOnFinishedOpenMPDeclareTargetContext(
+ DeclareTargetContextInfo &DTCI) {
+ for (auto &It : DTCI.ExplicitlyMapped)
+ ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT,
+ DTCI.DT);
}
-NamedDecl *
-Sema::lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
- const DeclarationNameInfo &Id,
- NamedDeclSetType &SameDirectiveDecls) {
+NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
+ CXXScopeSpec &ScopeSpec,
+ const DeclarationNameInfo &Id) {
LookupResult Lookup(*this, Id, LookupOrdinaryName);
LookupParsedName(Lookup, CurScope, &ScopeSpec, true);
@@ -18785,8 +20449,6 @@ Sema::lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
Diag(Id.getLoc(), diag::err_omp_invalid_target_decl) << Id.getName();
return nullptr;
}
- if (!SameDirectiveDecls.insert(cast<NamedDecl>(ND->getCanonicalDecl())))
- Diag(Id.getLoc(), diag::err_omp_declare_target_multiple) << Id.getName();
return ND;
}
@@ -18803,32 +20465,35 @@ void Sema::ActOnOpenMPDeclareTargetName(
(ND->isUsed(/*CheckUsedAttr=*/false) || ND->isReferenced()))
Diag(Loc, diag::warn_omp_declare_target_after_first_use);
+ // Explicit declare target lists have precedence.
+ const unsigned Level = -1;
+
auto *VD = cast<ValueDecl>(ND);
- Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
- OMPDeclareTargetDeclAttr::getDeviceType(VD);
- Optional<SourceLocation> AttrLoc = OMPDeclareTargetDeclAttr::getLocation(VD);
- if (DevTy.hasValue() && *DevTy != DT &&
- (DeclareTargetNesting.empty() ||
- *AttrLoc != DeclareTargetNesting.back())) {
+ llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ OMPDeclareTargetDeclAttr::getActiveAttr(VD);
+ if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getDevType() != DT &&
+ ActiveAttr.getValue()->getLevel() == Level) {
Diag(Loc, diag::err_omp_device_type_mismatch)
<< OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DT)
- << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(*DevTy);
+ << OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(
+ ActiveAttr.getValue()->getDevType());
return;
}
- Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
- if (!Res || (!DeclareTargetNesting.empty() &&
- *AttrLoc == DeclareTargetNesting.back())) {
- auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context, MT, DT, DeclareTargetNesting.size() + 1,
- SourceRange(Loc, Loc));
- ND->addAttr(A);
- if (ASTMutationListener *ML = Context.getASTMutationListener())
- ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
- checkDeclIsAllowedInOpenMPTarget(nullptr, ND, Loc);
- } else if (*Res != MT) {
+ if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getMapType() != MT &&
+ ActiveAttr.getValue()->getLevel() == Level) {
Diag(Loc, diag::err_omp_declare_target_to_and_link) << ND;
+ return;
}
+
+ if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() == Level)
+ return;
+
+ auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(Context, MT, DT, Level,
+ SourceRange(Loc, Loc));
+ ND->addAttr(A);
+ if (ASTMutationListener *ML = Context.getASTMutationListener())
+ ML->DeclarationMarkedOpenMPDeclareTarget(ND, A);
+ checkDeclIsAllowedInOpenMPTarget(nullptr, ND, Loc);
}
static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
@@ -18842,8 +20507,6 @@ static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
(SemaRef.getCurLambda(/*IgnoreNonLambdaCapturingScope=*/true) ||
SemaRef.getCurBlock() || SemaRef.getCurCapturedRegion()) &&
VD->hasGlobalStorage()) {
- llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> MapTy =
- OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!MapTy || *MapTy != OMPDeclareTargetDeclAttr::MT_To) {
// OpenMP 5.0, 2.12.7 declare target Directive, Restrictions
// If a lambda declaration and definition appears between a
@@ -18907,15 +20570,19 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
if ((E || !VD->getType()->isIncompleteType()) &&
!checkValueDeclInTarget(SL, SR, *this, DSAStack, VD))
return;
- if (!E && !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
+ if (!E && isInOpenMPDeclareTargetContext()) {
// Checking declaration inside declare target region.
if (isa<VarDecl>(D) || isa<FunctionDecl>(D) ||
isa<FunctionTemplateDecl>(D)) {
+ llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
+ OMPDeclareTargetDeclAttr::getActiveAttr(VD);
+ unsigned Level = DeclareTargetNesting.size();
+ if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() >= Level)
+ return;
+ DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
auto *A = OMPDeclareTargetDeclAttr::CreateImplicit(
- Context, OMPDeclareTargetDeclAttr::MT_To,
- OMPDeclareTargetDeclAttr::DT_Any, DeclareTargetNesting.size(),
- SourceRange(DeclareTargetNesting.back(),
- DeclareTargetNesting.back()));
+ Context, OMPDeclareTargetDeclAttr::MT_To, DTCI.DT, Level,
+ SourceRange(DTCI.Loc, DTCI.Loc));
D->addAttr(A);
if (ASTMutationListener *ML = Context.getASTMutationListener())
ML->DeclarationMarkedOpenMPDeclareTarget(D, A);
diff --git a/clang/lib/Sema/SemaOverload.cpp b/clang/lib/Sema/SemaOverload.cpp
index 7fe7466725fa..0758fbb84107 100644
--- a/clang/lib/Sema/SemaOverload.cpp
+++ b/clang/lib/Sema/SemaOverload.cpp
@@ -3346,10 +3346,7 @@ IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
bool Usable = !Info.Constructor->isInvalidDecl() &&
S.isInitListConstructor(Info.Constructor);
if (Usable) {
- // If the first argument is (a reference to) the target type,
- // suppress conversions.
- bool SuppressUserConversions = isFirstArgumentCompatibleWithType(
- S.Context, Info.Constructor, ToType);
+ bool SuppressUserConversions = false;
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(Info.ConstructorTmpl, Info.FoundDecl,
/*ExplicitArgs*/ nullptr, From,
@@ -3473,14 +3470,18 @@ IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
/*AllowExplicit*/ true);
if (Usable) {
bool SuppressUserConversions = !ConstructorsOnly;
+ // C++20 [over.best.ics.general]/4.5:
+ // if the target is the first parameter of a constructor [of class
+ // X] and the constructor [...] is a candidate by [...] the second
+ // phase of [over.match.list] when the initializer list has exactly
+ // one element that is itself an initializer list, [...] and the
+ // conversion is to X or reference to cv X, user-defined conversion
+ // sequences are not considered.
if (SuppressUserConversions && ListInitializing) {
- SuppressUserConversions = false;
- if (NumArgs == 1) {
- // If the first argument is (a reference to) the target type,
- // suppress conversions.
- SuppressUserConversions = isFirstArgumentCompatibleWithType(
- S.Context, Info.Constructor, ToType);
- }
+ SuppressUserConversions =
+ NumArgs == 1 && isa<InitListExpr>(Args[0]) &&
+ isFirstArgumentCompatibleWithType(S.Context, Info.Constructor,
+ ToType);
}
if (Info.ConstructorTmpl)
S.AddTemplateOverloadCandidate(
@@ -3706,7 +3707,7 @@ compareConversionFunctions(Sema &S, FunctionDecl *Function1,
CallOp->getType()->getAs<FunctionProtoType>();
CallingConv CallOpCC =
- CallOp->getType()->getAs<FunctionType>()->getCallConv();
+ CallOp->getType()->castAs<FunctionType>()->getCallConv();
CallingConv DefaultFree = S.Context.getDefaultCallingConvention(
CallOpProto->isVariadic(), /*IsCXXMethod=*/false);
CallingConv DefaultMember = S.Context.getDefaultCallingConvention(
@@ -3927,7 +3928,7 @@ getFixedEnumPromtion(Sema &S, const StandardConversionSequence &SCS) {
if (!FromType->isEnumeralType())
return FixedEnumPromotion::None;
- EnumDecl *Enum = FromType->getAs<EnumType>()->getDecl();
+ EnumDecl *Enum = FromType->castAs<EnumType>()->getDecl();
if (!Enum->isFixed())
return FixedEnumPromotion::None;
@@ -4106,7 +4107,7 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
}
}
- // In Microsoft mode, prefer an integral conversion to a
+ // In Microsoft mode (below 19.28), prefer an integral conversion to a
// floating-to-integral conversion if the integral conversion
// is between types of the same size.
// For example:
@@ -4118,7 +4119,9 @@ CompareStandardConversionSequences(Sema &S, SourceLocation Loc,
// }
// Here, MSVC will call f(int) instead of generating a compile error
// as clang will do in standard mode.
- if (S.getLangOpts().MSVCCompat && SCS1.Second == ICK_Integral_Conversion &&
+ if (S.getLangOpts().MSVCCompat &&
+ !S.getLangOpts().isCompatibleWithMSVC(LangOptions::MSVC2019_8) &&
+ SCS1.Second == ICK_Integral_Conversion &&
SCS2.Second == ICK_Floating_Integral &&
S.Context.getTypeSize(SCS1.getFromType()) ==
S.Context.getTypeSize(SCS1.getToType(2)))
@@ -5431,8 +5434,8 @@ Sema::PerformObjectArgumentInitialization(Expr *From,
DestType = ImplicitParamRecordType;
FromClassification = From->Classify(Context);
- // When performing member access on an rvalue, materialize a temporary.
- if (From->isRValue()) {
+ // When performing member access on a prvalue, materialize a temporary.
+ if (From->isPRValue()) {
From = CreateMaterializeTemporaryExpr(FromRecordType, From,
Method->getRefQualifier() !=
RefQualifierKind::RQ_RValue);
@@ -5631,12 +5634,8 @@ static ExprResult CheckConvertedConstantExpression(Sema &S, Expr *From,
// implicitly converted to type T, where the converted
// expression is a constant expression and the implicit conversion
// sequence contains only [... list of conversions ...].
- // C++1z [stmt.if]p2:
- // If the if statement is of the form if constexpr, the value of the
- // condition shall be a contextually converted constant expression of type
- // bool.
ImplicitConversionSequence ICS =
- CCE == Sema::CCEK_ConstexprIf || CCE == Sema::CCEK_ExplicitBool
+ CCE == Sema::CCEK_ExplicitBool
? TryContextuallyConvertToBool(S, From)
: TryCopyInitialization(S, From, T,
/*SuppressUserConversions=*/false,
@@ -6479,12 +6478,6 @@ void Sema::AddOverloadCandidate(
Candidate.DeductionFailure.Data = FailedAttr;
return;
}
-
- if (LangOpts.OpenCL && isOpenCLDisabledDecl(Function)) {
- Candidate.Viable = false;
- Candidate.FailureKind = ovl_fail_ext_disabled;
- return;
- }
}
ObjCMethodDecl *
@@ -7374,7 +7367,7 @@ void Sema::AddConversionCandidate(
ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
Context.getPointerType(Conversion->getType()),
CK_FunctionToPointerDecay, &ConversionRef,
- VK_RValue, FPOptionsOverride());
+ VK_PRValue, FPOptionsOverride());
QualType ConversionType = Conversion->getConversionType();
if (!isCompleteType(From->getBeginLoc(), ConversionType)) {
@@ -8469,7 +8462,7 @@ public:
// bool operator==(T, T);
// bool operator!=(T, T);
// R operator<=>(T, T)
- void addGenericBinaryPointerOrEnumeralOverloads() {
+ void addGenericBinaryPointerOrEnumeralOverloads(bool IsSpaceship) {
// C++ [over.match.oper]p3:
// [...]the built-in candidates include all of the candidate operator
// functions defined in 13.6 that, compared to the given operator, [...]
@@ -8528,6 +8521,8 @@ public:
// Don't add the same builtin candidate twice.
if (!AddedTypes.insert(S.Context.getCanonicalType(PtrTy)).second)
continue;
+ if (IsSpaceship && PtrTy->isFunctionPointerType())
+ continue;
QualType ParamTypes[2] = {PtrTy, PtrTy};
S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
@@ -8718,7 +8713,7 @@ public:
//
// where LR is the result of the usual arithmetic conversions
// between types L and R.
- void addBinaryBitwiseArithmeticOverloads(OverloadedOperatorKind Op) {
+ void addBinaryBitwiseArithmeticOverloads() {
if (!HasArithmeticOrEnumeralCandidateType)
return;
@@ -9224,18 +9219,20 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_EqualEqual:
case OO_ExclaimEqual:
OpBuilder.addEqualEqualOrNotEqualMemberPointerOrNullptrOverloads();
- LLVM_FALLTHROUGH;
+ OpBuilder.addGenericBinaryPointerOrEnumeralOverloads(/*IsSpaceship=*/false);
+ OpBuilder.addGenericBinaryArithmeticOverloads();
+ break;
case OO_Less:
case OO_Greater:
case OO_LessEqual:
case OO_GreaterEqual:
- OpBuilder.addGenericBinaryPointerOrEnumeralOverloads();
+ OpBuilder.addGenericBinaryPointerOrEnumeralOverloads(/*IsSpaceship=*/false);
OpBuilder.addGenericBinaryArithmeticOverloads();
break;
case OO_Spaceship:
- OpBuilder.addGenericBinaryPointerOrEnumeralOverloads();
+ OpBuilder.addGenericBinaryPointerOrEnumeralOverloads(/*IsSpaceship=*/true);
OpBuilder.addThreeWayArithmeticOverloads();
break;
@@ -9244,7 +9241,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
case OO_Pipe:
case OO_LessLess:
case OO_GreaterGreater:
- OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ OpBuilder.addBinaryBitwiseArithmeticOverloads();
break;
case OO_Amp: // '&' is either unary or binary
@@ -9254,7 +9251,7 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
// operator '->', the built-in candidates set is empty.
break;
- OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ OpBuilder.addBinaryBitwiseArithmeticOverloads();
break;
case OO_Tilde:
@@ -9870,6 +9867,23 @@ bool clang::isBetterOverloadCandidate(
S.IdentifyCUDAPreference(Caller, Cand2.Function);
}
+ // General member function overloading is handled above, so this only handles
+ // constructors with address spaces.
+ // This only handles address spaces since C++ has no other
+ // qualifier that can be used with constructors.
+ const auto *CD1 = dyn_cast_or_null<CXXConstructorDecl>(Cand1.Function);
+ const auto *CD2 = dyn_cast_or_null<CXXConstructorDecl>(Cand2.Function);
+ if (CD1 && CD2) {
+ LangAS AS1 = CD1->getMethodQualifiers().getAddressSpace();
+ LangAS AS2 = CD2->getMethodQualifiers().getAddressSpace();
+ if (AS1 != AS2) {
+ if (Qualifiers::isAddressSpaceSupersetOf(AS2, AS1))
+ return true;
+ if (Qualifiers::isAddressSpaceSupersetOf(AS1, AS2))
+ return false;
+ }
+ }
+
return false;
}
@@ -10242,10 +10256,10 @@ static bool shouldSkipNotingLambdaConversionDecl(FunctionDecl *Fn) {
CXXMethodDecl *CallOp = RD->getLambdaCallOperator();
CallingConv CallOpCC =
- CallOp->getType()->getAs<FunctionType>()->getCallConv();
- QualType ConvRTy = ConvD->getType()->getAs<FunctionType>()->getReturnType();
+ CallOp->getType()->castAs<FunctionType>()->getCallConv();
+ QualType ConvRTy = ConvD->getType()->castAs<FunctionType>()->getReturnType();
CallingConv ConvToCC =
- ConvRTy->getPointeeType()->getAs<FunctionType>()->getCallConv();
+ ConvRTy->getPointeeType()->castAs<FunctionType>()->getCallConv();
return ConvToCC != CallOpCC;
}
@@ -10355,18 +10369,15 @@ void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
const PartialDiagnostic &PDiag) const {
S.Diag(CaretLoc, PDiag)
<< Ambiguous.getFromType() << Ambiguous.getToType();
- // FIXME: The note limiting machinery is borrowed from
- // OverloadCandidateSet::NoteCandidates; there's an opportunity for
- // refactoring here.
- const OverloadsShown ShowOverloads = S.Diags.getShowOverloads();
unsigned CandsShown = 0;
AmbiguousConversionSequence::const_iterator I, E;
for (I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
- if (CandsShown >= 4 && ShowOverloads == Ovl_Best)
+ if (CandsShown >= S.Diags.getNumOverloadCandidatesToShow())
break;
++CandsShown;
S.NoteOverloadCandidate(I->first, I->second);
}
+ S.Diags.overloadCandidatesShown(CandsShown);
if (I != E)
S.Diag(SourceLocation(), diag::note_ovl_too_many_candidates) << int(E - I);
}
@@ -11092,14 +11103,6 @@ static void DiagnoseFailedExplicitSpec(Sema &S, OverloadCandidate *Cand) {
<< (ES.getExpr() ? ES.getExpr()->getSourceRange() : SourceRange());
}
-static void DiagnoseOpenCLExtensionDisabled(Sema &S, OverloadCandidate *Cand) {
- FunctionDecl *Callee = Cand->Function;
-
- S.Diag(Callee->getLocation(),
- diag::note_ovl_candidate_disabled_by_extension)
- << S.getOpenCLExtensionsFromDeclExtMap(Callee);
-}
-
/// Generates a 'note' diagnostic for an overload candidate. We've
/// already generated a primary error at the call site.
///
@@ -11195,9 +11198,6 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
case ovl_fail_explicit:
return DiagnoseFailedExplicitSpec(S, Cand);
- case ovl_fail_ext_disabled:
- return DiagnoseOpenCLExtensionDisabled(S, Cand);
-
case ovl_fail_inhctor_slice:
// It's generally not interesting to note copy/move constructors here.
if (cast<CXXConstructorDecl>(Fn)->isCopyOrMoveConstructor())
@@ -11641,10 +11641,11 @@ bool OverloadCandidateSet::shouldDeferDiags(Sema &S, ArrayRef<Expr *> Args,
CompleteCandidates(S, OCD_AllCandidates, Args, OpLoc, [](auto &Cand) {
return (Cand.Viable == false &&
Cand.FailureKind == ovl_fail_bad_target) ||
- (Cand.Function->template hasAttr<CUDAHostAttr>() &&
+ (Cand.Function &&
+ Cand.Function->template hasAttr<CUDAHostAttr>() &&
Cand.Function->template hasAttr<CUDADeviceAttr>());
});
- DeferHint = WrongSidedCands.size();
+ DeferHint = !WrongSidedCands.empty();
}
return DeferHint;
}
@@ -11677,10 +11678,8 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, ArrayRef<Expr *> Args,
for (; I != E; ++I) {
OverloadCandidate *Cand = *I;
- // Set an arbitrary limit on the number of candidate functions we'll spam
- // the user with. FIXME: This limit should depend on details of the
- // candidate list.
- if (CandsShown >= 4 && ShowOverloads == Ovl_Best) {
+ if (CandsShown >= S.Diags.getNumOverloadCandidatesToShow() &&
+ ShowOverloads == Ovl_Best) {
break;
}
++CandsShown;
@@ -11709,6 +11708,10 @@ void OverloadCandidateSet::NoteCandidates(Sema &S, ArrayRef<Expr *> Args,
}
}
+ // Inform S.Diags that we've shown an overload set with N elements. This may
+ // inform the future value of S.Diags.getNumOverloadCandidatesToShow().
+ S.Diags.overloadCandidatesShown(CandsShown);
+
if (I != E)
S.Diag(OpLoc, diag::note_ovl_too_many_candidates,
shouldDeferDiags(S, Args, OpLoc))
@@ -12998,7 +13001,7 @@ bool Sema::buildOverloadedCallSet(Scope *S, Expr *Fn,
// lookup to instantiation time to be able to search into type dependent
// base classes.
CallExpr *CE =
- CallExpr::Create(Context, Fn, Args, Context.DependentTy, VK_RValue,
+ CallExpr::Create(Context, Fn, Args, Context.DependentTy, VK_PRValue,
RParenLoc, CurFPFeatureOverrides());
CE->markDependentForPostponedNameLookup();
*Result = CE;
@@ -13256,7 +13259,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (Input->isTypeDependent()) {
if (Fns.empty())
return UnaryOperator::Create(Context, Input, Opc, Context.DependentTy,
- VK_RValue, OK_Ordinary, OpLoc, false,
+ VK_PRValue, OK_Ordinary, OpLoc, false,
CurFPFeatureOverrides());
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
@@ -13265,7 +13268,7 @@ Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
if (Fn.isInvalid())
return ExprError();
return CXXOperatorCallExpr::Create(Context, Op, Fn.get(), ArgsArray,
- Context.DependentTy, VK_RValue, OpLoc,
+ Context.DependentTy, VK_PRValue, OpLoc,
CurFPFeatureOverrides());
}
@@ -13513,9 +13516,9 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
Context, Args[0], Args[1], Opc, Context.DependentTy, VK_LValue,
OK_Ordinary, OpLoc, CurFPFeatureOverrides(), Context.DependentTy,
Context.DependentTy);
- return BinaryOperator::Create(Context, Args[0], Args[1], Opc,
- Context.DependentTy, VK_RValue, OK_Ordinary,
- OpLoc, CurFPFeatureOverrides());
+ return BinaryOperator::Create(
+ Context, Args[0], Args[1], Opc, Context.DependentTy, VK_PRValue,
+ OK_Ordinary, OpLoc, CurFPFeatureOverrides());
}
// FIXME: save results of ADL from here?
@@ -13528,7 +13531,7 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Fn.isInvalid())
return ExprError();
return CXXOperatorCallExpr::Create(Context, Op, Fn.get(), Args,
- Context.DependentTy, VK_RValue, OpLoc,
+ Context.DependentTy, VK_PRValue, OpLoc,
CurFPFeatureOverrides());
}
@@ -13716,6 +13719,15 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
if (Op == OO_Equal)
DiagnoseSelfMove(Args[0], Args[1], OpLoc);
+ if (ImplicitThis) {
+ QualType ThisType = Context.getPointerType(ImplicitThis->getType());
+ QualType ThisTypeFromDecl = Context.getPointerType(
+ cast<CXXMethodDecl>(FnDecl)->getThisObjectType());
+
+ CheckArgAlignment(OpLoc, FnDecl, "'this'", ThisType,
+ ThisTypeFromDecl);
+ }
+
checkCall(FnDecl, nullptr, ImplicitThis, ArgsArray,
isa<CXXMethodDecl>(FnDecl), OpLoc, TheCall->getSourceRange(),
VariadicDoesNotApply);
@@ -13809,6 +13821,8 @@ ExprResult Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
StringRef OpcStr = BinaryOperator::getOpcodeStr(Opc);
auto Cands = CandidateSet.CompleteCandidates(*this, OCD_AllCandidates,
Args, OpLoc);
+ DeferDiagsRAII DDR(*this,
+ CandidateSet.shouldDeferDiags(*this, Args, OpLoc));
if (Args[0]->getType()->isRecordType() &&
Opc >= BO_Assign && Opc <= BO_OrAssign) {
Diag(OpLoc, diag::err_ovl_no_viable_oper)
@@ -13998,7 +14012,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// Can't add any actual overloads yet
return CXXOperatorCallExpr::Create(Context, OO_Subscript, Fn.get(), Args,
- Context.DependentTy, VK_RValue, RLoc,
+ Context.DependentTy, VK_PRValue, RLoc,
CurFPFeatureOverrides());
}
@@ -14221,7 +14235,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
Type);
};
if (isa<CXXPseudoDestructorExpr>(NakedMemExpr))
- return CallExpr::Create(Context, MemExprE, Args, Context.VoidTy, VK_RValue,
+ return CallExpr::Create(Context, MemExprE, Args, Context.VoidTy, VK_PRValue,
RParenLoc, CurFPFeatureOverrides());
UnbridgedCastsSet UnbridgedCasts;
@@ -14606,7 +14620,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
// Record usage of conversion in an implicit cast.
Call = ImplicitCastExpr::Create(
Context, Call.get()->getType(), CK_UserDefinedConversion, Call.get(),
- nullptr, VK_RValue, CurFPFeatureOverrides());
+ nullptr, VK_PRValue, CurFPFeatureOverrides());
return BuildCallExpr(S, Call.get(), LParenLoc, Args, RParenLoc);
}
@@ -15074,7 +15088,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
(void)isCompleteType(UnOp->getOperatorLoc(), MemPtrType);
return UnaryOperator::Create(
- Context, SubExpr, UO_AddrOf, MemPtrType, VK_RValue, OK_Ordinary,
+ Context, SubExpr, UO_AddrOf, MemPtrType, VK_PRValue, OK_Ordinary,
UnOp->getOperatorLoc(), false, CurFPFeatureOverrides());
}
}
@@ -15083,10 +15097,10 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (SubExpr == UnOp->getSubExpr())
return UnOp;
- return UnaryOperator::Create(Context, SubExpr, UO_AddrOf,
- Context.getPointerType(SubExpr->getType()),
- VK_RValue, OK_Ordinary, UnOp->getOperatorLoc(),
- false, CurFPFeatureOverrides());
+ return UnaryOperator::Create(
+ Context, SubExpr, UO_AddrOf, Context.getPointerType(SubExpr->getType()),
+ VK_PRValue, OK_Ordinary, UnOp->getOperatorLoc(), false,
+ CurFPFeatureOverrides());
}
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
@@ -15141,7 +15155,7 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
valueKind = VK_LValue;
type = Fn->getType();
} else {
- valueKind = VK_RValue;
+ valueKind = VK_PRValue;
type = Context.BoundMemberTy;
}
diff --git a/clang/lib/Sema/SemaPseudoObject.cpp b/clang/lib/Sema/SemaPseudoObject.cpp
index d17599a6ed14..7fdb34905b61 100644
--- a/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/clang/lib/Sema/SemaPseudoObject.cpp
@@ -835,7 +835,7 @@ ExprResult ObjCPropertyOpBuilder::buildRValueOperation(Expr *op) {
// As a special case, if the method returns 'id', try to get
// a better type from the property.
- if (RefExpr->isExplicitProperty() && result.get()->isRValue()) {
+ if (RefExpr->isExplicitProperty() && result.get()->isPRValue()) {
QualType receiverType = RefExpr->getReceiverType(S.Context);
QualType propType = RefExpr->getExplicitProperty()
->getUsageType(receiverType);
@@ -1554,7 +1554,7 @@ ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
return UnaryOperator::Create(Context, op, opcode, Context.DependentTy,
- VK_RValue, OK_Ordinary, opcLoc, false,
+ VK_PRValue, OK_Ordinary, opcLoc, false,
CurFPFeatureOverrides());
assert(UnaryOperator::isIncrementDecrementOp(opcode));
@@ -1585,7 +1585,7 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
return BinaryOperator::Create(Context, LHS, RHS, opcode,
- Context.DependentTy, VK_RValue, OK_Ordinary,
+ Context.DependentTy, VK_PRValue, OK_Ordinary,
opcLoc, CurFPFeatureOverrides());
// Filter out non-overload placeholder types in the RHS.
diff --git a/clang/lib/Sema/SemaSYCL.cpp b/clang/lib/Sema/SemaSYCL.cpp
index af35052ee1e3..3b48a53efc0d 100644
--- a/clang/lib/Sema/SemaSYCL.cpp
+++ b/clang/lib/Sema/SemaSYCL.cpp
@@ -8,6 +8,7 @@
// This implements Semantic Analysis for SYCL constructs.
//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
#include "clang/Sema/Sema.h"
#include "clang/Sema/SemaDiagnostic.h"
@@ -47,3 +48,35 @@ bool Sema::checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee) {
return DiagKind != SemaDiagnosticBuilder::K_Immediate &&
DiagKind != SemaDiagnosticBuilder::K_ImmediateWithCallStack;
}
+
+// The SYCL kernel's 'object type' used for diagnostics and naming/mangling is
+// the first parameter to a sycl_kernel labeled function template. In SYCL1.2.1,
+// this was passed by value, and in SYCL2020, it is passed by reference.
+static QualType GetSYCLKernelObjectType(const FunctionDecl *KernelCaller) {
+ assert(KernelCaller->getNumParams() > 0 && "Insufficient kernel parameters");
+ QualType KernelParamTy = KernelCaller->getParamDecl(0)->getType();
+
+ // SYCL 2020 kernels are passed by reference.
+ if (KernelParamTy->isReferenceType())
+ return KernelParamTy->getPointeeType();
+
+ // SYCL 1.2.1
+ return KernelParamTy;
+}
+
+void Sema::AddSYCLKernelLambda(const FunctionDecl *FD) {
+ auto MangleCallback = [](ASTContext &Ctx,
+ const NamedDecl *ND) -> llvm::Optional<unsigned> {
+ if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
+ Ctx.AddSYCLKernelNamingDecl(RD);
+ // We always want to go into the lambda mangling (skipping the unnamed
+ // struct version), so make sure we return a value here.
+ return 1;
+ };
+
+ QualType Ty = GetSYCLKernelObjectType(FD);
+ std::unique_ptr<MangleContext> Ctx{ItaniumMangleContext::create(
+ Context, Context.getDiagnostics(), MangleCallback)};
+ llvm::raw_null_ostream Out;
+ Ctx->mangleTypeName(Ty, Out);
+}
diff --git a/clang/lib/Sema/SemaStmt.cpp b/clang/lib/Sema/SemaStmt.cpp
index b24a8ab110b2..3baccec2d7bb 100644
--- a/clang/lib/Sema/SemaStmt.cpp
+++ b/clang/lib/Sema/SemaStmt.cpp
@@ -10,17 +10,16 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Ownership.h"
-#include "clang/Sema/SemaInternal.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTDiagnostic.h"
#include "clang/AST/ASTLambda.h"
-#include "clang/AST/CharUnits.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/CharUnits.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
+#include "clang/AST/IgnoreExpr.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
@@ -30,8 +29,10 @@
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
@@ -541,6 +542,12 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
return SubStmt;
}
+ ReservedIdentifierStatus Status = TheDecl->isReserved(getLangOpts());
+ if (Status != ReservedIdentifierStatus::NotReserved &&
+ !Context.getSourceManager().isInSystemHeader(IdentLoc))
+ Diag(IdentLoc, diag::warn_reserved_extern_symbol)
+ << TheDecl << static_cast<int>(Status);
+
// Otherwise, things are good. Fill in the declaration and return it.
LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
TheDecl->setStmt(LS);
@@ -555,12 +562,286 @@ Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
return LS;
}
-StmtResult Sema::ActOnAttributedStmt(SourceLocation AttrLoc,
- ArrayRef<const Attr*> Attrs,
+StmtResult Sema::BuildAttributedStmt(SourceLocation AttrsLoc,
+ ArrayRef<const Attr *> Attrs,
Stmt *SubStmt) {
- // Fill in the declaration and return it.
- AttributedStmt *LS = AttributedStmt::Create(Context, AttrLoc, Attrs, SubStmt);
- return LS;
+ // FIXME: this code should move when a planned refactoring around statement
+ // attributes lands.
+ for (const auto *A : Attrs) {
+ if (A->getKind() == attr::MustTail) {
+ if (!checkAndRewriteMustTailAttr(SubStmt, *A)) {
+ return SubStmt;
+ }
+ setFunctionHasMustTail();
+ }
+ }
+
+ return AttributedStmt::Create(Context, AttrsLoc, Attrs, SubStmt);
+}
+
+StmtResult Sema::ActOnAttributedStmt(const ParsedAttributesWithRange &Attrs,
+ Stmt *SubStmt) {
+ SmallVector<const Attr *, 1> SemanticAttrs;
+ ProcessStmtAttributes(SubStmt, Attrs, SemanticAttrs);
+ if (!SemanticAttrs.empty())
+ return BuildAttributedStmt(Attrs.Range.getBegin(), SemanticAttrs, SubStmt);
+ // If none of the attributes applied, that's fine, we can recover by
+ // returning the substatement directly instead of making an AttributedStmt
+ // with no attributes on it.
+ return SubStmt;
+}
+
+bool Sema::checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA) {
+ ReturnStmt *R = cast<ReturnStmt>(St);
+ Expr *E = R->getRetValue();
+
+ if (CurContext->isDependentContext() || (E && E->isInstantiationDependent()))
+ // We have to suspend our check until template instantiation time.
+ return true;
+
+ if (!checkMustTailAttr(St, MTA))
+ return false;
+
+ // FIXME: Replace Expr::IgnoreImplicitAsWritten() with this function.
+ // Currently it does not skip implicit constructors in an initialization
+ // context.
+ auto IgnoreImplicitAsWritten = [](Expr *E) -> Expr * {
+ return IgnoreExprNodes(E, IgnoreImplicitAsWrittenSingleStep,
+ IgnoreElidableImplicitConstructorSingleStep);
+ };
+
+ // Now that we have verified that 'musttail' is valid here, rewrite the
+ // return value to remove all implicit nodes, but retain parentheses.
+ R->setRetValue(IgnoreImplicitAsWritten(E));
+ return true;
+}
+
+bool Sema::checkMustTailAttr(const Stmt *St, const Attr &MTA) {
+ assert(!CurContext->isDependentContext() &&
+ "musttail cannot be checked from a dependent context");
+
+ // FIXME: Add Expr::IgnoreParenImplicitAsWritten() with this definition.
+ auto IgnoreParenImplicitAsWritten = [](const Expr *E) -> const Expr * {
+ return IgnoreExprNodes(const_cast<Expr *>(E), IgnoreParensSingleStep,
+ IgnoreImplicitAsWrittenSingleStep,
+ IgnoreElidableImplicitConstructorSingleStep);
+ };
+
+ const Expr *E = cast<ReturnStmt>(St)->getRetValue();
+ const auto *CE = dyn_cast_or_null<CallExpr>(IgnoreParenImplicitAsWritten(E));
+
+ if (!CE) {
+ Diag(St->getBeginLoc(), diag::err_musttail_needs_call) << &MTA;
+ return false;
+ }
+
+ if (const auto *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ if (EWC->cleanupsHaveSideEffects()) {
+ Diag(St->getBeginLoc(), diag::err_musttail_needs_trivial_args) << &MTA;
+ return false;
+ }
+ }
+
+ // We need to determine the full function type (including "this" type, if any)
+ // for both caller and callee.
+ struct FuncType {
+ enum {
+ ft_non_member,
+ ft_static_member,
+ ft_non_static_member,
+ ft_pointer_to_member,
+ } MemberType = ft_non_member;
+
+ QualType This;
+ const FunctionProtoType *Func;
+ const CXXMethodDecl *Method = nullptr;
+ } CallerType, CalleeType;
+
+ auto GetMethodType = [this, St, MTA](const CXXMethodDecl *CMD, FuncType &Type,
+ bool IsCallee) -> bool {
+ if (isa<CXXConstructorDecl, CXXDestructorDecl>(CMD)) {
+ Diag(St->getBeginLoc(), diag::err_musttail_structors_forbidden)
+ << IsCallee << isa<CXXDestructorDecl>(CMD);
+ if (IsCallee)
+ Diag(CMD->getBeginLoc(), diag::note_musttail_structors_forbidden)
+ << isa<CXXDestructorDecl>(CMD);
+ Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA;
+ return false;
+ }
+ if (CMD->isStatic())
+ Type.MemberType = FuncType::ft_static_member;
+ else {
+ Type.This = CMD->getThisType()->getPointeeType();
+ Type.MemberType = FuncType::ft_non_static_member;
+ }
+ Type.Func = CMD->getType()->castAs<FunctionProtoType>();
+ return true;
+ };
+
+ const auto *CallerDecl = dyn_cast<FunctionDecl>(CurContext);
+
+ // Find caller function signature.
+ if (!CallerDecl) {
+ int ContextType;
+ if (isa<BlockDecl>(CurContext))
+ ContextType = 0;
+ else if (isa<ObjCMethodDecl>(CurContext))
+ ContextType = 1;
+ else
+ ContextType = 2;
+ Diag(St->getBeginLoc(), diag::err_musttail_forbidden_from_this_context)
+ << &MTA << ContextType;
+ return false;
+ } else if (const auto *CMD = dyn_cast<CXXMethodDecl>(CurContext)) {
+ // Caller is a class/struct method.
+ if (!GetMethodType(CMD, CallerType, false))
+ return false;
+ } else {
+ // Caller is a non-method function.
+ CallerType.Func = CallerDecl->getType()->getAs<FunctionProtoType>();
+ }
+
+ const Expr *CalleeExpr = CE->getCallee()->IgnoreParens();
+ const auto *CalleeBinOp = dyn_cast<BinaryOperator>(CalleeExpr);
+ SourceLocation CalleeLoc = CE->getCalleeDecl()
+ ? CE->getCalleeDecl()->getBeginLoc()
+ : St->getBeginLoc();
+
+ // Find callee function signature.
+ if (const CXXMethodDecl *CMD =
+ dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl())) {
+ // Call is: obj.method(), obj->method(), functor(), etc.
+ if (!GetMethodType(CMD, CalleeType, true))
+ return false;
+ } else if (CalleeBinOp && CalleeBinOp->isPtrMemOp()) {
+ // Call is: obj->*method_ptr or obj.*method_ptr
+ const auto *MPT =
+ CalleeBinOp->getRHS()->getType()->castAs<MemberPointerType>();
+ CalleeType.This = QualType(MPT->getClass(), 0);
+ CalleeType.Func = MPT->getPointeeType()->castAs<FunctionProtoType>();
+ CalleeType.MemberType = FuncType::ft_pointer_to_member;
+ } else if (isa<CXXPseudoDestructorExpr>(CalleeExpr)) {
+ Diag(St->getBeginLoc(), diag::err_musttail_structors_forbidden)
+ << /* IsCallee = */ 1 << /* IsDestructor = */ 1;
+ Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA;
+ return false;
+ } else {
+ // Non-method function.
+ CalleeType.Func =
+ CalleeExpr->getType()->getPointeeType()->getAs<FunctionProtoType>();
+ }
+
+ // Both caller and callee must have a prototype (no K&R declarations).
+ if (!CalleeType.Func || !CallerType.Func) {
+ Diag(St->getBeginLoc(), diag::err_musttail_needs_prototype) << &MTA;
+ if (!CalleeType.Func && CE->getDirectCallee()) {
+ Diag(CE->getDirectCallee()->getBeginLoc(),
+ diag::note_musttail_fix_non_prototype);
+ }
+ if (!CallerType.Func)
+ Diag(CallerDecl->getBeginLoc(), diag::note_musttail_fix_non_prototype);
+ return false;
+ }
+
+ // Caller and callee must have matching calling conventions.
+ //
+ // Some calling conventions are physically capable of supporting tail calls
+ // even if the function types don't perfectly match. LLVM is currently too
+ // strict to allow this, but if LLVM added support for this in the future, we
+ // could exit early here and skip the remaining checks if the functions are
+ // using such a calling convention.
+ if (CallerType.Func->getCallConv() != CalleeType.Func->getCallConv()) {
+ if (const auto *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl()))
+ Diag(St->getBeginLoc(), diag::err_musttail_callconv_mismatch)
+ << true << ND->getDeclName();
+ else
+ Diag(St->getBeginLoc(), diag::err_musttail_callconv_mismatch) << false;
+ Diag(CalleeLoc, diag::note_musttail_callconv_mismatch)
+ << FunctionType::getNameForCallConv(CallerType.Func->getCallConv())
+ << FunctionType::getNameForCallConv(CalleeType.Func->getCallConv());
+ Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA;
+ return false;
+ }
+
+ if (CalleeType.Func->isVariadic() || CallerType.Func->isVariadic()) {
+ Diag(St->getBeginLoc(), diag::err_musttail_no_variadic) << &MTA;
+ return false;
+ }
+
+ // Caller and callee must match in whether they have a "this" parameter.
+ if (CallerType.This.isNull() != CalleeType.This.isNull()) {
+ if (const auto *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
+ Diag(St->getBeginLoc(), diag::err_musttail_member_mismatch)
+ << CallerType.MemberType << CalleeType.MemberType << true
+ << ND->getDeclName();
+ Diag(CalleeLoc, diag::note_musttail_callee_defined_here)
+ << ND->getDeclName();
+ } else
+ Diag(St->getBeginLoc(), diag::err_musttail_member_mismatch)
+ << CallerType.MemberType << CalleeType.MemberType << false;
+ Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA;
+ return false;
+ }
+
+ auto CheckTypesMatch = [this](FuncType CallerType, FuncType CalleeType,
+ PartialDiagnostic &PD) -> bool {
+ enum {
+ ft_different_class,
+ ft_parameter_arity,
+ ft_parameter_mismatch,
+ ft_return_type,
+ };
+
+ auto DoTypesMatch = [this, &PD](QualType A, QualType B,
+ unsigned Select) -> bool {
+ if (!Context.hasSimilarType(A, B)) {
+ PD << Select << A.getUnqualifiedType() << B.getUnqualifiedType();
+ return false;
+ }
+ return true;
+ };
+
+ if (!CallerType.This.isNull() &&
+ !DoTypesMatch(CallerType.This, CalleeType.This, ft_different_class))
+ return false;
+
+ if (!DoTypesMatch(CallerType.Func->getReturnType(),
+ CalleeType.Func->getReturnType(), ft_return_type))
+ return false;
+
+ if (CallerType.Func->getNumParams() != CalleeType.Func->getNumParams()) {
+ PD << ft_parameter_arity << CallerType.Func->getNumParams()
+ << CalleeType.Func->getNumParams();
+ return false;
+ }
+
+ ArrayRef<QualType> CalleeParams = CalleeType.Func->getParamTypes();
+ ArrayRef<QualType> CallerParams = CallerType.Func->getParamTypes();
+ size_t N = CallerType.Func->getNumParams();
+ for (size_t I = 0; I < N; I++) {
+ if (!DoTypesMatch(CalleeParams[I], CallerParams[I],
+ ft_parameter_mismatch)) {
+ PD << static_cast<int>(I) + 1;
+ return false;
+ }
+ }
+
+ return true;
+ };
+
+ PartialDiagnostic PD = PDiag(diag::note_musttail_mismatch);
+ if (!CheckTypesMatch(CallerType, CalleeType, PD)) {
+ if (const auto *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl()))
+ Diag(St->getBeginLoc(), diag::err_musttail_mismatch)
+ << true << ND->getDeclName();
+ else
+ Diag(St->getBeginLoc(), diag::err_musttail_mismatch) << false;
+ Diag(CalleeLoc, PD);
+ Diag(MTA.getLocation(), diag::note_tail_call_required) << &MTA;
+ return false;
+ }
+
+ return true;
}
namespace {
@@ -586,7 +867,7 @@ StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Cond = ConditionResult(
*this, nullptr,
MakeFullExpr(new (Context) OpaqueValueExpr(SourceLocation(),
- Context.BoolTy, VK_RValue),
+ Context.BoolTy, VK_PRValue),
IfLoc),
false);
@@ -825,8 +1106,8 @@ static void checkCaseValue(Sema &S, SourceLocation Loc, const llvm::APSInt &Val,
// type versus "switch expression cannot have this value". Use proper
// IntRange checking rather than just looking at the unpromoted type here.
if (ConvVal != Val)
- S.Diag(Loc, diag::warn_case_value_overflow) << Val.toString(10)
- << ConvVal.toString(10);
+ S.Diag(Loc, diag::warn_case_value_overflow) << toString(Val, 10)
+ << toString(ConvVal, 10);
}
}
@@ -1051,12 +1332,12 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (PrevString == CurrString)
Diag(CaseVals[i].second->getLHS()->getBeginLoc(),
diag::err_duplicate_case)
- << (PrevString.empty() ? StringRef(CaseValStr) : PrevString);
+ << (PrevString.empty() ? CaseValStr.str() : PrevString);
else
Diag(CaseVals[i].second->getLHS()->getBeginLoc(),
diag::err_duplicate_case_differing_expr)
- << (PrevString.empty() ? StringRef(CaseValStr) : PrevString)
- << (CurrString.empty() ? StringRef(CaseValStr) : CurrString)
+ << (PrevString.empty() ? CaseValStr.str() : PrevString)
+ << (CurrString.empty() ? CaseValStr.str() : CurrString)
<< CaseValStr;
Diag(CaseVals[i - 1].second->getLHS()->getBeginLoc(),
@@ -1151,7 +1432,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
if (OverlapStmt) {
// If we have a duplicate, report it.
Diag(CR->getLHS()->getBeginLoc(), diag::err_duplicate_case)
- << OverlapVal.toString(10);
+ << toString(OverlapVal, 10);
Diag(OverlapStmt->getLHS()->getBeginLoc(),
diag::note_duplicate_case_prev);
// FIXME: We really want to remove the bogus case stmt from the
@@ -1167,7 +1448,7 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// TODO: it would be nice if we printed enums as enums, chars as
// chars, etc.
Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition)
- << ConstantCondValue.toString(10)
+ << toString(ConstantCondValue, 10)
<< CondExpr->getSourceRange();
}
@@ -1180,7 +1461,8 @@ Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
// If switch has default case, then ignore it.
if (!CaseListIsErroneous && !CaseListIsIncomplete && !HasConstantCond &&
- ET && ET->getDecl()->isCompleteDefinition()) {
+ ET && ET->getDecl()->isCompleteDefinition() &&
+ !empty(ET->getDecl()->enumerators())) {
const EnumDecl *ED = ET->getDecl();
EnumValsTy EnumVals;
@@ -1990,7 +2272,7 @@ Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
// If the type contained 'auto', deduce the 'auto' to 'id'.
if (FirstType->getContainedAutoType()) {
OpaqueValueExpr OpaqueId(D->getLocation(), Context.getObjCIdType(),
- VK_RValue);
+ VK_PRValue);
Expr *DeducedInit = &OpaqueId;
if (DeduceAutoType(D->getTypeSourceInfo(), DeducedInit, FirstType) ==
DAR_Failed)
@@ -3000,6 +3282,12 @@ Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
// C99 6.8.6.2p1: A break shall appear only in or as a loop body.
return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
}
+ if (S->getFlags() & Scope::ConditionVarScope) {
+ // We cannot 'continue;' from within a statement expression in the
+ // initializer of a condition variable because we would jump past the
+ // initialization of that variable.
+ return StmtError(Diag(ContinueLoc, diag::err_continue_from_cond_var_init));
+ }
CheckJumpOutOfSEHFinally(*this, ContinueLoc, *S);
return new (Context) ContinueStmt(ContinueLoc);
@@ -3020,295 +3308,205 @@ Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
return new (Context) BreakStmt(BreakLoc);
}
-/// Determine whether the given expression is a candidate for
-/// copy elision in either a return statement or a throw expression.
-///
-/// \param ReturnType If we're determining the copy elision candidate for
-/// a return statement, this is the return type of the function. If we're
-/// determining the copy elision candidate for a throw expression, this will
-/// be a NULL type.
+/// Determine whether the given expression might be move-eligible or
+/// copy-elidable in either a (co_)return statement or throw expression,
+/// without considering function return type, if applicable.
///
-/// \param E The expression being returned from the function or block, or
-/// being thrown.
+/// \param E The expression being returned from the function or block,
+/// being thrown, or being co_returned from a coroutine. This expression
+/// might be modified by the implementation.
///
-/// \param CESK Whether we allow function parameters or
-/// id-expressions that could be moved out of the function to be considered NRVO
-/// candidates. C++ prohibits these for NRVO itself, but we re-use this logic to
-/// determine whether we should try to move as part of a return or throw (which
-/// does allow function parameters).
+/// \param ForceCXX2b Overrides detection of current language mode
+/// and uses the rules for C++2b.
///
-/// \returns The NRVO candidate variable, if the return statement may use the
-/// NRVO, or NULL if there is no such candidate.
-VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType, Expr *E,
- CopyElisionSemanticsKind CESK) {
+/// \returns An aggregate which contains the Candidate and isMoveEligible
+/// and isCopyElidable methods. If Candidate is non-null, it means
+/// isMoveEligible() would be true under the most permissive language standard.
+Sema::NamedReturnInfo Sema::getNamedReturnInfo(Expr *&E,
+ SimplerImplicitMoveMode Mode) {
+ if (!E)
+ return NamedReturnInfo();
// - in a return statement in a function [where] ...
// ... the expression is the name of a non-volatile automatic object ...
- DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
+ const auto *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
if (!DR || DR->refersToEnclosingVariableOrCapture())
- return nullptr;
- VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ return NamedReturnInfo();
+ const auto *VD = dyn_cast<VarDecl>(DR->getDecl());
if (!VD)
- return nullptr;
-
- if (isCopyElisionCandidate(ReturnType, VD, CESK))
- return VD;
- return nullptr;
+ return NamedReturnInfo();
+ NamedReturnInfo Res = getNamedReturnInfo(VD);
+ if (Res.Candidate && !E->isXValue() &&
+ (Mode == SimplerImplicitMoveMode::ForceOn ||
+ (Mode != SimplerImplicitMoveMode::ForceOff &&
+ getLangOpts().CPlusPlus2b))) {
+ E = ImplicitCastExpr::Create(Context, VD->getType().getNonReferenceType(),
+ CK_NoOp, E, nullptr, VK_XValue,
+ FPOptionsOverride());
+ }
+ return Res;
}
-bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
- CopyElisionSemanticsKind CESK) {
- QualType VDType = VD->getType();
+/// Determine whether the given NRVO candidate variable is move-eligible or
+/// copy-elidable, without considering function return type.
+///
+/// \param VD The NRVO candidate variable.
+///
+/// \returns An aggregate which contains the Candidate and isMoveEligible
+/// and isCopyElidable methods. If Candidate is non-null, it means
+/// isMoveEligible() would be true under the most permissive language standard.
+Sema::NamedReturnInfo Sema::getNamedReturnInfo(const VarDecl *VD) {
+ NamedReturnInfo Info{VD, NamedReturnInfo::MoveEligibleAndCopyElidable};
+
+ // C++20 [class.copy.elision]p3:
// - in a return statement in a function with ...
- // ... a class return type ...
- if (!ReturnType.isNull() && !ReturnType->isDependentType()) {
- if (!ReturnType->isRecordType())
- return false;
- // ... the same cv-unqualified type as the function return type ...
- // When considering moving this expression out, allow dissimilar types.
- if (!(CESK & CES_AllowDifferentTypes) && !VDType->isDependentType() &&
- !Context.hasSameUnqualifiedType(ReturnType, VDType))
- return false;
- }
+ // (other than a function ... parameter)
+ if (VD->getKind() == Decl::ParmVar)
+ Info.S = NamedReturnInfo::MoveEligible;
+ else if (VD->getKind() != Decl::Var)
+ return NamedReturnInfo();
- // ...object (other than a function or catch-clause parameter)...
- if (VD->getKind() != Decl::Var &&
- !((CESK & CES_AllowParameters) && VD->getKind() == Decl::ParmVar))
- return false;
- if (!(CESK & CES_AllowExceptionVariables) && VD->isExceptionVariable())
- return false;
+ // (other than ... a catch-clause parameter)
+ if (VD->isExceptionVariable())
+ Info.S = NamedReturnInfo::MoveEligible;
// ...automatic...
- if (!VD->hasLocalStorage()) return false;
-
- // Return false if VD is a __block variable. We don't want to implicitly move
- // out of a __block variable during a return because we cannot assume the
- // variable will no longer be used.
- if (VD->hasAttr<BlocksAttr>()) return false;
+ if (!VD->hasLocalStorage())
+ return NamedReturnInfo();
- // ...non-volatile...
- if (VD->getType().isVolatileQualified())
- return false;
+ // We don't want to implicitly move out of a __block variable during a return
+ // because we cannot assume the variable will no longer be used.
+ if (VD->hasAttr<BlocksAttr>())
+ return NamedReturnInfo();
- if (CESK & CES_AllowDifferentTypes)
- return true;
+ QualType VDType = VD->getType();
+ if (VDType->isObjectType()) {
+ // C++17 [class.copy.elision]p3:
+ // ...non-volatile automatic object...
+ if (VDType.isVolatileQualified())
+ return NamedReturnInfo();
+ } else if (VDType->isRValueReferenceType()) {
+ // C++20 [class.copy.elision]p3:
+ // ...either a non-volatile object or an rvalue reference to a non-volatile
+ // object type...
+ QualType VDReferencedType = VDType.getNonReferenceType();
+ if (VDReferencedType.isVolatileQualified() ||
+ !VDReferencedType->isObjectType())
+ return NamedReturnInfo();
+ Info.S = NamedReturnInfo::MoveEligible;
+ } else {
+ return NamedReturnInfo();
+ }
// Variables with higher required alignment than their type's ABI
// alignment cannot use NRVO.
- if (!VD->getType()->isDependentType() && VD->hasAttr<AlignedAttr>() &&
- Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VD->getType()))
- return false;
+ if (!VD->hasDependentAlignment() &&
+ Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VDType))
+ Info.S = NamedReturnInfo::MoveEligible;
- return true;
+ return Info;
}
-/// Try to perform the initialization of a potentially-movable value,
-/// which is the operand to a return or throw statement.
-///
-/// This routine implements C++14 [class.copy]p32, which attempts to treat
-/// returned lvalues as rvalues in certain cases (to prefer move construction),
-/// then falls back to treating them as lvalues if that failed.
-///
-/// \param ConvertingConstructorsOnly If true, follow [class.copy]p32 and reject
-/// resolutions that find non-constructors, such as derived-to-base conversions
-/// or `operator T()&&` member functions. If false, do consider such
-/// conversion sequences.
+/// Updates given NamedReturnInfo's move-eligible and
+/// copy-elidable statuses, considering the function
+/// return type criteria as applicable to return statements.
///
-/// \param Res We will fill this in if move-initialization was possible.
-/// If move-initialization is not possible, such that we must fall back to
-/// treating the operand as an lvalue, we will leave Res in its original
-/// invalid state.
+/// \param Info The NamedReturnInfo object to update.
///
-/// \returns Whether we need to do the second overload resolution. If the first
-/// overload resolution fails, or if the first overload resolution succeeds but
-/// the selected constructor/operator doesn't match the additional criteria, we
-/// need to do the second overload resolution.
-static bool TryMoveInitialization(Sema &S, const InitializedEntity &Entity,
- const VarDecl *NRVOCandidate,
- QualType ResultType, Expr *&Value,
- bool ConvertingConstructorsOnly,
- bool IsDiagnosticsCheck, ExprResult &Res) {
- ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
- CK_NoOp, Value, VK_XValue, FPOptionsOverride());
-
- Expr *InitExpr = &AsRvalue;
-
- InitializationKind Kind = InitializationKind::CreateCopy(
- Value->getBeginLoc(), Value->getBeginLoc());
-
- InitializationSequence Seq(S, Entity, Kind, InitExpr);
-
- bool NeedSecondOverloadResolution = true;
- if (!Seq &&
- (IsDiagnosticsCheck || Seq.getFailedOverloadResult() != OR_Deleted)) {
- return NeedSecondOverloadResolution;
- }
-
- for (const InitializationSequence::Step &Step : Seq.steps()) {
- if (Step.Kind != InitializationSequence::SK_ConstructorInitialization &&
- Step.Kind != InitializationSequence::SK_UserConversion)
- continue;
+/// \param ReturnType This is the return type of the function.
+/// \returns The copy elision candidate, in case the initial return expression
+/// was copy elidable, or nullptr otherwise.
+const VarDecl *Sema::getCopyElisionCandidate(NamedReturnInfo &Info,
+ QualType ReturnType) {
+ if (!Info.Candidate)
+ return nullptr;
- FunctionDecl *FD = Step.Function.Function;
- if (ConvertingConstructorsOnly) {
- if (isa<CXXConstructorDecl>(FD)) {
- // C++14 [class.copy]p32:
- // [...] If the first overload resolution fails or was not performed,
- // or if the type of the first parameter of the selected constructor
- // is not an rvalue reference to the object's type (possibly
- // cv-qualified), overload resolution is performed again, considering
- // the object as an lvalue.
- const RValueReferenceType *RRefType =
- FD->getParamDecl(0)->getType()->getAs<RValueReferenceType>();
- if (!RRefType)
- break;
- if (!S.Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
- NRVOCandidate->getType()))
- break;
- } else {
- continue;
- }
- } else {
- if (isa<CXXConstructorDecl>(FD)) {
- // Check that overload resolution selected a constructor taking an
- // rvalue reference. If it selected an lvalue reference, then we
- // didn't need to cast this thing to an rvalue in the first place.
- if (!isa<RValueReferenceType>(FD->getParamDecl(0)->getType()))
- break;
- } else if (isa<CXXMethodDecl>(FD)) {
- // Check that overload resolution selected a conversion operator
- // taking an rvalue reference.
- if (cast<CXXMethodDecl>(FD)->getRefQualifier() != RQ_RValue)
- break;
- } else {
- continue;
- }
- }
+ auto invalidNRVO = [&] {
+ Info = NamedReturnInfo();
+ return nullptr;
+ };
- NeedSecondOverloadResolution = false;
- // Promote "AsRvalue" to the heap, since we now need this
- // expression node to persist.
- Value =
- ImplicitCastExpr::Create(S.Context, Value->getType(), CK_NoOp, Value,
- nullptr, VK_XValue, FPOptionsOverride());
+ // If we got a non-deduced auto ReturnType, we are in a dependent context and
+ // there is no point in allowing copy elision since we won't have it deduced
+ // by the point the VarDecl is instantiated, which is the last chance we have
+ // of deciding if the candidate is really copy elidable.
+ if ((ReturnType->getTypeClass() == Type::TypeClass::Auto &&
+ ReturnType->isCanonicalUnqualified()) ||
+ ReturnType->isSpecificBuiltinType(BuiltinType::Dependent))
+ return invalidNRVO();
+
+ if (!ReturnType->isDependentType()) {
+ // - in a return statement in a function with ...
+ // ... a class return type ...
+ if (!ReturnType->isRecordType())
+ return invalidNRVO();
- // Complete type-checking the initialization of the return type
- // using the constructor we found.
- Res = Seq.Perform(S, Entity, Kind, Value);
+ QualType VDType = Info.Candidate->getType();
+ // ... the same cv-unqualified type as the function return type ...
+ // When considering moving this expression out, allow dissimilar types.
+ if (!VDType->isDependentType() &&
+ !Context.hasSameUnqualifiedType(ReturnType, VDType))
+ Info.S = NamedReturnInfo::MoveEligible;
}
+ return Info.isCopyElidable() ? Info.Candidate : nullptr;
+}
- return NeedSecondOverloadResolution;
+/// Verify that the initialization sequence that was picked for the
+/// first overload resolution is permissible under C++98.
+///
+/// Reject (possibly converting) constructors not taking an rvalue reference,
+/// or user conversion operators which are not ref-qualified.
+static bool
+VerifyInitializationSequenceCXX98(const Sema &S,
+ const InitializationSequence &Seq) {
+ const auto *Step = llvm::find_if(Seq.steps(), [](const auto &Step) {
+ return Step.Kind == InitializationSequence::SK_ConstructorInitialization ||
+ Step.Kind == InitializationSequence::SK_UserConversion;
+ });
+ if (Step != Seq.step_end()) {
+ const auto *FD = Step->Function.Function;
+ if (isa<CXXConstructorDecl>(FD)
+ ? !FD->getParamDecl(0)->getType()->isRValueReferenceType()
+ : cast<CXXMethodDecl>(FD)->getRefQualifier() == RQ_None)
+ return false;
+ }
+ return true;
}
/// Perform the initialization of a potentially-movable value, which
/// is the result of return value.
///
-/// This routine implements C++14 [class.copy]p32, which attempts to treat
-/// returned lvalues as rvalues in certain cases (to prefer move construction),
-/// then falls back to treating them as lvalues if that failed.
-ExprResult
-Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
- const VarDecl *NRVOCandidate,
- QualType ResultType,
- Expr *Value,
- bool AllowNRVO) {
- // C++14 [class.copy]p32:
- // When the criteria for elision of a copy/move operation are met, but not for
- // an exception-declaration, and the object to be copied is designated by an
- // lvalue, or when the expression in a return statement is a (possibly
- // parenthesized) id-expression that names an object with automatic storage
- // duration declared in the body or parameter-declaration-clause of the
- // innermost enclosing function or lambda-expression, overload resolution to
- // select the constructor for the copy is first performed as if the object
- // were designated by an rvalue.
- ExprResult Res = ExprError();
- bool NeedSecondOverloadResolution = true;
-
- if (AllowNRVO) {
- bool AffectedByCWG1579 = false;
-
- if (!NRVOCandidate) {
- NRVOCandidate = getCopyElisionCandidate(ResultType, Value, CES_Default);
- if (NRVOCandidate &&
- !getDiagnostics().isIgnored(diag::warn_return_std_move_in_cxx11,
- Value->getExprLoc())) {
- const VarDecl *NRVOCandidateInCXX11 =
- getCopyElisionCandidate(ResultType, Value, CES_FormerDefault);
- AffectedByCWG1579 = (!NRVOCandidateInCXX11);
- }
- }
-
- if (NRVOCandidate) {
- NeedSecondOverloadResolution = TryMoveInitialization(
- *this, Entity, NRVOCandidate, ResultType, Value, true, false, Res);
- }
-
- if (!NeedSecondOverloadResolution && AffectedByCWG1579) {
- QualType QT = NRVOCandidate->getType();
- if (QT.getNonReferenceType().getUnqualifiedType().isTriviallyCopyableType(
- Context)) {
- // Adding 'std::move' around a trivially copyable variable is probably
- // pointless. Don't suggest it.
- } else {
- // Common cases for this are returning unique_ptr<Derived> from a
- // function of return type unique_ptr<Base>, or returning T from a
- // function of return type Expected<T>. This is totally fine in a
- // post-CWG1579 world, but was not fine before.
- assert(!ResultType.isNull());
- SmallString<32> Str;
- Str += "std::move(";
- Str += NRVOCandidate->getDeclName().getAsString();
- Str += ")";
- Diag(Value->getExprLoc(), diag::warn_return_std_move_in_cxx11)
- << Value->getSourceRange() << NRVOCandidate->getDeclName()
- << ResultType << QT;
- Diag(Value->getExprLoc(), diag::note_add_std_move_in_cxx11)
- << FixItHint::CreateReplacement(Value->getSourceRange(), Str);
- }
- } else if (NeedSecondOverloadResolution &&
- !getDiagnostics().isIgnored(diag::warn_return_std_move,
- Value->getExprLoc())) {
- const VarDecl *FakeNRVOCandidate =
- getCopyElisionCandidate(QualType(), Value, CES_AsIfByStdMove);
- if (FakeNRVOCandidate) {
- QualType QT = FakeNRVOCandidate->getType();
- if (QT->isLValueReferenceType()) {
- // Adding 'std::move' around an lvalue reference variable's name is
- // dangerous. Don't suggest it.
- } else if (QT.getNonReferenceType()
- .getUnqualifiedType()
- .isTriviallyCopyableType(Context)) {
- // Adding 'std::move' around a trivially copyable variable is probably
- // pointless. Don't suggest it.
- } else {
- ExprResult FakeRes = ExprError();
- Expr *FakeValue = Value;
- TryMoveInitialization(*this, Entity, FakeNRVOCandidate, ResultType,
- FakeValue, false, true, FakeRes);
- if (!FakeRes.isInvalid()) {
- bool IsThrow =
- (Entity.getKind() == InitializedEntity::EK_Exception);
- SmallString<32> Str;
- Str += "std::move(";
- Str += FakeNRVOCandidate->getDeclName().getAsString();
- Str += ")";
- Diag(Value->getExprLoc(), diag::warn_return_std_move)
- << Value->getSourceRange()
- << FakeNRVOCandidate->getDeclName() << IsThrow;
- Diag(Value->getExprLoc(), diag::note_add_std_move)
- << FixItHint::CreateReplacement(Value->getSourceRange(), Str);
- }
- }
- }
+/// This routine implements C++20 [class.copy.elision]p3, which attempts to
+/// treat returned lvalues as rvalues in certain cases (to prefer move
+/// construction), then falls back to treating them as lvalues if that failed.
+ExprResult Sema::PerformMoveOrCopyInitialization(
+ const InitializedEntity &Entity, const NamedReturnInfo &NRInfo, Expr *Value,
+ bool SupressSimplerImplicitMoves) {
+ if ((!getLangOpts().CPlusPlus2b || SupressSimplerImplicitMoves) &&
+ NRInfo.isMoveEligible()) {
+ ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack, Value->getType(),
+ CK_NoOp, Value, VK_XValue, FPOptionsOverride());
+ Expr *InitExpr = &AsRvalue;
+ auto Kind = InitializationKind::CreateCopy(Value->getBeginLoc(),
+ Value->getBeginLoc());
+ InitializationSequence Seq(*this, Entity, Kind, InitExpr);
+ auto Res = Seq.getFailedOverloadResult();
+ if ((Res == OR_Success || Res == OR_Deleted) &&
+ (getLangOpts().CPlusPlus11 ||
+ VerifyInitializationSequenceCXX98(*this, Seq))) {
+ // Promote "AsRvalue" to the heap, since we now need this
+ // expression node to persist.
+ Value =
+ ImplicitCastExpr::Create(Context, Value->getType(), CK_NoOp, Value,
+ nullptr, VK_XValue, FPOptionsOverride());
+ // Complete type-checking the initialization of the return type
+ // using the constructor we found.
+ return Seq.Perform(*this, Entity, Kind, Value);
}
}
-
// Either we didn't meet the criteria for treating an lvalue as an rvalue,
// above, or overload resolution failed. Either way, we need to try
// (again) now with the return value expression as written.
- if (NeedSecondOverloadResolution)
- Res = PerformCopyInitialization(Entity, SourceLocation(), Value);
-
- return Res;
+ return PerformCopyInitialization(Entity, SourceLocation(), Value);
}
/// Determine whether the declared return type of the specified function
@@ -3322,8 +3520,10 @@ static bool hasDeducedReturnType(FunctionDecl *FD) {
/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
/// for capturing scopes.
///
-StmtResult
-Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
+ Expr *RetValExp,
+ NamedReturnInfo &NRInfo,
+ bool SupressSimplerImplicitMoves) {
// If this is the first return we've seen, infer the return type.
// [expr.prim.lambda]p4 in C++11; block literals follow the same rules.
CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
@@ -3402,7 +3602,7 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
if (CurCap->ReturnType.isNull())
CurCap->ReturnType = FnRetType;
}
- assert(!FnRetType.isNull());
+ const VarDecl *NRVOCandidate = getCopyElisionCandidate(NRInfo, FnRetType);
if (auto *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
if (CurBlock->FunctionType->castAs<FunctionType>()->getNoReturnAttr()) {
@@ -3425,7 +3625,6 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// Otherwise, verify that this result type matches the previous one. We are
// pickier with blocks than for normal functions because we don't have GCC
// compatibility to worry about here.
- const VarDecl *NRVOCandidate = nullptr;
if (FnRetType->isDependentType()) {
// Delay processing for now. TODO: there are lots of dependent
// types we can conclusively prove aren't void.
@@ -3453,20 +3652,16 @@ Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// In C++ the return statement is handled via a copy initialization.
// the C version of which boils down to CheckSingleAssignmentConstraints.
- NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
- InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
- FnRetType,
- NRVOCandidate != nullptr);
- ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
- FnRetType, RetValExp);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(
+ ReturnLoc, FnRetType, NRVOCandidate != nullptr);
+ ExprResult Res = PerformMoveOrCopyInitialization(
+ Entity, NRInfo, RetValExp, SupressSimplerImplicitMoves);
if (Res.isInvalid()) {
// FIXME: Cleanup temporaries here, anyway?
return StmtError();
}
RetValExp = Res.get();
CheckReturnValExpr(RetValExp, FnRetType, ReturnLoc);
- } else {
- NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
}
if (RetValExp) {
@@ -3670,13 +3865,37 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
return R;
}
+static bool CheckSimplerImplicitMovesMSVCWorkaround(const Sema &S,
+ const Expr *E) {
+ if (!E || !S.getLangOpts().CPlusPlus2b || !S.getLangOpts().MSVCCompat)
+ return false;
+ const Decl *D = E->getReferencedDeclOfCallee();
+ if (!D || !S.SourceMgr.isInSystemHeader(D->getLocation()))
+ return false;
+ for (const DeclContext *DC = D->getDeclContext(); DC; DC = DC->getParent()) {
+ if (DC->isStdNamespace())
+ return true;
+ }
+ return false;
+}
+
StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// Check for unexpanded parameter packs.
if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
return StmtError();
+ // HACK: We suppress simpler implicit move here in MSVC compatibility mode
+ // just as a temporary workaround, as the MSVC STL has issues with
+ // this change.
+ bool SupressSimplerImplicitMoves =
+ CheckSimplerImplicitMovesMSVCWorkaround(*this, RetValExp);
+ NamedReturnInfo NRInfo = getNamedReturnInfo(
+ RetValExp, SupressSimplerImplicitMoves ? SimplerImplicitMoveMode::ForceOff
+ : SimplerImplicitMoveMode::Normal);
+
if (isa<CapturingScopeInfo>(getCurFunction()))
- return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);
+ return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp, NRInfo,
+ SupressSimplerImplicitMoves);
QualType FnRetType;
QualType RelatedRetType;
@@ -3748,6 +3967,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
}
}
}
+ const VarDecl *NRVOCandidate = getCopyElisionCandidate(NRInfo, FnRetType);
bool HasDependentReturnType = FnRetType->isDependentType();
@@ -3854,8 +4074,6 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
/* NRVOCandidate=*/nullptr);
} else {
assert(RetValExp || HasDependentReturnType);
- const VarDecl *NRVOCandidate = nullptr;
-
QualType RetType = RelatedRetType.isNull() ? FnRetType : RelatedRetType;
// C99 6.8.6.4p3(136): The return statement is not an assignment. The
@@ -3864,15 +4082,12 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// In C++ the return statement is handled via a copy initialization,
// the C version of which boils down to CheckSingleAssignmentConstraints.
- if (RetValExp)
- NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, CES_Strict);
if (!HasDependentReturnType && !RetValExp->isTypeDependent()) {
// we have a non-void function with an expression, continue checking
- InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
- RetType,
- NRVOCandidate != nullptr);
- ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
- RetType, RetValExp);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(
+ ReturnLoc, RetType, NRVOCandidate != nullptr);
+ ExprResult Res = PerformMoveOrCopyInitialization(
+ Entity, NRInfo, RetValExp, SupressSimplerImplicitMoves);
if (Res.isInvalid()) {
// FIXME: Clean up temporaries here anyway?
return StmtError();
diff --git a/clang/lib/Sema/SemaStmtAsm.cpp b/clang/lib/Sema/SemaStmtAsm.cpp
index 3b631bf747c6..243d0b921cd7 100644
--- a/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/clang/lib/Sema/SemaStmtAsm.cpp
@@ -228,7 +228,7 @@ getClobberConflictLocation(MultiExprArg Exprs, StringLiteral **Constraints,
StringRef Clobber = Clobbers[i]->getString();
// We only check registers, therefore we don't check cc and memory
// clobbers
- if (Clobber == "cc" || Clobber == "memory")
+ if (Clobber == "cc" || Clobber == "memory" || Clobber == "unwind")
continue;
Clobber = Target.getNormalizedGCCRegisterName(Clobber, true);
// Go over the output's registers we collected
@@ -405,7 +405,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
if (!Info.isValidAsmImmediate(IntResult))
return StmtError(Diag(InputExpr->getBeginLoc(),
diag::err_invalid_asm_value_for_constraint)
- << IntResult.toString(10)
+ << toString(IntResult, 10)
<< Info.getConstraintStr()
<< InputExpr->getSourceRange());
}
@@ -453,6 +453,8 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
<< Info.getConstraintStr();
}
+ Optional<SourceLocation> UnwindClobberLoc;
+
// Check that the clobbers are valid.
for (unsigned i = 0; i != NumClobbers; i++) {
StringLiteral *Literal = Clobbers[i];
@@ -468,6 +470,19 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
NumInputs, Names, Constraints, Exprs.data(), AsmString,
NumClobbers, Clobbers, NumLabels, RParenLoc);
}
+
+ if (Clobber == "unwind") {
+ UnwindClobberLoc = Literal->getBeginLoc();
+ }
+ }
+
+ // Using unwind clobber and asm-goto together is not supported right now.
+ if (UnwindClobberLoc && NumLabels > 0) {
+ targetDiag(*UnwindClobberLoc, diag::err_asm_unwind_and_goto);
+ return new (Context)
+ GCCAsmStmt(Context, AsmLoc, IsSimple, IsVolatile, NumOutputs, NumInputs,
+ Names, Constraints, Exprs.data(), AsmString, NumClobbers,
+ Clobbers, NumLabels, RParenLoc);
}
GCCAsmStmt *NS =
@@ -720,7 +735,7 @@ void Sema::FillInlineAsmIdentifierInfo(Expr *Res,
Expr::EvalResult Eval;
if (T->isFunctionType() || T->isDependentType())
return Info.setLabel(Res);
- if (Res->isRValue()) {
+ if (Res->isPRValue()) {
bool IsEnum = isa<clang::EnumType>(T);
if (DeclRefExpr *DRE = dyn_cast<clang::DeclRefExpr>(Res))
if (DRE->getDecl()->getKind() == Decl::EnumConstant)
diff --git a/clang/lib/Sema/SemaStmtAttr.cpp b/clang/lib/Sema/SemaStmtAttr.cpp
index 8031aa6b0ece..4f2977f89ce1 100644
--- a/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/clang/lib/Sema/SemaStmtAttr.cpp
@@ -26,14 +26,12 @@ using namespace sema;
static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
FallThroughAttr Attr(S.Context, A);
- if (!isa<NullStmt>(St)) {
+ if (isa<SwitchCase>(St)) {
S.Diag(A.getRange().getBegin(), diag::err_fallthrough_attr_wrong_target)
- << Attr.getSpelling() << St->getBeginLoc();
- if (isa<SwitchCase>(St)) {
- SourceLocation L = S.getLocForEndOfToken(Range.getEnd());
- S.Diag(L, diag::note_fallthrough_insert_semi_fixit)
- << FixItHint::CreateInsertion(L, ";");
- }
+ << A << St->getBeginLoc();
+ SourceLocation L = S.getLocForEndOfToken(Range.getEnd());
+ S.Diag(L, diag::note_fallthrough_insert_semi_fixit)
+ << FixItHint::CreateInsertion(L, ";");
return nullptr;
}
auto *FnScope = S.getCurFunction();
@@ -54,11 +52,6 @@ static Attr *handleFallThroughAttr(Sema &S, Stmt *St, const ParsedAttr &A,
static Attr *handleSuppressAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
- if (A.getNumArgs() < 1) {
- S.Diag(A.getLoc(), diag::err_attribute_too_few_arguments) << A << 1;
- return nullptr;
- }
-
std::vector<StringRef> DiagnosticIdentifiers;
for (unsigned I = 0, E = A.getNumArgs(); I != E; ++I) {
StringRef RuleName;
@@ -88,10 +81,10 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
PragmaNameLoc->Ident->getName())
.Default("clang loop");
- if (St->getStmtClass() != Stmt::DoStmtClass &&
- St->getStmtClass() != Stmt::ForStmtClass &&
- St->getStmtClass() != Stmt::CXXForRangeStmtClass &&
- St->getStmtClass() != Stmt::WhileStmtClass) {
+ // This could be handled automatically by adding a Subjects definition in
+ // Attr.td, but that would make the diagnostic behavior worse in this case
+ // because the user spells this attribute as a pragma.
+ if (!isa<DoStmt, ForStmt, CXXForRangeStmt, WhileStmt>(St)) {
std::string Pragma = "#pragma " + std::string(PragmaName);
S.Diag(St->getBeginLoc(), diag::err_pragma_loop_precedes_nonloop) << Pragma;
return nullptr;
@@ -205,9 +198,6 @@ public:
static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
NoMergeAttr NMA(S.Context, A);
- if (S.CheckAttrNoArgs(A))
- return nullptr;
-
CallExprFinder CEF(S, St);
if (!CEF.foundCallExpr()) {
@@ -219,6 +209,12 @@ static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) NoMergeAttr(S.Context, A);
}
+static Attr *handleMustTailAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ // Validation is in Sema::ActOnAttributedStmt().
+ return ::new (S.Context) MustTailAttr(S.Context, A);
+}
+
static Attr *handleLikely(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
@@ -237,9 +233,22 @@ static Attr *handleUnlikely(Sema &S, Stmt *St, const ParsedAttr &A,
return ::new (S.Context) UnlikelyAttr(S.Context, A);
}
+#define WANT_STMT_MERGE_LOGIC
+#include "clang/Sema/AttrParsedAttrImpl.inc"
+#undef WANT_STMT_MERGE_LOGIC
+
static void
CheckForIncompatibleAttributes(Sema &S,
const SmallVectorImpl<const Attr *> &Attrs) {
+ // The vast majority of attributed statements will only have one attribute
+ // on them, so skip all of the checking in the common case.
+ if (Attrs.size() < 2)
+ return;
+
+ // First, check for the easy cases that are table-generated for us.
+ if (!DiagnoseMutualExclusions(S, Attrs))
+ return;
+
// There are 6 categories of loop hints attributes: vectorize, interleave,
// unroll, unroll_and_jam, pipeline and distribute. Except for distribute they
// come in two variants: a state form and a numeric form. The state form
@@ -342,32 +351,6 @@ CheckForIncompatibleAttributes(Sema &S,
<< CategoryState.NumericAttr->getDiagnosticName(Policy);
}
}
-
- // C++20 [dcl.attr.likelihood]p1 The attribute-token likely shall not appear
- // in an attribute-specifier-seq that contains the attribute-token unlikely.
- const LikelyAttr *Likely = nullptr;
- const UnlikelyAttr *Unlikely = nullptr;
- for (const auto *I : Attrs) {
- if (const auto *Attr = dyn_cast<LikelyAttr>(I)) {
- if (Unlikely) {
- S.Diag(Attr->getLocation(), diag::err_attributes_are_not_compatible)
- << Attr << Unlikely << Attr->getRange();
- S.Diag(Unlikely->getLocation(), diag::note_conflicting_attribute)
- << Unlikely->getRange();
- return;
- }
- Likely = Attr;
- } else if (const auto *Attr = dyn_cast<UnlikelyAttr>(I)) {
- if (Likely) {
- S.Diag(Attr->getLocation(), diag::err_attributes_are_not_compatible)
- << Attr << Likely << Attr->getRange();
- S.Diag(Likely->getLocation(), diag::note_conflicting_attribute)
- << Likely->getRange();
- return;
- }
- Unlikely = Attr;
- }
- }
}
static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
@@ -377,17 +360,8 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
// opencl_unroll_hint can have 0 arguments (compiler
// determines unrolling factor) or 1 argument (the unroll factor provided
// by the user).
-
- unsigned NumArgs = A.getNumArgs();
-
- if (NumArgs > 1) {
- S.Diag(A.getLoc(), diag::err_attribute_too_many_arguments) << A << 1;
- return nullptr;
- }
-
unsigned UnrollFactor = 0;
-
- if (NumArgs == 1) {
+ if (A.getNumArgs() == 1) {
Expr *E = A.getArgAsExpr(0);
Optional<llvm::APSInt> ArgVal;
@@ -398,28 +372,42 @@ static Attr *handleOpenCLUnrollHint(Sema &S, Stmt *St, const ParsedAttr &A,
}
int Val = ArgVal->getSExtValue();
-
if (Val <= 0) {
S.Diag(A.getRange().getBegin(),
diag::err_attribute_requires_positive_integer)
<< A << /* positive */ 0;
return nullptr;
}
- UnrollFactor = Val;
+ UnrollFactor = static_cast<unsigned>(Val);
}
- return OpenCLUnrollHintAttr::CreateImplicit(S.Context, UnrollFactor);
+ return ::new (S.Context) OpenCLUnrollHintAttr(S.Context, A, UnrollFactor);
}
static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
- switch (A.getKind()) {
- case ParsedAttr::UnknownAttribute:
+ if (A.isInvalid() || A.getKind() == ParsedAttr::IgnoredAttribute)
+ return nullptr;
+
+ // Unknown attributes are automatically warned on. Target-specific attributes
+ // which do not apply to the current target architecture are treated as
+ // though they were unknown attributes.
+ const TargetInfo *Aux = S.Context.getAuxTargetInfo();
+ if (A.getKind() == ParsedAttr::UnknownAttribute ||
+ !(A.existsInTarget(S.Context.getTargetInfo()) ||
+ (S.Context.getLangOpts().SYCLIsDevice && Aux &&
+ A.existsInTarget(*Aux)))) {
S.Diag(A.getLoc(), A.isDeclspecAttribute()
? (unsigned)diag::warn_unhandled_ms_attribute_ignored
: (unsigned)diag::warn_unknown_attribute_ignored)
<< A << A.getRange();
return nullptr;
+ }
+
+ if (S.checkCommonAttributeFeatures(St, A))
+ return nullptr;
+
+ switch (A.getKind()) {
case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
case ParsedAttr::AT_LoopHint:
@@ -430,32 +418,29 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return handleSuppressAttr(S, St, A, Range);
case ParsedAttr::AT_NoMerge:
return handleNoMergeAttr(S, St, A, Range);
+ case ParsedAttr::AT_MustTail:
+ return handleMustTailAttr(S, St, A, Range);
case ParsedAttr::AT_Likely:
return handleLikely(S, St, A, Range);
case ParsedAttr::AT_Unlikely:
return handleUnlikely(S, St, A, Range);
default:
- // if we're here, then we parsed a known attribute, but didn't recognize
- // it as a statement attribute => it is declaration attribute
+ // N.B., ClangAttrEmitter.cpp emits a diagnostic helper that ensures a
+ // declaration attribute is not written on a statement, but this code is
+ // needed for attributes in Attr.td that do not list any subjects.
S.Diag(A.getRange().getBegin(), diag::err_decl_attribute_invalid_on_stmt)
<< A << St->getBeginLoc();
return nullptr;
}
}
-StmtResult Sema::ProcessStmtAttributes(Stmt *S,
- const ParsedAttributesView &AttrList,
- SourceRange Range) {
- SmallVector<const Attr*, 8> Attrs;
- for (const ParsedAttr &AL : AttrList) {
- if (Attr *a = ProcessStmtAttribute(*this, S, AL, Range))
- Attrs.push_back(a);
+void Sema::ProcessStmtAttributes(Stmt *S,
+ const ParsedAttributesWithRange &InAttrs,
+ SmallVectorImpl<const Attr *> &OutAttrs) {
+ for (const ParsedAttr &AL : InAttrs) {
+ if (const Attr *A = ProcessStmtAttribute(*this, S, AL, InAttrs.Range))
+ OutAttrs.push_back(A);
}
- CheckForIncompatibleAttributes(*this, Attrs);
-
- if (Attrs.empty())
- return S;
-
- return ActOnAttributedStmt(Range.getBegin(), Attrs, S);
+ CheckForIncompatibleAttributes(*this, OutAttrs);
}
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 12880b95b9c6..175388198324 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -1105,8 +1105,17 @@ bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstr,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc) {
- ConceptDecl *CD =
- cast<ConceptDecl>(TypeConstr->Template.get().getAsTemplateDecl());
+ return BuildTypeConstraint(SS, TypeConstr, ConstrainedParameter, EllipsisLoc,
+ false);
+}
+
+bool Sema::BuildTypeConstraint(const CXXScopeSpec &SS,
+ TemplateIdAnnotation *TypeConstr,
+ TemplateTypeParmDecl *ConstrainedParameter,
+ SourceLocation EllipsisLoc,
+ bool AllowUnexpandedPack) {
+ TemplateName TN = TypeConstr->Template.get();
+ ConceptDecl *CD = cast<ConceptDecl>(TN.getAsTemplateDecl());
// C++2a [temp.param]p4:
// [...] The concept designated by a type-constraint shall be a type
@@ -1126,15 +1135,24 @@ bool Sema::ActOnTypeConstraint(const CXXScopeSpec &SS,
return true;
}
+ DeclarationNameInfo ConceptName(DeclarationName(TypeConstr->Name),
+ TypeConstr->TemplateNameLoc);
+
TemplateArgumentListInfo TemplateArgs;
if (TypeConstr->LAngleLoc.isValid()) {
TemplateArgs =
makeTemplateArgumentListInfo(*this, *TypeConstr);
+
+ if (EllipsisLoc.isInvalid() && !AllowUnexpandedPack) {
+ for (TemplateArgumentLoc Arg : TemplateArgs.arguments()) {
+ if (DiagnoseUnexpandedParameterPack(Arg, UPPC_TypeConstraint))
+ return true;
+ }
+ }
}
return AttachTypeConstraint(
SS.isSet() ? SS.getWithLocInContext(Context) : NestedNameSpecifierLoc(),
- DeclarationNameInfo(DeclarationName(TypeConstr->Name),
- TypeConstr->TemplateNameLoc), CD,
+ ConceptName, CD,
TypeConstr->LAngleLoc.isValid() ? &TemplateArgs : nullptr,
ConstrainedParameter, EllipsisLoc);
}
@@ -1238,8 +1256,8 @@ bool Sema::AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *NTTP,
}
// FIXME: Concepts: This should be the type of the placeholder, but this is
// unclear in the wording right now.
- DeclRefExpr *Ref = BuildDeclRefExpr(NTTP, NTTP->getType(), VK_RValue,
- NTTP->getLocation());
+ DeclRefExpr *Ref =
+ BuildDeclRefExpr(NTTP, NTTP->getType(), VK_PRValue, NTTP->getLocation());
if (!Ref)
return true;
ExprResult ImmediatelyDeclaredConstraint =
@@ -1677,6 +1695,9 @@ Sema::ActOnTemplateParameterList(unsigned Depth,
if (ExportLoc.isValid())
Diag(ExportLoc, diag::warn_template_export_unsupported);
+ for (NamedDecl *P : Params)
+ warnOnReservedIdentifier(P);
+
return TemplateParameterList::Create(
Context, TemplateLoc, LAngleLoc,
llvm::makeArrayRef(Params.data(), Params.size()),
@@ -1849,7 +1870,7 @@ DeclResult Sema::CheckClassTemplate(
Diag(KWLoc, diag::err_using_decl_conflict_reverse);
Diag(Shadow->getTargetDecl()->getLocation(),
diag::note_using_decl_target);
- Diag(Shadow->getUsingDecl()->getLocation(), diag::note_using_decl) << 0;
+ Diag(Shadow->getIntroducer()->getLocation(), diag::note_using_decl) << 0;
// Recover by ignoring the old declaration.
PrevDecl = PrevClassTemplate = nullptr;
}
@@ -2196,7 +2217,7 @@ struct ConvertConstructorToDeductionGuideTransform {
return nullptr;
TypeSourceInfo *NewTInfo = TLB.getTypeSourceInfo(SemaRef.Context, NewType);
- return buildDeductionGuide(TemplateParams, CD->getExplicitSpecifier(),
+ return buildDeductionGuide(TemplateParams, CD, CD->getExplicitSpecifier(),
NewTInfo, CD->getBeginLoc(), CD->getLocation(),
CD->getEndLoc(), MaterializedTypedefs);
}
@@ -2226,7 +2247,7 @@ struct ConvertConstructorToDeductionGuideTransform {
Params.push_back(NewParam);
}
- return buildDeductionGuide(Template->getTemplateParameters(),
+ return buildDeductionGuide(Template->getTemplateParameters(), nullptr,
ExplicitSpecifier(), TSI, Loc, Loc, Loc);
}
@@ -2384,9 +2405,9 @@ private:
NewDefArg = new (SemaRef.Context)
OpaqueValueExpr(OldParam->getDefaultArg()->getBeginLoc(),
ParamTy.getNonLValueExprType(SemaRef.Context),
- ParamTy->isLValueReferenceType() ? VK_LValue :
- ParamTy->isRValueReferenceType() ? VK_XValue :
- VK_RValue);
+ ParamTy->isLValueReferenceType() ? VK_LValue
+ : ParamTy->isRValueReferenceType() ? VK_XValue
+ : VK_PRValue);
}
ParmVarDecl *NewParam = ParmVarDecl::Create(SemaRef.Context, DC,
@@ -2404,9 +2425,9 @@ private:
}
FunctionTemplateDecl *buildDeductionGuide(
- TemplateParameterList *TemplateParams, ExplicitSpecifier ES,
- TypeSourceInfo *TInfo, SourceLocation LocStart, SourceLocation Loc,
- SourceLocation LocEnd,
+ TemplateParameterList *TemplateParams, CXXConstructorDecl *Ctor,
+ ExplicitSpecifier ES, TypeSourceInfo *TInfo, SourceLocation LocStart,
+ SourceLocation Loc, SourceLocation LocEnd,
llvm::ArrayRef<TypedefNameDecl *> MaterializedTypedefs = {}) {
DeclarationNameInfo Name(DeductionGuideName, Loc);
ArrayRef<ParmVarDecl *> Params =
@@ -2415,7 +2436,7 @@ private:
// Build the implicit deduction guide template.
auto *Guide =
CXXDeductionGuideDecl::Create(SemaRef.Context, DC, LocStart, ES, Name,
- TInfo->getType(), TInfo, LocEnd);
+ TInfo->getType(), TInfo, LocEnd, Ctor);
Guide->setImplicit();
Guide->setParams(Params);
@@ -2494,6 +2515,12 @@ void Sema::DeclareImplicitDeductionGuides(TemplateDecl *Template,
if (!CD || (!FTD && CD->isFunctionTemplateSpecialization()))
continue;
+ // Cannot make a deduction guide when unparsed arguments are present.
+ if (std::any_of(CD->param_begin(), CD->param_end(), [](ParmVarDecl *P) {
+ return !P || P->hasUnparsedDefaultArg();
+ }))
+ continue;
+
Transform.transformConstructor(FTD, CD);
AddedAny = true;
}
@@ -3561,7 +3588,9 @@ public:
OS << VD->getName();
if (const auto *IV = dyn_cast<VarTemplateSpecializationDecl>(VD)) {
// This is a template variable, print the expanded template arguments.
- printTemplateArgumentList(OS, IV->getTemplateArgs().asArray(), Policy);
+ printTemplateArgumentList(
+ OS, IV->getTemplateArgs().asArray(), Policy,
+ IV->getSpecializedTemplate()->getTemplateParameters());
}
return true;
}
@@ -6874,8 +6903,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Arg = PE->getPattern();
ExprResult E = ImpCastExprToType(
Arg, ParamType.getNonLValueExprType(Context), CK_Dependent,
- ParamType->isLValueReferenceType() ? VK_LValue :
- ParamType->isRValueReferenceType() ? VK_XValue : VK_RValue);
+ ParamType->isLValueReferenceType() ? VK_LValue
+ : ParamType->isRValueReferenceType() ? VK_XValue
+ : VK_PRValue);
if (E.isInvalid())
return ExprError();
if (PE) {
@@ -7172,10 +7202,10 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
// Complain if an unsigned parameter received a negative value.
- if (IntegerType->isUnsignedIntegerOrEnumerationType()
- && (OldValue.isSigned() && OldValue.isNegative())) {
+ if (IntegerType->isUnsignedIntegerOrEnumerationType() &&
+ (OldValue.isSigned() && OldValue.isNegative())) {
Diag(Arg->getBeginLoc(), diag::warn_template_arg_negative)
- << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << toString(OldValue, 10) << toString(Value, 10) << Param->getType()
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
@@ -7190,7 +7220,7 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
RequiredBits = OldValue.getMinSignedBits();
if (RequiredBits > AllowedBits) {
Diag(Arg->getBeginLoc(), diag::warn_template_arg_too_large)
- << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << toString(OldValue, 10) << toString(Value, 10) << Param->getType()
<< Arg->getSourceRange();
Diag(Param->getLocation(), diag::note_template_param_here);
}
@@ -7603,7 +7633,7 @@ Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
if (OrigT->isEnumeralType()) {
// FIXME: This is a hack. We need a better way to handle substituted
// non-type template parameters.
- E = CStyleCastExpr::Create(Context, OrigT, VK_RValue, CK_IntegralCast, E,
+ E = CStyleCastExpr::Create(Context, OrigT, VK_PRValue, CK_IntegralCast, E,
nullptr, CurFPFeatureOverrides(),
Context.getTrivialTypeSourceInfo(OrigT, Loc),
Loc, Loc);
@@ -10899,7 +10929,9 @@ Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
}
Out << " = ";
- Args[I].print(getPrintingPolicy(), Out);
+ Args[I].print(
+ getPrintingPolicy(), Out,
+ TemplateParameterList::shouldIncludeTypeForArgument(Params, I));
}
Out << ']';
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index ee4316e7a632..08e798304b0c 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -455,11 +455,13 @@ static Sema::TemplateDeductionResult DeduceNullPtrTemplateArgument(
const NonTypeTemplateParmDecl *NTTP, QualType NullPtrType,
TemplateDeductionInfo &Info,
SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
- Expr *Value =
- S.ImpCastExprToType(new (S.Context) CXXNullPtrLiteralExpr(
- S.Context.NullPtrTy, NTTP->getLocation()),
- NullPtrType, CK_NullToPointer)
- .get();
+ Expr *Value = S.ImpCastExprToType(
+ new (S.Context) CXXNullPtrLiteralExpr(S.Context.NullPtrTy,
+ NTTP->getLocation()),
+ NullPtrType,
+ NullPtrType->isMemberPointerType() ? CK_NullToMemberPointer
+ : CK_NullToPointer)
+ .get();
return DeduceNonTypeTemplateArgument(S, TemplateParams, NTTP,
DeducedTemplateArgument(Value),
Value->getType(), Info, Deduced);
@@ -3077,6 +3079,10 @@ Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
*this, Sema::ExpressionEvaluationContext::Unevaluated);
SFINAETrap Trap(*this);
+ // This deduction has no relation to any outer instantiation we might be
+ // performing.
+ LocalInstantiationScope InstantiationScope(*this);
+
SmallVector<DeducedTemplateArgument, 4> Deduced;
Deduced.resize(Partial->getTemplateParameters()->size());
if (TemplateDeductionResult Result
@@ -3125,6 +3131,10 @@ Sema::DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
*this, Sema::ExpressionEvaluationContext::Unevaluated);
SFINAETrap Trap(*this);
+ // This deduction has no relation to any outer instantiation we might be
+ // performing.
+ LocalInstantiationScope InstantiationScope(*this);
+
SmallVector<DeducedTemplateArgument, 4> Deduced;
Deduced.resize(Partial->getTemplateParameters()->size());
if (TemplateDeductionResult Result = ::DeduceTemplateArguments(
@@ -3869,7 +3879,7 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(
// "lvalue reference to A" is used in place of A for type deduction.
if (isForwardingReference(QualType(ParamRefType, 0), FirstInnerIndex) &&
Arg->isLValue()) {
- if (S.getLangOpts().OpenCL)
+ if (S.getLangOpts().OpenCL && !ArgType.hasAddressSpace())
ArgType = S.Context.getAddrSpaceQualType(ArgType, LangAS::opencl_generic);
ArgType = S.Context.getLValueReferenceType(ArgType);
}
@@ -3918,7 +3928,7 @@ static bool AdjustFunctionParmAndArgTypesForDeduction(
if (isSimpleTemplateIdType(ParamType) ||
(isa<PointerType>(ParamType) &&
isSimpleTemplateIdType(
- ParamType->getAs<PointerType>()->getPointeeType())))
+ ParamType->castAs<PointerType>()->getPointeeType())))
TDF |= TDF_DerivedClass;
return false;
@@ -4695,10 +4705,9 @@ CheckDeducedPlaceholderConstraints(Sema &S, const AutoType &Type,
llvm::raw_string_ostream OS(Buf);
OS << "'" << Concept->getName();
if (TypeLoc.hasExplicitTemplateArgs()) {
- OS << "<";
- for (const auto &Arg : Type.getTypeConstraintArguments())
- Arg.print(S.getPrintingPolicy(), OS);
- OS << ">";
+ printTemplateArgumentList(
+ OS, Type.getTypeConstraintArguments(), S.getPrintingPolicy(),
+ Type.getTypeConstraintConcept()->getTemplateParameters());
}
OS << "'";
OS.flush();
@@ -5464,6 +5473,9 @@ static bool isAtLeastAsSpecializedAs(Sema &S, QualType T1, QualType T2,
Deduced.end());
Sema::InstantiatingTemplate Inst(S, Info.getLocation(), P2, DeducedArgs,
Info);
+ if (Inst.isInvalid())
+ return false;
+
auto *TST1 = T1->castAs<TemplateSpecializationType>();
bool AtLeastAsSpecialized;
S.runWithSufficientStackSpace(Info.getLocation(), [&] {
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 8bd812b39de4..f18f77d3442a 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -805,9 +805,10 @@ void Sema::PrintInstantiationStack() {
SmallString<128> TemplateArgsStr;
llvm::raw_svector_ostream OS(TemplateArgsStr);
cast<NamedDecl>(Active->Entity)->printName(OS);
- if (!isa<FunctionDecl>(Active->Entity))
+ if (!isa<FunctionDecl>(Active->Entity)) {
printTemplateArgumentList(OS, Active->template_arguments(),
getPrintingPolicy());
+ }
Diags.Report(Active->PointOfInstantiation, DiagID) << OS.str()
<< Active->InstantiationRange;
break;
@@ -1474,8 +1475,8 @@ TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
ExprType.addConst();
return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(
- ExprType, TargetType->isReferenceType() ? VK_LValue : VK_RValue, NTTP,
- E->getLocation(), Arg);
+ ExprType, TargetType->isReferenceType() ? VK_LValue : VK_PRValue,
+ NTTP, E->getLocation(), Arg);
}
Arg = getPackSubstitutedTemplateArgument(getSema(), Arg);
@@ -2385,10 +2386,10 @@ ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
}
if (AttachTypeConstraint(
TC->getNestedNameSpecifierLoc(), TC->getConceptNameInfo(),
- TC->getNamedConcept(), &InstArgs, Inst,
+ TC->getNamedConcept(), TemplArgInfo ? &InstArgs : nullptr, Inst,
TTP->isParameterPack()
? cast<CXXFoldExpr>(TC->getImmediatelyDeclaredConstraint())
- ->getEllipsisLoc()
+ ->getEllipsisLoc()
: SourceLocation()))
return nullptr;
}
@@ -2816,7 +2817,8 @@ Sema::InstantiateClass(SourceLocation PointOfInstantiation,
if (!Instantiation->isInvalidDecl()) {
// Perform any dependent diagnostics from the pattern.
- PerformDependentDiagnostics(Pattern, TemplateArgs);
+ if (Pattern->isDependentContext())
+ PerformDependentDiagnostics(Pattern, TemplateArgs);
// Instantiate any out-of-line class template partial
// specializations now.
@@ -3420,7 +3422,8 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
Instantiation->getTemplateInstantiationPattern();
DeclContext::lookup_result Lookup =
ClassPattern->lookup(Field->getDeclName());
- FieldDecl *Pattern = cast<FieldDecl>(Lookup.front());
+ FieldDecl *Pattern = Lookup.find_first<FieldDecl>();
+ assert(Pattern);
InstantiateInClassInitializer(PointOfInstantiation, Field, Pattern,
TemplateArgs);
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index dc1e0ef60cac..be4c51930789 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -9,6 +9,7 @@
//
//===----------------------------------------------------------------------===/
+#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
@@ -23,6 +24,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Sema/Initialization.h"
#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -474,7 +476,7 @@ static void instantiateOMPDeclareVariantAttr(
SourceLocation(), SubstFD,
/* RefersToEnclosingVariableOrCapture */ false,
/* NameLoc */ SubstFD->getLocation(),
- SubstFD->getType(), ExprValueKind::VK_RValue);
+ SubstFD->getType(), ExprValueKind::VK_PRValue);
}
}
}
@@ -548,6 +550,35 @@ static void instantiateDependentAMDGPUWavesPerEUAttr(
S.addAMDGPUWavesPerEUAttr(New, Attr, MinExpr, MaxExpr);
}
+// This doesn't take any template parameters, but we have a custom action that
+// needs to happen when the kernel itself is instantiated. We need to run the
+// ItaniumMangler to mark the names required to name this kernel.
+static void instantiateDependentSYCLKernelAttr(
+ Sema &S, const MultiLevelTemplateArgumentList &TemplateArgs,
+ const SYCLKernelAttr &Attr, Decl *New) {
+ // Functions cannot be partially specialized, so if we are being instantiated,
+ // we are obviously a complete specialization. Since this attribute is only
+ // valid on function template declarations, we know that this is a full
+ // instantiation of a kernel.
+ S.AddSYCLKernelLambda(cast<FunctionDecl>(New));
+
+ // Evaluate whether this would change any of the already evaluated
+ // __builtin_sycl_unique_stable_name values.
+ for (auto &Itr : S.Context.SYCLUniqueStableNameEvaluatedValues) {
+ const std::string &CurName = Itr.first->ComputeName(S.Context);
+ if (Itr.second != CurName) {
+ S.Diag(New->getLocation(),
+ diag::err_kernel_invalidates_sycl_unique_stable_name);
+ S.Diag(Itr.first->getLocation(),
+ diag::note_sycl_unique_stable_name_evaluated_here);
+ // Update this so future diagnostics work correctly.
+ Itr.second = CurName;
+ }
+ }
+
+ New->addAttr(Attr.clone(S.getASTContext()));
+}
+
/// Determine whether the attribute A might be relevent to the declaration D.
/// If not, we can skip instantiating it. The attribute may or may not have
/// been instantiated yet.
@@ -723,6 +754,11 @@ void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
continue;
}
+ if (auto *A = dyn_cast<SYCLKernelAttr>(TmplAttr)) {
+ instantiateDependentSYCLKernelAttr(*this, TemplateArgs, *A, New);
+ continue;
+ }
+
assert(!TmplAttr->isPackExpansion());
if (TmplAttr->isLateParsed() && LateAttrs) {
// Late parsed attributes must be instantiated and attached after the
@@ -856,10 +892,11 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
}
- // HACK: g++ has a bug where it gets the value kind of ?: wrong.
- // libstdc++ relies upon this bug in its implementation of common_type.
- // If we happen to be processing that implementation, fake up the g++ ?:
- // semantics. See LWG issue 2141 for more information on the bug.
+ // HACK: 2012-10-23 g++ has a bug where it gets the value kind of ?: wrong.
+ // libstdc++ relies upon this bug in its implementation of common_type. If we
+ // happen to be processing that implementation, fake up the g++ ?:
+ // semantics. See LWG issue 2141 for more information on the bug. The bugs
+ // are fixed in g++ and libstdc++ 4.9.0 (2014-04-22).
const DecltypeType *DT = DI->getType()->getAs<DecltypeType>();
CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D->getDeclContext());
if (DT && RD && isa<ConditionalOperator>(DT->getUnderlyingExpr()) &&
@@ -1050,11 +1087,30 @@ Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D,
SemaRef.BuildVariableInstantiation(Var, D, TemplateArgs, LateAttrs, Owner,
StartingScope, InstantiatingVarTemplate);
-
if (D->isNRVOVariable()) {
- QualType ReturnType = cast<FunctionDecl>(DC)->getReturnType();
- if (SemaRef.isCopyElisionCandidate(ReturnType, Var, Sema::CES_Strict))
- Var->setNRVOVariable(true);
+ QualType RT;
+ if (auto *F = dyn_cast<FunctionDecl>(DC))
+ RT = F->getReturnType();
+ else if (isa<BlockDecl>(DC))
+ RT = cast<FunctionType>(SemaRef.getCurBlock()->FunctionType)
+ ->getReturnType();
+ else
+ llvm_unreachable("Unknown context type");
+
+ // This is the last chance we have of checking copy elision eligibility
+ // for functions in dependent contexts. The sema actions for building
+ // the return statement during template instantiation will have no effect
+ // regarding copy elision, since NRVO propagation runs on the scope exit
+ // actions, and these are not run on instantiation.
+ // This might run through some VarDecls which were returned from non-taken
+ // 'if constexpr' branches, and these will end up being constructed on the
+ // return slot even if they will never be returned, as a sort of accidental
+ // 'optimization'. Notably, functions with 'auto' return types won't have it
+ // deduced by this point. Coupled with the limitation described
+ // previously, this makes it very hard to support copy elision for these.
+ Sema::NamedReturnInfo Info = SemaRef.getNamedReturnInfo(Var);
+ bool NRVO = SemaRef.getCopyElisionCandidate(Info, RT) != nullptr;
+ Var->setNRVOVariable(NRVO);
}
Var->setImplicit(D->isImplicit());
@@ -1517,48 +1573,18 @@ Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
return nullptr;
}
- bool AdoptedPreviousTemplateParams = false;
if (PrevClassTemplate) {
- bool Complain = true;
-
- // HACK: libstdc++ 4.2.1 contains an ill-formed friend class
- // template for struct std::tr1::__detail::_Map_base, where the
- // template parameters of the friend declaration don't match the
- // template parameters of the original declaration. In this one
- // case, we don't complain about the ill-formed friend
- // declaration.
- if (isFriend && Pattern->getIdentifier() &&
- Pattern->getIdentifier()->isStr("_Map_base") &&
- DC->isNamespace() &&
- cast<NamespaceDecl>(DC)->getIdentifier() &&
- cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__detail")) {
- DeclContext *DCParent = DC->getParent();
- if (DCParent->isNamespace() &&
- cast<NamespaceDecl>(DCParent)->getIdentifier() &&
- cast<NamespaceDecl>(DCParent)->getIdentifier()->isStr("tr1")) {
- if (cast<Decl>(DCParent)->isInStdNamespace())
- Complain = false;
- }
- }
-
TemplateParameterList *PrevParams
= PrevClassTemplate->getMostRecentDecl()->getTemplateParameters();
// Make sure the parameter lists match.
- if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams,
- Complain,
- Sema::TPL_TemplateMatch)) {
- if (Complain)
- return nullptr;
-
- AdoptedPreviousTemplateParams = true;
- InstParams = PrevParams;
- }
+ if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams, true,
+ Sema::TPL_TemplateMatch))
+ return nullptr;
// Do some additional validation, then merge default arguments
// from the existing declarations.
- if (!AdoptedPreviousTemplateParams &&
- SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
+ if (SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
Sema::TPC_ClassTemplate))
return nullptr;
}
@@ -1800,9 +1826,16 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
PrevDecl = cast<CXXRecordDecl>(Prev);
}
- CXXRecordDecl *Record = CXXRecordDecl::Create(
- SemaRef.Context, D->getTagKind(), Owner, D->getBeginLoc(),
- D->getLocation(), D->getIdentifier(), PrevDecl);
+ CXXRecordDecl *Record = nullptr;
+ if (D->isLambda())
+ Record = CXXRecordDecl::CreateLambda(
+ SemaRef.Context, Owner, D->getLambdaTypeInfo(), D->getLocation(),
+ D->isDependentLambda(), D->isGenericLambda(),
+ D->getLambdaCaptureDefault());
+ else
+ Record = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
+ D->getBeginLoc(), D->getLocation(),
+ D->getIdentifier(), PrevDecl);
// Substitute the nested name specifier, if any.
if (SubstQualifier(D, Record))
@@ -2281,6 +2314,20 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
if (InstantiatedExplicitSpecifier.isInvalid())
return nullptr;
+ // Implicit destructors/constructors created for local classes in
+ // DeclareImplicit* (see SemaDeclCXX.cpp) might not have an associated TSI.
+ // Unfortunately there isn't enough context in those functions to
+ // conditionally populate the TSI without breaking non-template related use
+ // cases. Populate TSIs prior to calling SubstFunctionType to make sure we get
+ // a proper transformation.
+ if (cast<CXXRecordDecl>(D->getParent())->isLambda() &&
+ !D->getTypeSourceInfo() &&
+ isa<CXXConstructorDecl, CXXDestructorDecl>(D)) {
+ TypeSourceInfo *TSI =
+ SemaRef.Context.getTrivialTypeSourceInfo(D->getType());
+ D->setTypeSourceInfo(TSI);
+ }
+
SmallVector<ParmVarDecl *, 4> Params;
TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
if (!TInfo)
@@ -2370,6 +2417,9 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
Destructor->isInlineSpecified(), false, Destructor->getConstexprKind(),
TrailingRequiresClause);
Method->setRangeEnd(Destructor->getEndLoc());
+ Method->setDeclName(SemaRef.Context.DeclarationNames.getCXXDestructorName(
+ SemaRef.Context.getCanonicalType(
+ SemaRef.Context.getTypeDeclType(Record))));
} else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
Method = CXXConversionDecl::Create(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
@@ -2610,7 +2660,6 @@ Decl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
TemplateTypeParmDecl *D) {
- // TODO: don't always clone when decls are refcounted.
assert(D->getTypeForDecl()->isTemplateTypeParmType());
Optional<unsigned> NumExpanded;
@@ -3009,6 +3058,53 @@ Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
return Inst;
}
+Decl *TemplateDeclInstantiator::VisitBaseUsingDecls(BaseUsingDecl *D,
+ BaseUsingDecl *Inst,
+ LookupResult *Lookup) {
+
+ bool isFunctionScope = Owner->isFunctionOrMethod();
+
+ for (auto *Shadow : D->shadows()) {
+ // FIXME: UsingShadowDecl doesn't preserve its immediate target, so
+ // reconstruct it in the case where it matters. Hm, can we extract it from
+ // the DeclSpec when parsing and save it in the UsingDecl itself?
+ NamedDecl *OldTarget = Shadow->getTargetDecl();
+ if (auto *CUSD = dyn_cast<ConstructorUsingShadowDecl>(Shadow))
+ if (auto *BaseShadow = CUSD->getNominatedBaseClassShadowDecl())
+ OldTarget = BaseShadow;
+
+ NamedDecl *InstTarget = nullptr;
+ if (auto *EmptyD =
+ dyn_cast<UnresolvedUsingIfExistsDecl>(Shadow->getTargetDecl())) {
+ InstTarget = UnresolvedUsingIfExistsDecl::Create(
+ SemaRef.Context, Owner, EmptyD->getLocation(), EmptyD->getDeclName());
+ } else {
+ InstTarget = cast_or_null<NamedDecl>(SemaRef.FindInstantiatedDecl(
+ Shadow->getLocation(), OldTarget, TemplateArgs));
+ }
+ if (!InstTarget)
+ return nullptr;
+
+ UsingShadowDecl *PrevDecl = nullptr;
+ if (Lookup &&
+ SemaRef.CheckUsingShadowDecl(Inst, InstTarget, *Lookup, PrevDecl))
+ continue;
+
+ if (UsingShadowDecl *OldPrev = getPreviousDeclForInstantiation(Shadow))
+ PrevDecl = cast_or_null<UsingShadowDecl>(SemaRef.FindInstantiatedDecl(
+ Shadow->getLocation(), OldPrev, TemplateArgs));
+
+ UsingShadowDecl *InstShadow = SemaRef.BuildUsingShadowDecl(
+ /*Scope*/ nullptr, Inst, InstTarget, PrevDecl);
+ SemaRef.Context.setInstantiatedFromUsingShadowDecl(InstShadow, Shadow);
+
+ if (isFunctionScope)
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(Shadow, InstShadow);
+ }
+
+ return Inst;
+}
+
Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
// The nested name specifier may be dependent, for example
@@ -3034,11 +3130,9 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
NameInfo.setName(SemaRef.Context.DeclarationNames.getCXXConstructorName(
SemaRef.Context.getCanonicalType(SemaRef.Context.getRecordType(RD))));
- // We only need to do redeclaration lookups if we're in a class
- // scope (in fact, it's not really even possible in non-class
- // scopes).
+ // We only need to do redeclaration lookups if we're in a class scope (in
+ // fact, it's not really even possible in non-class scopes).
bool CheckRedeclaration = Owner->isRecord();
-
LookupResult Prev(SemaRef, NameInfo, Sema::LookupUsingDeclName,
Sema::ForVisibleRedeclaration);
@@ -3059,12 +3153,11 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
D->hasTypename(), SS,
D->getLocation(), Prev))
NewUD->setInvalidDecl();
-
}
if (!NewUD->isInvalidDecl() &&
- SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), D->hasTypename(),
- SS, NameInfo, D->getLocation()))
+ SemaRef.CheckUsingDeclQualifier(D->getUsingLoc(), D->hasTypename(), SS,
+ NameInfo, D->getLocation(), nullptr, D))
NewUD->setInvalidDecl();
SemaRef.Context.setInstantiatedFromUsingDecl(NewUD, D);
@@ -3075,46 +3168,39 @@ Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
if (NewUD->isInvalidDecl())
return NewUD;
+ // If the using scope was dependent, or we had dependent bases, we need to
+ // recheck the inheritance
if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName)
SemaRef.CheckInheritingConstructorUsingDecl(NewUD);
- bool isFunctionScope = Owner->isFunctionOrMethod();
+ return VisitBaseUsingDecls(D, NewUD, CheckRedeclaration ? &Prev : nullptr);
+}
- // Process the shadow decls.
- for (auto *Shadow : D->shadows()) {
- // FIXME: UsingShadowDecl doesn't preserve its immediate target, so
- // reconstruct it in the case where it matters.
- NamedDecl *OldTarget = Shadow->getTargetDecl();
- if (auto *CUSD = dyn_cast<ConstructorUsingShadowDecl>(Shadow))
- if (auto *BaseShadow = CUSD->getNominatedBaseClassShadowDecl())
- OldTarget = BaseShadow;
+Decl *TemplateDeclInstantiator::VisitUsingEnumDecl(UsingEnumDecl *D) {
+ // Cannot be a dependent type, but still could be an instantiation
+ EnumDecl *EnumD = cast_or_null<EnumDecl>(SemaRef.FindInstantiatedDecl(
+ D->getLocation(), D->getEnumDecl(), TemplateArgs));
- NamedDecl *InstTarget =
- cast_or_null<NamedDecl>(SemaRef.FindInstantiatedDecl(
- Shadow->getLocation(), OldTarget, TemplateArgs));
- if (!InstTarget)
- return nullptr;
+ if (SemaRef.RequireCompleteEnumDecl(EnumD, EnumD->getLocation()))
+ return nullptr;
- UsingShadowDecl *PrevDecl = nullptr;
- if (CheckRedeclaration) {
- if (SemaRef.CheckUsingShadowDecl(NewUD, InstTarget, Prev, PrevDecl))
- continue;
- } else if (UsingShadowDecl *OldPrev =
- getPreviousDeclForInstantiation(Shadow)) {
- PrevDecl = cast_or_null<UsingShadowDecl>(SemaRef.FindInstantiatedDecl(
- Shadow->getLocation(), OldPrev, TemplateArgs));
- }
+ UsingEnumDecl *NewUD =
+ UsingEnumDecl::Create(SemaRef.Context, Owner, D->getUsingLoc(),
+ D->getEnumLoc(), D->getLocation(), EnumD);
- UsingShadowDecl *InstShadow =
- SemaRef.BuildUsingShadowDecl(/*Scope*/nullptr, NewUD, InstTarget,
- PrevDecl);
- SemaRef.Context.setInstantiatedFromUsingShadowDecl(InstShadow, Shadow);
+ SemaRef.Context.setInstantiatedFromUsingEnumDecl(NewUD, D);
+ NewUD->setAccess(D->getAccess());
+ Owner->addDecl(NewUD);
- if (isFunctionScope)
- SemaRef.CurrentInstantiationScope->InstantiatedLocal(Shadow, InstShadow);
- }
+ // Don't process the shadow decls for an invalid decl.
+ if (NewUD->isInvalidDecl())
+ return NewUD;
- return NewUD;
+ // We don't have to recheck for duplication of the UsingEnumDecl itself, as it
+ // cannot be dependent, and will therefore have been checked during template
+ // definition.
+
+ return VisitBaseUsingDecls(D, NewUD, nullptr);
}
Decl *TemplateDeclInstantiator::VisitUsingShadowDecl(UsingShadowDecl *D) {
@@ -3214,13 +3300,16 @@ Decl *TemplateDeclInstantiator::instantiateUnresolvedUsingDecl(
SourceLocation EllipsisLoc =
InstantiatingSlice ? SourceLocation() : D->getEllipsisLoc();
+ bool IsUsingIfExists = D->template hasAttr<UsingIfExistsAttr>();
NamedDecl *UD = SemaRef.BuildUsingDeclaration(
/*Scope*/ nullptr, D->getAccess(), D->getUsingLoc(),
/*HasTypename*/ TD, TypenameLoc, SS, NameInfo, EllipsisLoc,
ParsedAttributesView(),
- /*IsInstantiation*/ true);
- if (UD)
+ /*IsInstantiation*/ true, IsUsingIfExists);
+ if (UD) {
+ SemaRef.InstantiateAttrs(TemplateArgs, D, UD);
SemaRef.Context.setInstantiatedFromUsingDecl(UD, D);
+ }
return UD;
}
@@ -3235,6 +3324,11 @@ Decl *TemplateDeclInstantiator::VisitUnresolvedUsingValueDecl(
return instantiateUnresolvedUsingDecl(D);
}
+Decl *TemplateDeclInstantiator::VisitUnresolvedUsingIfExistsDecl(
+ UnresolvedUsingIfExistsDecl *D) {
+ llvm_unreachable("referring to unresolved decl out of UsingShadowDecl");
+}
+
Decl *TemplateDeclInstantiator::VisitUsingPackDecl(UsingPackDecl *D) {
SmallVector<NamedDecl*, 8> Expansions;
for (auto *UD : D->expansions()) {
@@ -4841,16 +4935,85 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// Introduce a new scope where local variable instantiations will be
// recorded, unless we're actually a member function within a local
// class, in which case we need to merge our results with the parent
- // scope (of the enclosing function).
+ // scope (of the enclosing function). The exception is instantiating
+ // a function template specialization, since the template to be
+ // instantiated already has references to locals properly substituted.
bool MergeWithParentScope = false;
if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Function->getDeclContext()))
- MergeWithParentScope = Rec->isLocalClass();
+ MergeWithParentScope =
+ Rec->isLocalClass() && !Function->isFunctionTemplateSpecialization();
LocalInstantiationScope Scope(*this, MergeWithParentScope);
+ auto RebuildTypeSourceInfoForDefaultSpecialMembers = [&]() {
+ // Special members might get their TypeSourceInfo set up w.r.t the
+ // PatternDecl context, in which case parameters could still be pointing
+ // back to the original class, make sure arguments are bound to the
+ // instantiated record instead.
+ assert(PatternDecl->isDefaulted() &&
+ "Special member needs to be defaulted");
+ auto PatternSM = getDefaultedFunctionKind(PatternDecl).asSpecialMember();
+ if (!(PatternSM == Sema::CXXCopyConstructor ||
+ PatternSM == Sema::CXXCopyAssignment ||
+ PatternSM == Sema::CXXMoveConstructor ||
+ PatternSM == Sema::CXXMoveAssignment))
+ return;
+
+ auto *NewRec = dyn_cast<CXXRecordDecl>(Function->getDeclContext());
+ const auto *PatternRec =
+ dyn_cast<CXXRecordDecl>(PatternDecl->getDeclContext());
+ if (!NewRec || !PatternRec)
+ return;
+ if (!PatternRec->isLambda())
+ return;
+
+ struct SpecialMemberTypeInfoRebuilder
+ : TreeTransform<SpecialMemberTypeInfoRebuilder> {
+ using Base = TreeTransform<SpecialMemberTypeInfoRebuilder>;
+ const CXXRecordDecl *OldDecl;
+ CXXRecordDecl *NewDecl;
- if (PatternDecl->isDefaulted())
+ SpecialMemberTypeInfoRebuilder(Sema &SemaRef, const CXXRecordDecl *O,
+ CXXRecordDecl *N)
+ : TreeTransform(SemaRef), OldDecl(O), NewDecl(N) {}
+
+ bool TransformExceptionSpec(SourceLocation Loc,
+ FunctionProtoType::ExceptionSpecInfo &ESI,
+ SmallVectorImpl<QualType> &Exceptions,
+ bool &Changed) {
+ return false;
+ }
+
+ QualType TransformRecordType(TypeLocBuilder &TLB, RecordTypeLoc TL) {
+ const RecordType *T = TL.getTypePtr();
+ RecordDecl *Record = cast_or_null<RecordDecl>(
+ getDerived().TransformDecl(TL.getNameLoc(), T->getDecl()));
+ if (Record != OldDecl)
+ return Base::TransformRecordType(TLB, TL);
+
+ QualType Result = getDerived().RebuildRecordType(NewDecl);
+ if (Result.isNull())
+ return QualType();
+
+ RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+ } IR{*this, PatternRec, NewRec};
+
+ TypeSourceInfo *NewSI = IR.TransformType(Function->getTypeSourceInfo());
+ Function->setType(NewSI->getType());
+ Function->setTypeSourceInfo(NewSI);
+
+ ParmVarDecl *Parm = Function->getParamDecl(0);
+ TypeSourceInfo *NewParmSI = IR.TransformType(Parm->getTypeSourceInfo());
+ Parm->setType(NewParmSI->getType());
+ Parm->setTypeSourceInfo(NewParmSI);
+ };
+
+ if (PatternDecl->isDefaulted()) {
+ RebuildTypeSourceInfoForDefaultSpecialMembers();
SetDeclDefaulted(Function, PatternDecl->getLocation());
- else {
+ } else {
MultiLevelTemplateArgumentList TemplateArgs =
getTemplateInstantiationArgs(Function, nullptr, false, PatternDecl);
@@ -5030,7 +5193,6 @@ void Sema::BuildVariableInstantiation(
NewVar->setCXXForRangeDecl(OldVar->isCXXForRangeDecl());
NewVar->setObjCForDecl(OldVar->isObjCForDecl());
NewVar->setConstexpr(OldVar->isConstexpr());
- MaybeAddCUDAConstantAttr(NewVar);
NewVar->setInitCapture(OldVar->isInitCapture());
NewVar->setPreviousDeclInSameBlockScope(
OldVar->isPreviousDeclInSameBlockScope());
diff --git a/clang/lib/Sema/SemaType.cpp b/clang/lib/Sema/SemaType.cpp
index 4178024d1264..b78331cdfe91 100644
--- a/clang/lib/Sema/SemaType.cpp
+++ b/clang/lib/Sema/SemaType.cpp
@@ -117,6 +117,7 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_RegCall: \
case ParsedAttr::AT_Pascal: \
case ParsedAttr::AT_SwiftCall: \
+ case ParsedAttr::AT_SwiftAsyncCall: \
case ParsedAttr::AT_VectorCall: \
case ParsedAttr::AT_AArch64VectorPcs: \
case ParsedAttr::AT_MSABI: \
@@ -634,7 +635,7 @@ static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
// C++11 attributes before the decl specifiers actually appertain to
// the declarators. Move them straight there. We don't support the
// 'put them wherever you like' semantics we allow for GNU attributes.
- if (attr.isCXX11Attribute()) {
+ if (attr.isStandardAttributeSyntax()) {
moveAttrFromListToList(attr, state.getCurrentAttributes(),
state.getDeclarator().getAttributes());
return;
@@ -687,9 +688,9 @@ static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
// non-owning copy and iterate over that.
ParsedAttributesView AttrsCopy{state.getDeclarator().getAttributes()};
for (ParsedAttr &attr : AttrsCopy) {
- // Do not distribute C++11 attributes. They have strict rules for what
+ // Do not distribute [[]] attributes. They have strict rules for what
// they appertain to.
- if (attr.isCXX11Attribute())
+ if (attr.isStandardAttributeSyntax())
continue;
switch (attr.getKind()) {
@@ -1256,26 +1257,6 @@ getImageAccess(const ParsedAttributesView &Attrs) {
return OpenCLAccessAttr::Keyword_read_only;
}
-static QualType ConvertConstrainedAutoDeclSpecToType(Sema &S, DeclSpec &DS,
- AutoTypeKeyword AutoKW) {
- assert(DS.isConstrainedAuto());
- TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId();
- TemplateArgumentListInfo TemplateArgsInfo;
- TemplateArgsInfo.setLAngleLoc(TemplateId->LAngleLoc);
- TemplateArgsInfo.setRAngleLoc(TemplateId->RAngleLoc);
- ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
- TemplateId->NumArgs);
- S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
- llvm::SmallVector<TemplateArgument, 8> TemplateArgs;
- for (auto &ArgLoc : TemplateArgsInfo.arguments())
- TemplateArgs.push_back(ArgLoc.getArgument());
- return S.Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false,
- /*IsPack=*/false,
- cast<ConceptDecl>(TemplateId->Template.get()
- .getAsTemplateDecl()),
- TemplateArgs);
-}
-
/// Convert the specified declspec to the appropriate type
/// object.
/// \param state Specifies the declarator containing the declaration specifier
@@ -1544,6 +1525,14 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_float: Result = Context.FloatTy; break;
case DeclSpec::TST_double:
+ if (S.getLangOpts().OpenCL) {
+ if (!S.getOpenCLOptions().isSupported("cl_khr_fp64", S.getLangOpts()))
+ S.Diag(DS.getTypeSpecTypeLoc(),
+ diag::err_opencl_double_requires_extension)
+ << (S.getLangOpts().OpenCLVersion >= 300);
+ else if (!S.getOpenCLOptions().isAvailableOption("cl_khr_fp64", S.getLangOpts()))
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::ext_opencl_double_without_pragma);
+ }
if (DS.getTypeSpecWidth() == TypeSpecifierWidth::Long)
Result = Context.LongDoubleTy;
else
@@ -1656,29 +1645,39 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
break;
case DeclSpec::TST_auto:
+ case DeclSpec::TST_decltype_auto: {
+ auto AutoKW = DS.getTypeSpecType() == DeclSpec::TST_decltype_auto
+ ? AutoTypeKeyword::DecltypeAuto
+ : AutoTypeKeyword::Auto;
+
+ ConceptDecl *TypeConstraintConcept = nullptr;
+ llvm::SmallVector<TemplateArgument, 8> TemplateArgs;
if (DS.isConstrainedAuto()) {
- Result = ConvertConstrainedAutoDeclSpecToType(S, DS,
- AutoTypeKeyword::Auto);
- break;
+ if (TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId()) {
+ TypeConstraintConcept =
+ cast<ConceptDecl>(TemplateId->Template.get().getAsTemplateDecl());
+ TemplateArgumentListInfo TemplateArgsInfo;
+ TemplateArgsInfo.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgsInfo.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
+ for (const auto &ArgLoc : TemplateArgsInfo.arguments())
+ TemplateArgs.push_back(ArgLoc.getArgument());
+ } else {
+ declarator.setInvalidType(true);
+ }
}
- Result = Context.getAutoType(QualType(), AutoTypeKeyword::Auto, false);
+ Result = S.Context.getAutoType(QualType(), AutoKW,
+ /*IsDependent*/ false, /*IsPack=*/false,
+ TypeConstraintConcept, TemplateArgs);
break;
+ }
case DeclSpec::TST_auto_type:
Result = Context.getAutoType(QualType(), AutoTypeKeyword::GNUAutoType, false);
break;
- case DeclSpec::TST_decltype_auto:
- if (DS.isConstrainedAuto()) {
- Result =
- ConvertConstrainedAutoDeclSpecToType(S, DS,
- AutoTypeKeyword::DecltypeAuto);
- break;
- }
- Result = Context.getAutoType(QualType(), AutoTypeKeyword::DecltypeAuto,
- /*IsDependent*/ false);
- break;
-
case DeclSpec::TST_unknown_anytype:
Result = Context.UnknownAnyTy;
break;
@@ -1723,9 +1722,26 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
if (Result->containsErrors())
declarator.setInvalidType();
- if (S.getLangOpts().OpenCL &&
- S.checkOpenCLDisabledTypeDeclSpec(DS, Result))
- declarator.setInvalidType(true);
+ if (S.getLangOpts().OpenCL) {
+ const auto &OpenCLOptions = S.getOpenCLOptions();
+ StringRef OptName;
+ // OpenCL C v3.0 s6.3.3 - OpenCL image types require __opencl_c_images
+ // support
+ if ((Result->isImageType() || Result->isSamplerT()) &&
+ (S.getLangOpts().OpenCLVersion >= 300 &&
+ !OpenCLOptions.isSupported("__opencl_c_images", S.getLangOpts())))
+ OptName = "__opencl_c_images";
+ else if (Result->isOCLImage3dWOType() &&
+ !OpenCLOptions.isSupported("cl_khr_3d_image_writes",
+ S.getLangOpts()))
+ OptName = "cl_khr_3d_image_writes";
+
+ if (!OptName.empty()) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_opencl_requires_extension)
+ << 0 << Result << OptName;
+ declarator.setInvalidType();
+ }
+ }
bool IsFixedPointType = DS.getTypeSpecType() == DeclSpec::TST_accum ||
DS.getTypeSpecType() == DeclSpec::TST_fract;
@@ -2060,10 +2076,9 @@ static QualType deduceOpenCLPointeeAddrSpace(Sema &S, QualType PointeeType) {
!PointeeType->isSamplerT() &&
!PointeeType.hasAddressSpace())
PointeeType = S.getASTContext().getAddrSpaceQualType(
- PointeeType,
- S.getLangOpts().OpenCLCPlusPlus || S.getLangOpts().OpenCLVersion == 200
- ? LangAS::opencl_generic
- : LangAS::opencl_private);
+ PointeeType, S.getLangOpts().OpenCLGenericAddressSpace
+ ? LangAS::opencl_generic
+ : LangAS::opencl_private);
return PointeeType;
}
@@ -2090,8 +2105,9 @@ QualType Sema::BuildPointerType(QualType T,
}
if (T->isFunctionType() && getLangOpts().OpenCL &&
- !getOpenCLOptions().isEnabled("__cl_clang_function_pointers")) {
- Diag(Loc, diag::err_opencl_function_pointer);
+ !getOpenCLOptions().isAvailableOption("__cl_clang_function_pointers",
+ getLangOpts())) {
+ Diag(Loc, diag::err_opencl_function_pointer) << /*pointer*/ 0;
return QualType();
}
@@ -2163,6 +2179,13 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
if (checkQualifiedFunction(*this, T, Loc, QFK_Reference))
return QualType();
+ if (T->isFunctionType() && getLangOpts().OpenCL &&
+ !getOpenCLOptions().isAvailableOption("__cl_clang_function_pointers",
+ getLangOpts())) {
+ Diag(Loc, diag::err_opencl_function_pointer) << /*reference*/ 1;
+ return QualType();
+ }
+
// In ARC, it is forbidden to build references to unqualified pointers.
if (getLangOpts().ObjCAutoRefCount)
T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);
@@ -2389,7 +2412,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
}
// Do lvalue-to-rvalue conversions on the array size expression.
- if (ArraySize && !ArraySize->isRValue()) {
+ if (ArraySize && !ArraySize->isPRValue()) {
ExprResult Result = DefaultLvalueConversion(ArraySize);
if (Result.isInvalid())
return QualType();
@@ -2490,7 +2513,7 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
: ConstVal.getActiveBits();
if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
Diag(ArraySize->getBeginLoc(), diag::err_array_too_large)
- << ConstVal.toString(10) << ArraySize->getSourceRange();
+ << toString(ConstVal, 10) << ArraySize->getSourceRange();
return QualType();
}
@@ -2758,16 +2781,21 @@ static void checkExtParameterInfos(Sema &S, ArrayRef<QualType> paramTypes,
llvm::function_ref<SourceLocation(unsigned)> getParamLoc) {
assert(EPI.ExtParameterInfos && "shouldn't get here without param infos");
- bool hasCheckedSwiftCall = false;
- auto checkForSwiftCC = [&](unsigned paramIndex) {
- // Only do this once.
- if (hasCheckedSwiftCall) return;
- hasCheckedSwiftCall = true;
- if (EPI.ExtInfo.getCC() == CC_Swift) return;
+ bool emittedError = false;
+ auto actualCC = EPI.ExtInfo.getCC();
+ enum class RequiredCC { OnlySwift, SwiftOrSwiftAsync };
+ auto checkCompatible = [&](unsigned paramIndex, RequiredCC required) {
+ bool isCompatible =
+ (required == RequiredCC::OnlySwift)
+ ? (actualCC == CC_Swift)
+ : (actualCC == CC_Swift || actualCC == CC_SwiftAsync);
+ if (isCompatible || emittedError)
+ return;
S.Diag(getParamLoc(paramIndex), diag::err_swift_param_attr_not_swiftcall)
- << getParameterABISpelling(EPI.ExtParameterInfos[paramIndex].getABI());
+ << getParameterABISpelling(EPI.ExtParameterInfos[paramIndex].getABI())
+ << (required == RequiredCC::OnlySwift);
+ emittedError = true;
};
-
for (size_t paramIndex = 0, numParams = paramTypes.size();
paramIndex != numParams; ++paramIndex) {
switch (EPI.ExtParameterInfos[paramIndex].getABI()) {
@@ -2778,7 +2806,7 @@ static void checkExtParameterInfos(Sema &S, ArrayRef<QualType> paramTypes,
// swift_indirect_result parameters must be a prefix of the function
// arguments.
case ParameterABI::SwiftIndirectResult:
- checkForSwiftCC(paramIndex);
+ checkCompatible(paramIndex, RequiredCC::SwiftOrSwiftAsync);
if (paramIndex != 0 &&
EPI.ExtParameterInfos[paramIndex - 1].getABI()
!= ParameterABI::SwiftIndirectResult) {
@@ -2788,12 +2816,16 @@ static void checkExtParameterInfos(Sema &S, ArrayRef<QualType> paramTypes,
continue;
case ParameterABI::SwiftContext:
- checkForSwiftCC(paramIndex);
+ checkCompatible(paramIndex, RequiredCC::SwiftOrSwiftAsync);
+ continue;
+
+ // SwiftAsyncContext is not limited to swiftasynccall functions.
+ case ParameterABI::SwiftAsyncContext:
continue;
// swift_error parameters must be preceded by a swift_context parameter.
case ParameterABI::SwiftErrorResult:
- checkForSwiftCC(paramIndex);
+ checkCompatible(paramIndex, RequiredCC::OnlySwift);
if (paramIndex == 0 ||
EPI.ExtParameterInfos[paramIndex - 1].getABI() !=
ParameterABI::SwiftContext) {
@@ -2889,6 +2921,13 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
return QualType();
}
+ if (T->isFunctionType() && getLangOpts().OpenCL &&
+ !getOpenCLOptions().isAvailableOption("__cl_clang_function_pointers",
+ getLangOpts())) {
+ Diag(Loc, diag::err_opencl_function_pointer) << /*pointer*/ 0;
+ return QualType();
+ }
+
// Adjust the default free function calling convention to the default method
// calling convention.
bool IsCtorOrDtor =
@@ -3205,31 +3244,52 @@ InventTemplateParameter(TypeProcessingState &state, QualType T,
// extract its type constraints to attach to the template parameter.
AutoTypeLoc AutoLoc = TrailingTSI->getTypeLoc().getContainedAutoTypeLoc();
TemplateArgumentListInfo TAL(AutoLoc.getLAngleLoc(), AutoLoc.getRAngleLoc());
- for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx)
+ bool Invalid = false;
+ for (unsigned Idx = 0; Idx < AutoLoc.getNumArgs(); ++Idx) {
+ if (D.getEllipsisLoc().isInvalid() && !Invalid &&
+ S.DiagnoseUnexpandedParameterPack(AutoLoc.getArgLoc(Idx),
+ Sema::UPPC_TypeConstraint))
+ Invalid = true;
TAL.addArgument(AutoLoc.getArgLoc(Idx));
+ }
- S.AttachTypeConstraint(AutoLoc.getNestedNameSpecifierLoc(),
- AutoLoc.getConceptNameInfo(),
- AutoLoc.getNamedConcept(),
- AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr,
- InventedTemplateParam, D.getEllipsisLoc());
+ if (!Invalid) {
+ S.AttachTypeConstraint(
+ AutoLoc.getNestedNameSpecifierLoc(), AutoLoc.getConceptNameInfo(),
+ AutoLoc.getNamedConcept(),
+ AutoLoc.hasExplicitTemplateArgs() ? &TAL : nullptr,
+ InventedTemplateParam, D.getEllipsisLoc());
+ }
} else {
// The 'auto' appears in the decl-specifiers; we've not finished forming
// TypeSourceInfo for it yet.
TemplateIdAnnotation *TemplateId = D.getDeclSpec().getRepAsTemplateId();
TemplateArgumentListInfo TemplateArgsInfo;
+ bool Invalid = false;
if (TemplateId->LAngleLoc.isValid()) {
ASTTemplateArgsPtr TemplateArgsPtr(TemplateId->getTemplateArgs(),
TemplateId->NumArgs);
S.translateTemplateArguments(TemplateArgsPtr, TemplateArgsInfo);
+
+ if (D.getEllipsisLoc().isInvalid()) {
+ for (TemplateArgumentLoc Arg : TemplateArgsInfo.arguments()) {
+ if (S.DiagnoseUnexpandedParameterPack(Arg,
+ Sema::UPPC_TypeConstraint)) {
+ Invalid = true;
+ break;
+ }
+ }
+ }
+ }
+ if (!Invalid) {
+ S.AttachTypeConstraint(
+ D.getDeclSpec().getTypeSpecScope().getWithLocInContext(S.Context),
+ DeclarationNameInfo(DeclarationName(TemplateId->Name),
+ TemplateId->TemplateNameLoc),
+ cast<ConceptDecl>(TemplateId->Template.get().getAsTemplateDecl()),
+ TemplateId->LAngleLoc.isValid() ? &TemplateArgsInfo : nullptr,
+ InventedTemplateParam, D.getEllipsisLoc());
}
- S.AttachTypeConstraint(
- D.getDeclSpec().getTypeSpecScope().getWithLocInContext(S.Context),
- DeclarationNameInfo(DeclarationName(TemplateId->Name),
- TemplateId->TemplateNameLoc),
- cast<ConceptDecl>(TemplateId->Template.get().getAsTemplateDecl()),
- TemplateId->LAngleLoc.isValid() ? &TemplateArgsInfo : nullptr,
- InventedTemplateParam, D.getEllipsisLoc());
}
}
@@ -4994,7 +5054,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// FIXME: This really should be in BuildFunctionType.
if (T->isHalfType()) {
if (S.getLangOpts().OpenCL) {
- if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
+ if (!S.getOpenCLOptions().isAvailableOption("cl_khr_fp16",
+ S.getLangOpts())) {
S.Diag(D.getIdentifierLoc(), diag::err_opencl_invalid_return)
<< T << 0 /*pointer hint*/;
D.setInvalidType(true);
@@ -5019,7 +5080,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// (s6.9.e and s6.12.5 OpenCL v2.0) except for printf.
// We also allow here any toolchain reserved identifiers.
if (FTI.isVariadic &&
- !S.getOpenCLOptions().isEnabled("__cl_clang_variadic_functions") &&
+ !S.getOpenCLOptions().isAvailableOption(
+ "__cl_clang_variadic_functions", S.getLangOpts()) &&
!(D.getIdentifier() &&
((D.getIdentifier()->getName() == "printf" &&
(LangOpts.OpenCLCPlusPlus || LangOpts.OpenCLVersion >= 120)) ||
@@ -5214,7 +5276,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// Disallow half FP parameters.
// FIXME: This really should be in BuildFunctionType.
if (S.getLangOpts().OpenCL) {
- if (!S.getOpenCLOptions().isEnabled("cl_khr_fp16")) {
+ if (!S.getOpenCLOptions().isAvailableOption("cl_khr_fp16",
+ S.getLangOpts())) {
S.Diag(Param->getLocation(), diag::err_opencl_invalid_param)
<< ParamTy << 0;
D.setInvalidType();
@@ -5946,6 +6009,8 @@ namespace {
if (!DS.isConstrainedAuto())
return;
TemplateIdAnnotation *TemplateId = DS.getRepAsTemplateId();
+ if (!TemplateId)
+ return;
if (DS.getTypeSpecScope().isNotEmpty())
TL.setNestedNameSpecifierLoc(
DS.getTypeSpecScope().getWithLocInContext(Context));
@@ -6369,6 +6434,7 @@ static bool BuildAddressSpaceIndex(Sema &S, LangAS &ASIdx,
llvm::APSInt max(addrSpace.getBitWidth());
max =
Qualifiers::MaxAddressSpace - (unsigned)LangAS::FirstTargetAddressSpace;
+
if (addrSpace > max) {
S.Diag(AttrLoc, diag::err_attribute_address_space_too_high)
<< (unsigned)max.getZExtValue() << AddrSpace->getSourceRange();
@@ -6484,7 +6550,9 @@ static void HandleAddressSpaceTypeAttribute(QualType &Type,
Attr.setInvalid();
} else {
// The keyword-based type attributes imply which address space to use.
- ASIdx = Attr.asOpenCLLangAS();
+ ASIdx = S.getLangOpts().SYCLIsDevice ? Attr.asSYCLLangAS()
+ : Attr.asOpenCLLangAS();
+
if (ASIdx == LangAS::Default)
llvm_unreachable("Invalid address space");
@@ -7299,6 +7367,8 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<PascalAttr>(Ctx, Attr);
case ParsedAttr::AT_SwiftCall:
return createSimpleAttr<SwiftCallAttr>(Ctx, Attr);
+ case ParsedAttr::AT_SwiftAsyncCall:
+ return createSimpleAttr<SwiftAsyncCallAttr>(Ctx, Attr);
case ParsedAttr::AT_VectorCall:
return createSimpleAttr<VectorCallAttr>(Ctx, Attr);
case ParsedAttr::AT_AArch64VectorPcs:
@@ -7959,8 +8029,6 @@ static void HandleLifetimeBoundAttr(TypeProcessingState &State,
CurType = State.getAttributedType(
createSimpleAttr<LifetimeBoundAttr>(State.getSema().Context, Attr),
CurType, CurType);
- } else {
- Attr.diagnoseAppertainsTo(State.getSema(), nullptr);
}
}
@@ -7998,7 +8066,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (attr.isInvalid())
continue;
- if (attr.isCXX11Attribute()) {
+ if (attr.isStandardAttributeSyntax()) {
// [[gnu::...]] attributes are treated as declaration attributes, so may
// not appertain to a DeclaratorChunk. If we handle them as type
// attributes, accept them in that position and diagnose the GCC
@@ -8027,8 +8095,8 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// otherwise, add it to the FnAttrs list for rechaining.
switch (attr.getKind()) {
default:
- // A C++11 attribute on a declarator chunk must appertain to a type.
- if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk) {
+ // A [[]] attribute on a declarator chunk must appertain to a type.
+ if (attr.isStandardAttributeSyntax() && TAL == TAL_DeclChunk) {
state.getSema().Diag(attr.getLoc(), diag::err_attribute_not_type_attr)
<< attr;
attr.setUsedAsTypeAttr();
@@ -8036,7 +8104,7 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
case ParsedAttr::UnknownAttribute:
- if (attr.isCXX11Attribute() && TAL == TAL_DeclChunk)
+ if (attr.isStandardAttributeSyntax() && TAL == TAL_DeclChunk)
state.getSema().Diag(attr.getLoc(),
diag::warn_unknown_attribute_ignored)
<< attr << attr.getRange();
@@ -8823,6 +8891,29 @@ QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
return Context.getTypeOfExprType(E);
}
+/// getDecltypeForParenthesizedExpr - Given an expr, will return the type for
+/// that expression, as in [dcl.type.simple]p4 but without taking id-expressions
+/// and class member access into account.
+QualType Sema::getDecltypeForParenthesizedExpr(Expr *E) {
+ // C++11 [dcl.type.simple]p4:
+ // [...]
+ QualType T = E->getType();
+ switch (E->getValueKind()) {
+ // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
+ // type of e;
+ case VK_XValue:
+ return Context.getRValueReferenceType(T);
+ // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
+ // type of e;
+ case VK_LValue:
+ return Context.getLValueReferenceType(T);
+ // - otherwise, decltype(e) is the type of e.
+ case VK_PRValue:
+ return T;
+ }
+ llvm_unreachable("Unknown value kind");
+}
+
/// getDecltypeForExpr - Given an expr, will return the decltype for
/// that expression, according to the rules in C++11
/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
@@ -8830,6 +8921,10 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
if (E->isTypeDependent())
return S.Context.DependentTy;
+ Expr *IDExpr = E;
+ if (auto *ImplCastExpr = dyn_cast<ImplicitCastExpr>(E))
+ IDExpr = ImplCastExpr->getSubExpr();
+
// C++11 [dcl.type.simple]p4:
// The type denoted by decltype(e) is defined as follows:
@@ -8840,7 +8935,7 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// Note that this does not pick up the implicit 'const' for a template
// parameter object. This rule makes no difference before C++20 so we apply
// it unconditionally.
- if (const auto *SNTTPE = dyn_cast<SubstNonTypeTemplateParmExpr>(E))
+ if (const auto *SNTTPE = dyn_cast<SubstNonTypeTemplateParmExpr>(IDExpr))
return SNTTPE->getParameterType(S.Context);
// - if e is an unparenthesized id-expression or an unparenthesized class
@@ -8849,21 +8944,22 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// functions, the program is ill-formed;
//
// We apply the same rules for Objective-C ivar and property references.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(IDExpr)) {
const ValueDecl *VD = DRE->getDecl();
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(VD))
return TPO->getType().getUnqualifiedType();
return VD->getType();
- } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(IDExpr)) {
if (const ValueDecl *VD = ME->getMemberDecl())
if (isa<FieldDecl>(VD) || isa<VarDecl>(VD))
return VD->getType();
- } else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(E)) {
+ } else if (const ObjCIvarRefExpr *IR = dyn_cast<ObjCIvarRefExpr>(IDExpr)) {
return IR->getDecl()->getType();
- } else if (const ObjCPropertyRefExpr *PR = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ } else if (const ObjCPropertyRefExpr *PR =
+ dyn_cast<ObjCPropertyRefExpr>(IDExpr)) {
if (PR->isExplicitProperty())
return PR->getExplicitProperty()->getType();
- } else if (auto *PE = dyn_cast<PredefinedExpr>(E)) {
+ } else if (auto *PE = dyn_cast<PredefinedExpr>(IDExpr)) {
return PE->getType();
}
@@ -8876,8 +8972,8 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
// entity.
using namespace sema;
if (S.getCurLambda()) {
- if (isa<ParenExpr>(E)) {
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (isa<ParenExpr>(IDExpr)) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(IDExpr->IgnoreParens())) {
if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation());
if (!T.isNull())
@@ -8887,22 +8983,7 @@ static QualType getDecltypeForExpr(Sema &S, Expr *E) {
}
}
-
- // C++11 [dcl.type.simple]p4:
- // [...]
- QualType T = E->getType();
- switch (E->getValueKind()) {
- // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
- // type of e;
- case VK_XValue: T = S.Context.getRValueReferenceType(T); break;
- // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
- // type of e;
- case VK_LValue: T = S.Context.getLValueReferenceType(T); break;
- // - otherwise, decltype(e) is the type of e.
- case VK_RValue: break;
- }
-
- return T;
+ return S.getDecltypeForParenthesizedExpr(E);
}
QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc,
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index 0a596e50658b..70ba631dbfc6 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -1311,9 +1311,9 @@ public:
/// By default, performs semantic analysis to build the new statement.
/// Subclasses may override this routine to provide different behavior.
StmtResult RebuildAttributedStmt(SourceLocation AttrLoc,
- ArrayRef<const Attr*> Attrs,
+ ArrayRef<const Attr *> Attrs,
Stmt *SubStmt) {
- return SemaRef.ActOnAttributedStmt(AttrLoc, Attrs, SubStmt);
+ return SemaRef.BuildAttributedStmt(AttrLoc, Attrs, SubStmt);
}
/// Build a new "if" statement.
@@ -1546,6 +1546,14 @@ public:
return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
}
+ /// Build a new OpenMP Canonical loop.
+ ///
+ /// Ensures that the outermost loop in @p LoopStmt is wrapped by a
+ /// OMPCanonicalLoop.
+ StmtResult RebuildOMPCanonicalLoop(Stmt *LoopStmt) {
+ return getSema().ActOnOpenMPCanonicalLoop(LoopStmt);
+ }
+
/// Build a new OpenMP executable directive.
///
/// By default, performs semantic analysis to build the new statement.
@@ -1618,6 +1626,27 @@ public:
return getSema().ActOnOpenMPSimdlenClause(Len, StartLoc, LParenLoc, EndLoc);
}
+ OMPClause *RebuildOMPSizesClause(ArrayRef<Expr *> Sizes,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPSizesClause(Sizes, StartLoc, LParenLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'full' clause.
+ OMPClause *RebuildOMPFullClause(SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPFullClause(StartLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'partial' clause.
+ OMPClause *RebuildOMPPartialClause(Expr *Factor, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPPartialClause(Factor, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
/// Build a new OpenMP 'allocator' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -2155,6 +2184,78 @@ public:
LParenLoc, EndLoc);
}
+ /// Build a new OpenMP 'init' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
+ bool IsTarget, bool IsTargetSync,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPInitClause(InteropVar, PrefExprs, IsTarget,
+ IsTargetSync, StartLoc, LParenLoc,
+ VarLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'use' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc, SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPUseClause(InteropVar, StartLoc, LParenLoc,
+ VarLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'destroy' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation VarLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDestroyClause(InteropVar, StartLoc, LParenLoc,
+ VarLoc, EndLoc);
+ }
+
+ /// Build a new OpenMP 'novariants' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNovariantsClause(Expr *Condition,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNovariantsClause(Condition, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'nocontext' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPNocontextClause(Expr *Condition, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPNocontextClause(Condition, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
+ /// Build a new OpenMP 'filter' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPFilterClause(ThreadID, StartLoc, LParenLoc,
+ EndLoc);
+ }
+
/// Rebuild the operand to an Objective-C \@synchronized statement.
///
/// By default, performs semantic analysis to build the new statement.
@@ -2312,6 +2413,13 @@ public:
return SEHFinallyStmt::Create(getSema().getASTContext(), Loc, Block);
}
+ ExprResult RebuildSYCLUniqueStableNameExpr(SourceLocation OpLoc,
+ SourceLocation LParen,
+ SourceLocation RParen,
+ TypeSourceInfo *TSI) {
+ return getSema().BuildSYCLUniqueStableNameExpr(OpLoc, LParen, RParen, TSI);
+ }
+
/// Build a new predefined expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -3121,8 +3229,9 @@ public:
if (Constructor->isInheritingConstructor())
FoundCtor = Constructor->getInheritedConstructor().getConstructor();
- SmallVector<Expr*, 8> ConvertedArgs;
- if (getSema().CompleteConstructorCall(FoundCtor, Args, Loc, ConvertedArgs))
+ SmallVector<Expr *, 8> ConvertedArgs;
+ if (getSema().CompleteConstructorCall(FoundCtor, T, Args, Loc,
+ ConvertedArgs))
return ExprError();
return getSema().BuildCXXConstructExpr(Loc, T, Constructor,
@@ -3504,7 +3613,7 @@ public:
FunctionDecl *Builtin = cast<FunctionDecl>(Lookup.front());
Expr *Callee = new (SemaRef.Context)
DeclRefExpr(SemaRef.Context, Builtin, false,
- SemaRef.Context.BuiltinFnTy, VK_RValue, BuiltinLoc);
+ SemaRef.Context.BuiltinFnTy, VK_PRValue, BuiltinLoc);
QualType CalleePtrTy = SemaRef.Context.getPointerType(Builtin->getType());
Callee = SemaRef.ImpCastExprToType(Callee, CalleePtrTy,
CK_BuiltinFnToFnPtr).get();
@@ -3943,12 +4052,10 @@ Sema::ConditionResult TreeTransform<Derived>::TransformCondition(
return Sema::ConditionResult();
}
-template<typename Derived>
-NestedNameSpecifierLoc
-TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
- NestedNameSpecifierLoc NNS,
- QualType ObjectType,
- NamedDecl *FirstQualifierInScope) {
+template <typename Derived>
+NestedNameSpecifierLoc TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS, QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
Qualifier = Qualifier.getPrefix())
@@ -3962,28 +4069,26 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
switch (QNNS->getKind()) {
case NestedNameSpecifier::Identifier: {
Sema::NestedNameSpecInfo IdInfo(QNNS->getAsIdentifier(),
- Q.getLocalBeginLoc(), Q.getLocalEndLoc(), ObjectType);
+ Q.getLocalBeginLoc(), Q.getLocalEndLoc(),
+ ObjectType);
if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/nullptr, IdInfo, false,
SS, FirstQualifierInScope, false))
return NestedNameSpecifierLoc();
- }
break;
+ }
case NestedNameSpecifier::Namespace: {
- NamespaceDecl *NS
- = cast_or_null<NamespaceDecl>(
- getDerived().TransformDecl(
- Q.getLocalBeginLoc(),
- QNNS->getAsNamespace()));
+ NamespaceDecl *NS =
+ cast_or_null<NamespaceDecl>(getDerived().TransformDecl(
+ Q.getLocalBeginLoc(), QNNS->getAsNamespace()));
SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc());
break;
}
case NestedNameSpecifier::NamespaceAlias: {
- NamespaceAliasDecl *Alias
- = cast_or_null<NamespaceAliasDecl>(
- getDerived().TransformDecl(Q.getLocalBeginLoc(),
- QNNS->getAsNamespaceAlias()));
+ NamespaceAliasDecl *Alias =
+ cast_or_null<NamespaceAliasDecl>(getDerived().TransformDecl(
+ Q.getLocalBeginLoc(), QNNS->getAsNamespaceAlias()));
SS.Extend(SemaRef.Context, Alias, Q.getLocalBeginLoc(),
Q.getLocalEndLoc());
break;
@@ -4019,7 +4124,7 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
if (TL.getType()->isEnumeralType())
SemaRef.Diag(TL.getBeginLoc(),
diag::warn_cxx98_compat_enum_nested_name_spec);
- SS.Extend(SemaRef.Context, /*FIXME:*/SourceLocation(), TL,
+ SS.Extend(SemaRef.Context, /*FIXME:*/ SourceLocation(), TL,
Q.getLocalEndLoc());
break;
}
@@ -4028,7 +4133,7 @@ TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
TypedefTypeLoc TTL = TL.getAs<TypedefTypeLoc>();
if (!TTL || !TTL.getTypedefNameDecl()->isInvalidDecl()) {
SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
- << TL.getType() << SS.getRange();
+ << TL.getType() << SS.getRange();
}
return NestedNameSpecifierLoc();
}
@@ -4217,10 +4322,10 @@ void TreeTransform<Derived>::InventTemplateArgumentLoc(
Arg, QualType(), getDerived().getBaseLocation());
}
-template<typename Derived>
+template <typename Derived>
bool TreeTransform<Derived>::TransformTemplateArgument(
- const TemplateArgumentLoc &Input,
- TemplateArgumentLoc &Output, bool Uneval) {
+ const TemplateArgumentLoc &Input, TemplateArgumentLoc &Output,
+ bool Uneval) {
const TemplateArgument &Arg = Input.getArgument();
switch (Arg.getKind()) {
case TemplateArgument::Null:
@@ -4269,7 +4374,8 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
DI = InventTypeSourceInfo(Input.getArgument().getAsType());
DI = getDerived().TransformType(DI);
- if (!DI) return true;
+ if (!DI)
+ return true;
Output = TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
return false;
@@ -4285,9 +4391,8 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
CXXScopeSpec SS;
SS.Adopt(QualifierLoc);
- TemplateName Template
- = getDerived().TransformTemplateName(SS, Arg.getAsTemplate(),
- Input.getTemplateNameLoc());
+ TemplateName Template = getDerived().TransformTemplateName(
+ SS, Arg.getAsTemplate(), Input.getTemplateNameLoc());
if (Template.isNull())
return true;
@@ -4309,11 +4414,13 @@ bool TreeTransform<Derived>::TransformTemplateArgument(
Sema::ExpressionEvaluationContextRecord::EK_TemplateArgument);
Expr *InputExpr = Input.getSourceExpression();
- if (!InputExpr) InputExpr = Input.getArgument().getAsExpr();
+ if (!InputExpr)
+ InputExpr = Input.getArgument().getAsExpr();
ExprResult E = getDerived().TransformExpr(InputExpr);
E = SemaRef.ActOnConstantExpression(E);
- if (E.isInvalid()) return true;
+ if (E.isInvalid())
+ return true;
Output = TemplateArgumentLoc(TemplateArgument(E.get()), E.get());
return false;
}
@@ -5723,8 +5830,8 @@ TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
return getDerived().TransformFunctionProtoType(
TLB, TL, nullptr, Qualifiers(),
[&](FunctionProtoType::ExceptionSpecInfo &ESI, bool &Changed) {
- return This->TransformExceptionSpec(TL.getBeginLoc(), ESI,
- ExceptionStorage, Changed);
+ return This->getDerived().TransformExceptionSpec(
+ TL.getBeginLoc(), ESI, ExceptionStorage, Changed);
});
}
@@ -6427,11 +6534,9 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
ConceptDecl *NewCD = nullptr;
TemplateArgumentListInfo NewTemplateArgs;
NestedNameSpecifierLoc NewNestedNameSpec;
- if (TL.getTypePtr()->isConstrained()) {
- NewCD = cast_or_null<ConceptDecl>(
- getDerived().TransformDecl(
- TL.getConceptNameLoc(),
- TL.getTypePtr()->getTypeConstraintConcept()));
+ if (T->isConstrained()) {
+ NewCD = cast_or_null<ConceptDecl>(getDerived().TransformDecl(
+ TL.getConceptNameLoc(), T->getTypeConstraintConcept()));
NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
@@ -6453,7 +6558,8 @@ QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
QualType Result = TL.getType();
if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced ||
- T->isDependentType()) {
+ T->isDependentType() || T->isConstrained()) {
+ // FIXME: Maybe don't rebuild if all template arguments are the same.
llvm::SmallVector<TemplateArgument, 4> NewArgList;
NewArgList.reserve(NewArgList.size());
for (const auto &ArgLoc : NewTemplateArgs.arguments())
@@ -7237,7 +7343,8 @@ TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S,
for (const auto *I : S->getAttrs()) {
const Attr *R = getDerived().TransformAttr(I);
AttrsChanged |= (I != R);
- Attrs.push_back(R);
+ if (R)
+ Attrs.push_back(R);
}
StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt(), SDK);
@@ -7247,6 +7354,11 @@ TreeTransform<Derived>::TransformAttributedStmt(AttributedStmt *S,
if (SubStmt.get() == S->getSubStmt() && !AttrsChanged)
return S;
+ // If transforming the attributes failed for all of the attributes in the
+ // statement, don't make an AttributedStmt without attributes.
+ if (Attrs.empty())
+ return SubStmt;
+
return getDerived().RebuildAttributedStmt(S->getAttrLoc(), Attrs,
SubStmt.get());
}
@@ -8314,6 +8426,16 @@ TreeTransform<Derived>::TransformSEHLeaveStmt(SEHLeaveStmt *S) {
//===----------------------------------------------------------------------===//
// OpenMP directive transformation
//===----------------------------------------------------------------------===//
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPCanonicalLoop(OMPCanonicalLoop *L) {
+ // OMPCanonicalLoops are eliminated during transformation, since they will be
+ // recomputed by semantic analysis of the associated OMPLoopBasedDirective
+ // after transformation.
+ return getDerived().TransformStmt(L->getLoopStmt());
+}
+
template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
OMPExecutableDirective *D) {
@@ -8348,8 +8470,11 @@ StmtResult TreeTransform<Derived>::TransformOMPExecutableDirective(
D->getDirectiveKind() == OMPD_master)
CS = D->getAssociatedStmt();
else
- CS = D->getInnermostCapturedStmt()->getCapturedStmt();
+ CS = D->getRawStmt();
Body = getDerived().TransformStmt(CS);
+ if (Body.isUsable() && isOpenMPLoopDirective(D->getDirectiveKind()) &&
+ getSema().getLangOpts().OpenMPIRBuilder)
+ Body = getDerived().RebuildOMPCanonicalLoop(Body.get());
}
AssociatedStmt =
getDerived().getSema().ActOnOpenMPRegionEnd(Body, TClauses);
@@ -8403,6 +8528,28 @@ TreeTransform<Derived>::TransformOMPSimdDirective(OMPSimdDirective *D) {
template <typename Derived>
StmtResult
+TreeTransform<Derived>::TransformOMPTileDirective(OMPTileDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(D->getDirectiveKind(), DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPUnrollDirective(OMPUnrollDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(D->getDirectiveKind(), DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
TreeTransform<Derived>::TransformOMPForDirective(OMPForDirective *D) {
DeclarationNameInfo DirName;
getDerived().getSema().StartOpenMPDSABlock(OMPD_for, DirName, nullptr,
@@ -8980,6 +9127,38 @@ TreeTransform<Derived>::TransformOMPTargetTeamsDistributeSimdDirective(
return Res;
}
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPInteropDirective(OMPInteropDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_interop, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPDispatchDirective(OMPDispatchDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_dispatch, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPMaskedDirective(OMPMaskedDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_masked, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
@@ -9044,6 +9223,53 @@ TreeTransform<Derived>::TransformOMPSimdlenClause(OMPSimdlenClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPSizesClause(OMPSizesClause *C) {
+ SmallVector<Expr *, 4> TransformedSizes;
+ TransformedSizes.reserve(C->getNumSizes());
+ bool Changed = false;
+ for (Expr *E : C->getSizesRefs()) {
+ if (!E) {
+ TransformedSizes.push_back(nullptr);
+ continue;
+ }
+
+ ExprResult T = getDerived().TransformExpr(E);
+ if (T.isInvalid())
+ return nullptr;
+ if (E != T.get())
+ Changed = true;
+ TransformedSizes.push_back(T.get());
+ }
+
+ if (!Changed && !getDerived().AlwaysRebuild())
+ return C;
+ return RebuildOMPSizesClause(TransformedSizes, C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPFullClause(OMPFullClause *C) {
+ if (!getDerived().AlwaysRebuild())
+ return C;
+ return RebuildOMPFullClause(C->getBeginLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPPartialClause(OMPPartialClause *C) {
+ ExprResult T = getDerived().TransformExpr(C->getFactor());
+ if (T.isInvalid())
+ return nullptr;
+ Expr *Factor = T.get();
+ bool Changed = Factor != C->getFactor();
+
+ if (!Changed && !getDerived().AlwaysRebuild())
+ return C;
+ return RebuildOMPPartialClause(Factor, C->getBeginLoc(), C->getLParenLoc(),
+ C->getEndLoc());
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPCollapseClause(OMPCollapseClause *C) {
ExprResult E = getDerived().TransformExpr(C->getNumForLoops());
@@ -9211,10 +9437,76 @@ TreeTransform<Derived>::TransformOMPNogroupClause(OMPNogroupClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPInitClause(OMPInitClause *C) {
+ ExprResult IVR = getDerived().TransformExpr(C->getInteropVar());
+ if (IVR.isInvalid())
+ return nullptr;
+
+ llvm::SmallVector<Expr *, 8> PrefExprs;
+ PrefExprs.reserve(C->varlist_size() - 1);
+ for (Expr *E : llvm::drop_begin(C->varlists())) {
+ ExprResult ER = getDerived().TransformExpr(cast<Expr>(E));
+ if (ER.isInvalid())
+ return nullptr;
+ PrefExprs.push_back(ER.get());
+ }
+ return getDerived().RebuildOMPInitClause(
+ IVR.get(), PrefExprs, C->getIsTarget(), C->getIsTargetSync(),
+ C->getBeginLoc(), C->getLParenLoc(), C->getVarLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPUseClause(OMPUseClause *C) {
+ ExprResult ER = getDerived().TransformExpr(C->getInteropVar());
+ if (ER.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPUseClause(ER.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getVarLoc(),
+ C->getEndLoc());
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPDestroyClause(OMPDestroyClause *C) {
- // No need to rebuild this clause, no template-dependent parameters.
- return C;
+ ExprResult ER;
+ if (Expr *IV = C->getInteropVar()) {
+ ER = getDerived().TransformExpr(IV);
+ if (ER.isInvalid())
+ return nullptr;
+ }
+ return getDerived().RebuildOMPDestroyClause(ER.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getVarLoc(),
+ C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNovariantsClause(OMPNovariantsClause *C) {
+ ExprResult Cond = getDerived().TransformExpr(C->getCondition());
+ if (Cond.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNovariantsClause(
+ Cond.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPNocontextClause(OMPNocontextClause *C) {
+ ExprResult Cond = getDerived().TransformExpr(C->getCondition());
+ if (Cond.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPNocontextClause(
+ Cond.get(), C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+}
+
+template <typename Derived>
+OMPClause *
+TreeTransform<Derived>::TransformOMPFilterClause(OMPFilterClause *C) {
+ ExprResult ThreadID = getDerived().TransformExpr(C->getThreadID());
+ if (ThreadID.isInvalid())
+ return nullptr;
+ return getDerived().RebuildOMPFilterClause(ThreadID.get(), C->getBeginLoc(),
+ C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -9936,6 +10228,24 @@ TreeTransform<Derived>::TransformConstantExpr(ConstantExpr *E) {
return TransformExpr(E->getSubExpr());
}
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformSYCLUniqueStableNameExpr(
+ SYCLUniqueStableNameExpr *E) {
+ if (!E->isTypeDependent())
+ return E;
+
+ TypeSourceInfo *NewT = getDerived().TransformType(E->getTypeSourceInfo());
+
+ if (!NewT)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && E->getTypeSourceInfo() == NewT)
+ return E;
+
+ return getDerived().RebuildSYCLUniqueStableNameExpr(
+ E->getLocation(), E->getLParenLocation(), E->getRParenLocation(), NewT);
+}
+
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
@@ -11325,15 +11635,20 @@ TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
TInfo, E->getEndLoc());
}
- // We don't know whether the subexpression is potentially evaluated until
- // after we perform semantic analysis. We speculatively assume it is
- // unevaluated; it will get fixed later if the subexpression is in fact
- // potentially evaluated.
- EnterExpressionEvaluationContext Unevaluated(
- SemaRef, Sema::ExpressionEvaluationContext::Unevaluated,
- Sema::ReuseLambdaContextDecl);
+ // Typeid's operand is an unevaluated context, unless it's a polymorphic
+ // type. We must not unilaterally enter unevaluated context here, as then
+ // semantic processing can re-transform an already transformed operand.
+ Expr *Op = E->getExprOperand();
+ auto EvalCtx = Sema::ExpressionEvaluationContext::Unevaluated;
+ if (E->isGLValue())
+ if (auto *RecordT = Op->getType()->getAs<RecordType>())
+ if (cast<CXXRecordDecl>(RecordT->getDecl())->isPolymorphic())
+ EvalCtx = SemaRef.ExprEvalContexts.back().Context;
- ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, EvalCtx,
+ Sema::ReuseLambdaContextDecl);
+
+ ExprResult SubExpr = getDerived().TransformExpr(Op);
if (SubExpr.isInvalid())
return ExprError();
@@ -12184,15 +12499,13 @@ ExprResult TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
nullptr);
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
- DependentScopeDeclRefExpr *E,
- bool IsAddressOfOperand,
- TypeSourceInfo **RecoveryTSI) {
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *E, bool IsAddressOfOperand,
+ TypeSourceInfo **RecoveryTSI) {
assert(E->getQualifierLoc());
- NestedNameSpecifierLoc QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+ NestedNameSpecifierLoc QualifierLoc =
+ getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
if (!QualifierLoc)
return ExprError();
SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
@@ -12201,14 +12514,13 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
// destination type name (if present) resolves the same way after
// instantiation as it did in the local scope.
- DeclarationNameInfo NameInfo
- = getDerived().TransformDeclarationNameInfo(E->getNameInfo());
+ DeclarationNameInfo NameInfo =
+ getDerived().TransformDeclarationNameInfo(E->getNameInfo());
if (!NameInfo.getName())
return ExprError();
if (!E->hasExplicitTemplateArgs()) {
- if (!getDerived().AlwaysRebuild() &&
- QualifierLoc == E->getQualifierLoc() &&
+ if (!getDerived().AlwaysRebuild() && QualifierLoc == E->getQualifierLoc() &&
// Note: it is sufficient to compare the Name component of NameInfo:
// if name has not changed, DNLoc has not changed either.
NameInfo.getName() == E->getDeclName())
@@ -12220,9 +12532,8 @@ TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
}
TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
- if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
- E->getNumTemplateArgs(),
- TransArgs))
+ if (getDerived().TransformTemplateArguments(
+ E->getTemplateArgs(), E->getNumTemplateArgs(), TransArgs))
return ExprError();
return getDerived().RebuildDependentScopeDeclRefExpr(
@@ -12241,7 +12552,8 @@ TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
(E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1)))) &&
(!getDerived().DropCallArgument(E->getArg(0))) &&
!E->isListInitialization()))
- return getDerived().TransformExpr(E->getArg(0));
+ return getDerived().TransformInitializer(E->getArg(0),
+ /*DirectInit*/ false);
TemporaryBase Rebase(*this, /*FIXME*/ E->getBeginLoc(), DeclarationName());
@@ -12504,10 +12816,11 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
E->getCaptureDefault());
getDerived().transformedLocalDecl(OldClass, {Class});
- Optional<std::tuple<unsigned, bool, Decl *>> Mangling;
+ Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling;
if (getDerived().ReplacingOriginal())
- Mangling = std::make_tuple(OldClass->getLambdaManglingNumber(),
- OldClass->hasKnownLambdaInternalLinkage(),
+ Mangling = std::make_tuple(OldClass->hasKnownLambdaInternalLinkage(),
+ OldClass->getLambdaManglingNumber(),
+ OldClass->getDeviceLambdaManglingNumber(),
OldClass->getLambdaContextDecl());
// Build the call operator.
@@ -12868,18 +13181,18 @@ TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
&TransArgs);
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old) {
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformUnresolvedMemberExpr(
+ UnresolvedMemberExpr *Old) {
// Transform the base of the expression.
- ExprResult Base((Expr*) nullptr);
+ ExprResult Base((Expr *)nullptr);
QualType BaseType;
if (!Old->isImplicitAccess()) {
Base = getDerived().TransformExpr(Old->getBase());
if (Base.isInvalid())
return ExprError();
- Base = getSema().PerformMemberExprBaseConversion(Base.get(),
- Old->isArrow());
+ Base =
+ getSema().PerformMemberExprBaseConversion(Base.get(), Old->isArrow());
if (Base.isInvalid())
return ExprError();
BaseType = Base.get()->getType();
@@ -12889,27 +13202,24 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
NestedNameSpecifierLoc QualifierLoc;
if (Old->getQualifierLoc()) {
- QualifierLoc
- = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
+ QualifierLoc =
+ getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
if (!QualifierLoc)
return ExprError();
}
SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
- LookupResult R(SemaRef, Old->getMemberNameInfo(),
- Sema::LookupOrdinaryName);
+ LookupResult R(SemaRef, Old->getMemberNameInfo(), Sema::LookupOrdinaryName);
// Transform the declaration set.
- if (TransformOverloadExprDecls(Old, /*RequiresADL*/false, R))
+ if (TransformOverloadExprDecls(Old, /*RequiresADL*/ false, R))
return ExprError();
// Determine the naming class.
if (Old->getNamingClass()) {
- CXXRecordDecl *NamingClass
- = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
- Old->getMemberLoc(),
- Old->getNamingClass()));
+ CXXRecordDecl *NamingClass = cast_or_null<CXXRecordDecl>(
+ getDerived().TransformDecl(Old->getMemberLoc(), Old->getNamingClass()));
if (!NamingClass)
return ExprError();
@@ -12920,9 +13230,8 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
if (Old->hasExplicitTemplateArgs()) {
TransArgs.setLAngleLoc(Old->getLAngleLoc());
TransArgs.setRAngleLoc(Old->getRAngleLoc());
- if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
- Old->getNumTemplateArgs(),
- TransArgs))
+ if (getDerived().TransformTemplateArguments(
+ Old->getTemplateArgs(), Old->getNumTemplateArgs(), TransArgs))
return ExprError();
}
@@ -12932,16 +13241,10 @@ TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old)
// nested-name-qualifier (and therefore could do the lookup).
NamedDecl *FirstQualifierInScope = nullptr;
- return getDerived().RebuildUnresolvedMemberExpr(Base.get(),
- BaseType,
- Old->getOperatorLoc(),
- Old->isArrow(),
- QualifierLoc,
- TemplateKWLoc,
- FirstQualifierInScope,
- R,
- (Old->hasExplicitTemplateArgs()
- ? &TransArgs : nullptr));
+ return getDerived().RebuildUnresolvedMemberExpr(
+ Base.get(), BaseType, Old->getOperatorLoc(), Old->isArrow(), QualifierLoc,
+ TemplateKWLoc, FirstQualifierInScope, R,
+ (Old->hasExplicitTemplateArgs() ? &TransArgs : nullptr));
}
template<typename Derived>
@@ -13014,7 +13317,7 @@ TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
auto *VD = cast<ValueDecl>(Pack);
ExprResult DRE = getSema().BuildDeclRefExpr(
VD, VD->getType().getNonLValueExprType(getSema().Context),
- VD->getType()->isReferenceType() ? VK_LValue : VK_RValue,
+ VD->getType()->isReferenceType() ? VK_LValue : VK_PRValue,
E->getPackLoc());
if (DRE.isInvalid())
return ExprError();
@@ -13820,7 +14123,14 @@ TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformAsTypeExpr(AsTypeExpr *E) {
- llvm_unreachable("Cannot transform asType expressions yet");
+ ExprResult SrcExpr = getDerived().TransformExpr(E->getSrcExpr());
+ if (SrcExpr.isInvalid())
+ return ExprError();
+
+ QualType Type = getDerived().TransformType(E->getType());
+
+ return SemaRef.BuildAsTypeExpr(SrcExpr.get(), Type, E->getBuiltinLoc(),
+ E->getRParenLoc());
}
template<typename Derived>
@@ -14113,7 +14423,11 @@ QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(SourceLocation Loc,
// A valid resolved using typename decl points to exactly one type decl.
assert(++Using->shadow_begin() == Using->shadow_end());
- Ty = cast<TypeDecl>((*Using->shadow_begin())->getTargetDecl());
+
+ NamedDecl *Target = Using->shadow_begin()->getTargetDecl();
+ if (SemaRef.DiagnoseUseOfDecl(Target, Loc))
+ return QualType();
+ Ty = cast<TypeDecl>(Target);
} else {
assert(isa<UnresolvedUsingTypenameDecl>(D) &&
"UnresolvedUsingTypenameDecl transformed to non-using decl");
@@ -14334,11 +14648,9 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
SourceLocation RBrace;
if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
- DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo();
- LBrace = SourceLocation::getFromRawEncoding(
- NameLoc.CXXOperatorName.BeginOpNameLoc);
- RBrace = SourceLocation::getFromRawEncoding(
- NameLoc.CXXOperatorName.EndOpNameLoc);
+ DeclarationNameLoc NameLoc = DRE->getNameInfo().getInfo();
+ LBrace = NameLoc.getCXXOperatorNameBeginLoc();
+ RBrace = NameLoc.getCXXOperatorNameEndLoc();
} else {
LBrace = Callee->getBeginLoc();
RBrace = OpLoc;
diff --git a/clang/lib/Serialization/ASTCommon.cpp b/clang/lib/Serialization/ASTCommon.cpp
index fec6dd5cf17d..5fe1f96327dd 100644
--- a/clang/lib/Serialization/ASTCommon.cpp
+++ b/clang/lib/Serialization/ASTCommon.cpp
@@ -242,6 +242,11 @@ serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
ID = PREDEF_TYPE_##Id##_ID; \
break;
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) \
+ case BuiltinType::Id: \
+ ID = PREDEF_TYPE_##Id##_ID; \
+ break;
+#include "clang/Basic/RISCVVTypes.def"
case BuiltinType::BuiltinFn:
ID = PREDEF_TYPE_BUILTIN_FN;
break;
@@ -389,6 +394,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::NonTypeTemplateParm:
case Decl::TemplateTemplateParm:
case Decl::Using:
+ case Decl::UsingEnum:
case Decl::UsingPack:
case Decl::ObjCMethod:
case Decl::ObjCCategory:
@@ -422,6 +428,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Concept:
case Decl::LifetimeExtendedTemporary:
case Decl::RequiresExprBody:
+ case Decl::UnresolvedUsingIfExists:
return false;
// These indirectly derive from Redeclarable<T> but are not actually
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index 1f68f6bc3e38..83bade9941b3 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -114,6 +114,7 @@
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -782,9 +783,11 @@ static bool checkHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
StringRef SpecificModuleCachePath,
StringRef ExistingModuleCachePath,
DiagnosticsEngine *Diags,
- const LangOptions &LangOpts) {
+ const LangOptions &LangOpts,
+ const PreprocessorOptions &PPOpts) {
if (LangOpts.Modules) {
- if (SpecificModuleCachePath != ExistingModuleCachePath) {
+ if (SpecificModuleCachePath != ExistingModuleCachePath &&
+ !PPOpts.AllowPCHWithDifferentModulesCachePath) {
if (Diags)
Diags->Report(diag::err_pch_modulecache_mismatch)
<< SpecificModuleCachePath << ExistingModuleCachePath;
@@ -801,7 +804,7 @@ bool PCHValidator::ReadHeaderSearchOptions(const HeaderSearchOptions &HSOpts,
return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
PP.getHeaderSearchInfo().getModuleCachePath(),
Complain ? &Reader.Diags : nullptr,
- PP.getLangOpts());
+ PP.getLangOpts(), PP.getPreprocessorOpts());
}
void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) {
@@ -812,6 +815,31 @@ void PCHValidator::ReadCounter(const ModuleFile &M, unsigned Value) {
// AST reader implementation
//===----------------------------------------------------------------------===//
+static uint64_t readULEB(const unsigned char *&P) {
+ unsigned Length = 0;
+ const char *Error = nullptr;
+
+ uint64_t Val = llvm::decodeULEB128(P, &Length, nullptr, &Error);
+ if (Error)
+ llvm::report_fatal_error(Error);
+ P += Length;
+ return Val;
+}
+
+/// Read ULEB-encoded key length and data length.
+static std::pair<unsigned, unsigned>
+readULEBKeyDataLength(const unsigned char *&P) {
+ unsigned KeyLen = readULEB(P);
+ if ((unsigned)KeyLen != KeyLen)
+ llvm::report_fatal_error("key too large");
+
+ unsigned DataLen = readULEB(P);
+ if ((unsigned)DataLen != DataLen)
+ llvm::report_fatal_error("data too large");
+
+ return std::make_pair(KeyLen, DataLen);
+}
+
void ASTReader::setDeserializationListener(ASTDeserializationListener *Listener,
bool TakeOwnership) {
DeserializationListener = Listener;
@@ -824,11 +852,7 @@ unsigned ASTSelectorLookupTrait::ComputeHash(Selector Sel) {
std::pair<unsigned, unsigned>
ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
- unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
- return std::make_pair(KeyLen, DataLen);
+ return readULEBKeyDataLength(d);
}
ASTSelectorLookupTrait::internal_key_type
@@ -894,11 +918,7 @@ unsigned ASTIdentifierLookupTraitBase::ComputeHash(const internal_key_type& a) {
std::pair<unsigned, unsigned>
ASTIdentifierLookupTraitBase::ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
- unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
- return std::make_pair(KeyLen, DataLen);
+ return readULEBKeyDataLength(d);
}
ASTIdentifierLookupTraitBase::internal_key_type
@@ -1086,11 +1106,7 @@ ASTDeclContextNameLookupTrait::ReadFileRef(const unsigned char *&d) {
std::pair<unsigned, unsigned>
ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char *&d) {
- using namespace llvm::support;
-
- unsigned KeyLen = endian::readNext<uint16_t, little, unaligned>(d);
- unsigned DataLen = endian::readNext<uint16_t, little, unaligned>(d);
- return std::make_pair(KeyLen, DataLen);
+ return readULEBKeyDataLength(d);
}
ASTDeclContextNameLookupTrait::internal_key_type
@@ -1466,7 +1482,7 @@ bool ASTReader::ReadSLocEntry(int ID) {
}
BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
- unsigned BaseOffset = F->SLocEntryBaseOffset;
+ SourceLocation::UIntTy BaseOffset = F->SLocEntryBaseOffset;
++NumSLocEntriesRead;
Expected<llvm::BitstreamEntry> MaybeEntry = SLocEntryCursor.advance();
@@ -1847,11 +1863,7 @@ bool HeaderFileInfoTrait::EqualKey(internal_key_ref a, internal_key_ref b) {
std::pair<unsigned, unsigned>
HeaderFileInfoTrait::ReadKeyDataLength(const unsigned char*& d) {
- using namespace llvm::support;
-
- unsigned KeyLen = (unsigned) endian::readNext<uint16_t, little, unaligned>(d);
- unsigned DataLen = (unsigned) *d++;
- return std::make_pair(KeyLen, DataLen);
+ return readULEBKeyDataLength(d);
}
HeaderFileInfoTrait::internal_key_type
@@ -1911,11 +1923,9 @@ HeaderFileInfoTrait::ReadData(internal_key_ref key, const unsigned char *d,
std::string Filename = std::string(key.Filename);
if (key.Imported)
Reader.ResolveImportedPath(M, Filename);
- // FIXME: This is not always the right filename-as-written, but we're not
- // going to use this information to rebuild the module, so it doesn't make
- // a lot of difference.
- Module::Header H = {std::string(key.Filename),
- *FileMgr.getOptionalFileRef(Filename)};
+ // FIXME: NameAsWritten
+ Module::Header H = {std::string(key.Filename), "",
+ *FileMgr.getFile(Filename)};
ModMap.addHeader(Mod, H, HeaderRole, /*Imported*/true);
HFI.isModuleHeader |= !(HeaderRole & ModuleMap::TextualHeader);
}
@@ -2750,9 +2760,17 @@ ASTReader::ReadControlBlock(ModuleFile &F,
}
bool hasErrors = Record[6];
- if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
- Diag(diag::err_pch_with_compiler_errors);
- return HadErrors;
+ if (hasErrors && !DisableValidation) {
+ // If requested by the caller and the module hasn't already been read
+ // or compiled, mark modules on error as out-of-date.
+ if ((ClientLoadCapabilities & ARR_TreatModuleWithErrorsAsOutOfDate) &&
+ canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
+ return OutOfDate;
+
+ if (!AllowASTWithCompilerErrors) {
+ Diag(diag::err_pch_with_compiler_errors);
+ return HadErrors;
+ }
}
if (hasErrors) {
Diags.ErrorOccurred = true;
@@ -2832,9 +2850,14 @@ ASTReader::ReadControlBlock(ModuleFile &F,
StoredSignature, Capabilities);
// If we diagnosed a problem, produce a backtrace.
- if (isDiagnosedResult(Result, Capabilities))
+ bool recompilingFinalized =
+ Result == OutOfDate && (Capabilities & ARR_OutOfDate) &&
+ getModuleManager().getModuleCache().isPCMFinal(F.FileName);
+ if (isDiagnosedResult(Result, Capabilities) || recompilingFinalized)
Diag(diag::note_module_file_imported_by)
<< F.FileName << !F.ModuleName.empty() << F.ModuleName;
+ if (recompilingFinalized)
+ Diag(diag::note_module_file_conflict);
switch (Result) {
case Failure: return Failure;
@@ -2900,7 +2923,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
F.Kind != MK_ExplicitModule && F.Kind != MK_PrebuiltModule) {
auto BuildDir = PP.getFileManager().getDirectory(Blob);
if (!BuildDir || *BuildDir != M->Directory) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_imported_module_relocated)
<< F.ModuleName << Blob << M->Directory->getName();
return OutOfDate;
@@ -3181,12 +3204,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
}
case IDENTIFIER_TABLE:
- F.IdentifierTableData = Blob.data();
+ F.IdentifierTableData =
+ reinterpret_cast<const unsigned char *>(Blob.data());
if (Record[0]) {
F.IdentifierLookupTable = ASTIdentifierLookupTable::Create(
- (const unsigned char *)F.IdentifierTableData + Record[0],
- (const unsigned char *)F.IdentifierTableData + sizeof(uint32_t),
- (const unsigned char *)F.IdentifierTableData,
+ F.IdentifierTableData + Record[0],
+ F.IdentifierTableData + sizeof(uint32_t),
+ F.IdentifierTableData,
ASTIdentifierLookupTrait(*this, F));
PP.getIdentifierTable().setExternalIdentifierLookup(this);
@@ -3385,7 +3409,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SOURCE_LOCATION_OFFSETS: {
F.SLocEntryOffsets = (const uint32_t *)Blob.data();
F.LocalNumSLocEntries = Record[0];
- unsigned SLocSpaceSize = Record[1];
+ SourceLocation::UIntTy SLocSpaceSize = Record[1];
F.SLocEntryOffsetsBase = Record[2] + F.SourceManagerBlockStartOffset;
std::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
@@ -3403,7 +3427,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
F.FirstLoc = SourceLocation::getFromRawEncoding(F.SLocEntryBaseOffset);
// SLocEntryBaseOffset is lower than MaxLoadedOffset and decreasing.
- assert((F.SLocEntryBaseOffset & (1U << 31U)) == 0);
+ assert((F.SLocEntryBaseOffset & SourceLocation::MacroIDBit) == 0);
GlobalSLocOffsetMap.insert(
std::make_pair(SourceManager::MaxLoadedOffset - F.SLocEntryBaseOffset
- SLocSpaceSize,&F));
@@ -3412,8 +3436,8 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
// Invalid stays invalid.
F.SLocRemap.insertOrReplace(std::make_pair(0U, 0));
// This module. Base was 2 when being compiled.
- F.SLocRemap.insertOrReplace(std::make_pair(2U,
- static_cast<int>(F.SLocEntryBaseOffset - 2)));
+ F.SLocRemap.insertOrReplace(std::make_pair(
+ 2U, static_cast<SourceLocation::IntTy>(F.SLocEntryBaseOffset - 2)));
TotalNumSLocEntries += F.LocalNumSLocEntries;
break;
@@ -3611,36 +3635,13 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
auto &OptInfo = OpenCLExtensions.OptMap[Name];
OptInfo.Supported = Record[I++] != 0;
OptInfo.Enabled = Record[I++] != 0;
+ OptInfo.WithPragma = Record[I++] != 0;
OptInfo.Avail = Record[I++];
OptInfo.Core = Record[I++];
OptInfo.Opt = Record[I++];
}
break;
- case OPENCL_EXTENSION_TYPES:
- for (unsigned I = 0, E = Record.size(); I != E;) {
- auto TypeID = static_cast<::TypeID>(Record[I++]);
- auto *Type = GetType(TypeID).getTypePtr();
- auto NumExt = static_cast<unsigned>(Record[I++]);
- for (unsigned II = 0; II != NumExt; ++II) {
- auto Ext = ReadString(Record, I);
- OpenCLTypeExtMap[Type].insert(Ext);
- }
- }
- break;
-
- case OPENCL_EXTENSION_DECLS:
- for (unsigned I = 0, E = Record.size(); I != E;) {
- auto DeclID = static_cast<::DeclID>(Record[I++]);
- auto *Decl = GetDecl(DeclID);
- auto NumExt = static_cast<unsigned>(Record[I++]);
- for (unsigned II = 0; II != NumExt; ++II) {
- auto Ext = ReadString(Record, I);
- OpenCLDeclExtMap[Decl].insert(Ext);
- }
- }
- break;
-
case TENTATIVE_DEFINITIONS:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
TentativeDefinitions.push_back(getGlobalDeclID(F, Record[I]));
@@ -3816,7 +3817,7 @@ ASTReader::ReadASTBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case DECLS_TO_CHECK_FOR_DEFERRED_DIAGS:
for (unsigned I = 0, N = Record.size(); I != N; ++I)
- DeclsToCheckForDeferredDiags.push_back(getGlobalDeclID(F, Record[I]));
+ DeclsToCheckForDeferredDiags.insert(getGlobalDeclID(F, Record[I]));
break;
}
}
@@ -3837,8 +3838,11 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
}
// Continuous range maps we may be updating in our module.
+ using SLocRemapBuilder =
+ ContinuousRangeMap<SourceLocation::UIntTy, SourceLocation::IntTy,
+ 2>::Builder;
using RemapBuilder = ContinuousRangeMap<uint32_t, int, 2>::Builder;
- RemapBuilder SLocRemap(F.SLocRemap);
+ SLocRemapBuilder SLocRemap(F.SLocRemap);
RemapBuilder IdentifierRemap(F.IdentifierRemap);
RemapBuilder MacroRemap(F.MacroRemap);
RemapBuilder PreprocessedEntityRemap(F.PreprocessedEntityRemap);
@@ -3869,7 +3873,7 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
return;
}
- uint32_t SLocOffset =
+ SourceLocation::UIntTy SLocOffset =
endian::readNext<uint32_t, little, unaligned>(Data);
uint32_t IdentifierIDOffset =
endian::readNext<uint32_t, little, unaligned>(Data);
@@ -3886,15 +3890,21 @@ void ASTReader::ReadModuleOffsetMap(ModuleFile &F) const {
uint32_t TypeIndexOffset =
endian::readNext<uint32_t, little, unaligned>(Data);
- uint32_t None = std::numeric_limits<uint32_t>::max();
-
auto mapOffset = [&](uint32_t Offset, uint32_t BaseOffset,
RemapBuilder &Remap) {
+ constexpr uint32_t None = std::numeric_limits<uint32_t>::max();
if (Offset != None)
Remap.insert(std::make_pair(Offset,
static_cast<int>(BaseOffset - Offset)));
};
- mapOffset(SLocOffset, OM->SLocEntryBaseOffset, SLocRemap);
+
+ constexpr SourceLocation::UIntTy SLocNone =
+ std::numeric_limits<SourceLocation::UIntTy>::max();
+ if (SLocOffset != SLocNone)
+ SLocRemap.insert(std::make_pair(
+ SLocOffset, static_cast<SourceLocation::IntTy>(
+ OM->SLocEntryBaseOffset - SLocOffset)));
+
mapOffset(IdentifierIDOffset, OM->BaseIdentifierID, IdentifierRemap);
mapOffset(MacroIDOffset, OM->BaseMacroID, MacroRemap);
mapOffset(PreprocessedEntityIDOffset, OM->BasePreprocessedEntityID,
@@ -3932,7 +3942,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
if (!bool(PP.getPreprocessorOpts().DisablePCHOrModuleValidation &
DisableValidationForModuleKind::Module) &&
!ModMap) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0) {
+ if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities)) {
if (auto ASTFE = M ? M->getASTFile() : None) {
// This module was defined by an imported (explicit) module.
Diag(diag::err_module_file_conflict) << F.ModuleName << F.FileName
@@ -3963,7 +3973,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
assert((ImportedBy || F.Kind == MK_ImplicitModule) &&
"top-level import should be verified");
bool NotImported = F.Kind == MK_ImplicitModule && !ImportedBy;
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_imported_module_modmap_changed)
<< F.ModuleName << (NotImported ? F.FileName : ImportedBy->FileName)
<< ModMap->getName() << F.ModuleMapPath << NotImported;
@@ -3974,13 +3984,13 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
for (unsigned I = 0, N = Record[Idx++]; I < N; ++I) {
// FIXME: we should use input files rather than storing names.
std::string Filename = ReadPath(F, Record, Idx);
- auto F = FileMgr.getFile(Filename, false, false);
- if (!F) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ auto SF = FileMgr.getFile(Filename, false, false);
+ if (!SF) {
+ if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Error("could not find file '" + Filename +"' referenced by AST file");
return OutOfDate;
}
- AdditionalStoredMaps.insert(*F);
+ AdditionalStoredMaps.insert(*SF);
}
// Check any additional module map files (e.g. module.private.modulemap)
@@ -3990,7 +4000,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
// Remove files that match
// Note: SmallPtrSet::erase is really remove
if (!AdditionalStoredMaps.erase(ModMap)) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_module_different_modmap)
<< F.ModuleName << /*new*/0 << ModMap->getName();
return OutOfDate;
@@ -4001,7 +4011,7 @@ ASTReader::ReadModuleMapFileBlock(RecordData &Record, ModuleFile &F,
// Check any additional module map files that are in the pcm, but not
// found in header search. Cases that match are already removed.
for (const FileEntry *ModMap : AdditionalStoredMaps) {
- if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
+ if (!canRecoverFromOutOfDate(F.FileName, ClientLoadCapabilities))
Diag(diag::err_module_different_modmap)
<< F.ModuleName << /*not new*/1 << ModMap->getName();
return OutOfDate;
@@ -4154,7 +4164,8 @@ static void updateModuleTimestamp(ModuleFile &MF) {
// Overwrite the timestamp file contents so that file's mtime changes.
std::string TimestampFilename = MF.getTimestampFilename();
std::error_code EC;
- llvm::raw_fd_ostream OS(TimestampFilename, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream OS(TimestampFilename, EC,
+ llvm::sys::fs::OF_TextWithCRLF);
if (EC)
return;
OS << "Timestamp file\n";
@@ -4313,8 +4324,7 @@ ASTReader::ASTReadResult ASTReader::ReadAST(StringRef FileName,
// Preload all the pending interesting identifiers by marking them out of
// date.
for (auto Offset : F.PreloadIdentifierOffsets) {
- const unsigned char *Data = reinterpret_cast<const unsigned char *>(
- F.IdentifierTableData + Offset);
+ const unsigned char *Data = F.IdentifierTableData + Offset;
ASTIdentifierLookupTrait Trait(*this, F);
auto KeyDataLen = Trait.ReadKeyDataLength(Data);
@@ -5148,8 +5158,8 @@ namespace {
StringRef SpecificModuleCachePath,
bool Complain) override {
return checkHeaderSearchOptions(HSOpts, SpecificModuleCachePath,
- ExistingModuleCachePath,
- nullptr, ExistingLangOpts);
+ ExistingModuleCachePath, nullptr,
+ ExistingLangOpts, ExistingPPOpts);
}
bool ReadPreprocessorOptions(const PreprocessorOptions &PPOpts,
@@ -5597,9 +5607,10 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_UMBRELLA_HEADER: {
std::string Filename = std::string(Blob);
ResolveImportedPath(F, Filename);
- if (auto Umbrella = PP.getFileManager().getOptionalFileRef(Filename)) {
+ if (auto Umbrella = PP.getFileManager().getFile(Filename)) {
if (!CurrentModule->getUmbrellaHeader())
- ModMap.setUmbrellaHeader(CurrentModule, *Umbrella, Blob);
+ // FIXME: NameAsWritten
+ ModMap.setUmbrellaHeader(CurrentModule, *Umbrella, Blob, "");
else if (CurrentModule->getUmbrellaHeader().Entry != *Umbrella) {
if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
Error("mismatched umbrella headers in submodule");
@@ -5630,10 +5641,10 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
case SUBMODULE_UMBRELLA_DIR: {
std::string Dirname = std::string(Blob);
ResolveImportedPath(F, Dirname);
- if (auto Umbrella =
- PP.getFileManager().getOptionalDirectoryRef(Dirname)) {
+ if (auto Umbrella = PP.getFileManager().getDirectory(Dirname)) {
if (!CurrentModule->getUmbrellaDir())
- ModMap.setUmbrellaDir(CurrentModule, *Umbrella, Blob);
+ // FIXME: NameAsWritten
+ ModMap.setUmbrellaDir(CurrentModule, *Umbrella, Blob, "");
else if (CurrentModule->getUmbrellaDir().Entry != *Umbrella) {
if ((ClientLoadCapabilities & ARR_OutOfDate) == 0)
Error("mismatched umbrella directories in submodule");
@@ -5927,6 +5938,12 @@ ASTReader::getModulePreprocessedEntities(ModuleFile &Mod) const {
PreprocessingRecord::iterator());
}
+bool ASTReader::canRecoverFromOutOfDate(StringRef ModuleFileName,
+ unsigned int ClientLoadCapabilities) {
+ return ClientLoadCapabilities & ARR_OutOfDate &&
+ !getModuleManager().getModuleCache().isPCMFinal(ModuleFileName);
+}
+
llvm::iterator_range<ASTReader::ModuleDeclIterator>
ASTReader::getModuleFileLevelDecls(ModuleFile &Mod) {
return llvm::make_range(
@@ -7062,6 +7079,11 @@ QualType ASTReader::GetType(TypeID ID) {
T = Context.Id##Ty; \
break;
#include "clang/Basic/PPCTypes.def"
+#define RVV_TYPE(Name, Id, SingletonId) \
+ case PREDEF_TYPE_##Id##_ID: \
+ T = Context.SingletonId; \
+ break;
+#include "clang/Basic/RISCVVTypes.def"
}
assert(!T.isNull() && "Unknown predefined type");
@@ -7174,6 +7196,11 @@ void ASTReader::CompleteRedeclChain(const Decl *D) {
return;
}
+ if (!D->getDeclContext()) {
+ assert(isa<TranslationUnitDecl>(D) && "Not a TU?");
+ return;
+ }
+
const DeclContext *DC = D->getDeclContext()->getRedeclContext();
// If this is a named declaration, complete it by looking it up
@@ -7628,9 +7655,10 @@ ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
// Load the list of declarations.
SmallVector<NamedDecl *, 64> Decls;
+ llvm::SmallPtrSet<NamedDecl *, 8> Found;
for (DeclID ID : It->second.Table.find(Name)) {
NamedDecl *ND = cast<NamedDecl>(GetDecl(ID));
- if (ND->getDeclName() == Name)
+ if (ND->getDeclName() == Name && Found.insert(ND).second)
Decls.push_back(ND);
}
@@ -7869,8 +7897,6 @@ void ASTReader::InitializeSema(Sema &S) {
}
SemaObj->OpenCLFeatures = OpenCLExtensions;
- SemaObj->OpenCLTypeExtMap = OpenCLTypeExtMap;
- SemaObj->OpenCLDeclExtMap = OpenCLDeclExtMap;
UpdateSema();
}
@@ -8334,18 +8360,15 @@ void ASTReader::ReadUnusedLocalTypedefNameCandidates(
}
void ASTReader::ReadDeclsToCheckForDeferredDiags(
- llvm::SmallVector<Decl *, 4> &Decls) {
- for (unsigned I = 0, N = DeclsToCheckForDeferredDiags.size(); I != N;
- ++I) {
- auto *D = dyn_cast_or_null<Decl>(
- GetDecl(DeclsToCheckForDeferredDiags[I]));
+ llvm::SmallSetVector<Decl *, 4> &Decls) {
+ for (auto I : DeclsToCheckForDeferredDiags) {
+ auto *D = dyn_cast_or_null<Decl>(GetDecl(I));
if (D)
- Decls.push_back(D);
+ Decls.insert(D);
}
DeclsToCheckForDeferredDiags.clear();
}
-
void ASTReader::ReadReferencedSelectors(
SmallVectorImpl<std::pair<Selector, SourceLocation>> &Sels) {
if (ReferencedSelectorsData.empty())
@@ -8512,17 +8535,13 @@ IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
assert(I != GlobalIdentifierMap.end() && "Corrupted global identifier map");
ModuleFile *M = I->second;
unsigned Index = ID - M->BaseIdentifierID;
- const char *Str = M->IdentifierTableData + M->IdentifierOffsets[Index];
-
- // All of the strings in the AST file are preceded by a 16-bit length.
- // Extract that 16-bit length to avoid having to execute strlen().
- // NOTE: 'StrLenPtr' is an 'unsigned char*' so that we load bytes as
- // unsigned integers. This is important to avoid integer overflow when
- // we cast them to 'unsigned'.
- const unsigned char *StrLenPtr = (const unsigned char*) Str - 2;
- unsigned StrLen = (((unsigned) StrLenPtr[0])
- | (((unsigned) StrLenPtr[1]) << 8)) - 1;
- auto &II = PP.getIdentifierTable().get(StringRef(Str, StrLen));
+ const unsigned char *Data =
+ M->IdentifierTableData + M->IdentifierOffsets[Index];
+
+ ASTIdentifierLookupTrait Trait(*this, *M);
+ auto KeyDataLen = Trait.ReadKeyDataLength(Data);
+ auto Key = Trait.ReadKey(Data, KeyDataLen.first);
+ auto &II = PP.getIdentifierTable().get(Key);
IdentifiersLoaded[ID] = &II;
markIdentifierFromAST(*this, II);
if (DeserializationListener)
@@ -8737,25 +8756,18 @@ ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const {
DeclarationNameLoc
ASTRecordReader::readDeclarationNameLoc(DeclarationName Name) {
- DeclarationNameLoc DNLoc;
switch (Name.getNameKind()) {
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- DNLoc.NamedType.TInfo = readTypeSourceInfo();
- break;
+ return DeclarationNameLoc::makeNamedTypeLoc(readTypeSourceInfo());
case DeclarationName::CXXOperatorName:
- DNLoc.CXXOperatorName.BeginOpNameLoc
- = readSourceLocation().getRawEncoding();
- DNLoc.CXXOperatorName.EndOpNameLoc
- = readSourceLocation().getRawEncoding();
- break;
+ return DeclarationNameLoc::makeCXXOperatorNameLoc(readSourceRange());
case DeclarationName::CXXLiteralOperatorName:
- DNLoc.CXXLiteralOperatorName.OpNameLoc
- = readSourceLocation().getRawEncoding();
- break;
+ return DeclarationNameLoc::makeCXXLiteralOperatorNameLoc(
+ readSourceLocation());
case DeclarationName::Identifier:
case DeclarationName::ObjCZeroArgSelector:
@@ -8765,7 +8777,7 @@ ASTRecordReader::readDeclarationNameLoc(DeclarationName Name) {
case DeclarationName::CXXDeductionGuideName:
break;
}
- return DNLoc;
+ return DeclarationNameLoc();
}
DeclarationNameInfo ASTRecordReader::readDeclarationNameInfo() {
@@ -11730,6 +11742,17 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_simdlen:
C = new (Context) OMPSimdlenClause();
break;
+ case llvm::omp::OMPC_sizes: {
+ unsigned NumSizes = Record.readInt();
+ C = OMPSizesClause::CreateEmpty(Context, NumSizes);
+ break;
+ }
+ case llvm::omp::OMPC_full:
+ C = OMPFullClause::CreateEmpty(Context);
+ break;
+ case llvm::omp::OMPC_partial:
+ C = OMPPartialClause::CreateEmpty(Context);
+ break;
case llvm::omp::OMPC_allocator:
C = new (Context) OMPAllocatorClause();
break;
@@ -11952,9 +11975,21 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_order:
C = new (Context) OMPOrderClause();
break;
+ case llvm::omp::OMPC_init:
+ C = OMPInitClause::CreateEmpty(Context, Record.readInt());
+ break;
+ case llvm::omp::OMPC_use:
+ C = new (Context) OMPUseClause();
+ break;
case llvm::omp::OMPC_destroy:
C = new (Context) OMPDestroyClause();
break;
+ case llvm::omp::OMPC_novariants:
+ C = new (Context) OMPNovariantsClause();
+ break;
+ case llvm::omp::OMPC_nocontext:
+ C = new (Context) OMPNocontextClause();
+ break;
case llvm::omp::OMPC_detach:
C = new (Context) OMPDetachClause();
break;
@@ -11964,6 +11999,9 @@ OMPClause *OMPClauseReader::readClause() {
case llvm::omp::OMPC_affinity:
C = OMPAffinityClause::CreateEmpty(Context, Record.readInt());
break;
+ case llvm::omp::OMPC_filter:
+ C = new (Context) OMPFilterClause();
+ break;
#define OMP_CLAUSE_NO_CLASS(Enum, Str) \
case llvm::omp::Enum: \
break;
@@ -12021,6 +12059,19 @@ void OMPClauseReader::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
C->setLParenLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPSizesClause(OMPSizesClause *C) {
+ for (Expr *&E : C->getSizesRefs())
+ E = Record.readSubExpr();
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPFullClause(OMPFullClause *C) {}
+
+void OMPClauseReader::VisitOMPPartialClause(OMPPartialClause *C) {
+ C->setFactor(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
void OMPClauseReader::VisitOMPAllocatorClause(OMPAllocatorClause *C) {
C->setAllocator(Record.readExpr());
C->setLParenLoc(Record.readSourceLocation());
@@ -12109,7 +12160,42 @@ void OMPClauseReader::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseReader::VisitOMPNogroupClause(OMPNogroupClause *) {}
-void OMPClauseReader::VisitOMPDestroyClause(OMPDestroyClause *) {}
+void OMPClauseReader::VisitOMPInitClause(OMPInitClause *C) {
+ unsigned NumVars = C->varlist_size();
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ C->setIsTarget(Record.readBool());
+ C->setIsTargetSync(Record.readBool());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setVarLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPUseClause(OMPUseClause *C) {
+ C->setInteropVar(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setVarLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPDestroyClause(OMPDestroyClause *C) {
+ C->setInteropVar(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+ C->setVarLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPNovariantsClause(OMPNovariantsClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setCondition(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
+void OMPClauseReader::VisitOMPNocontextClause(OMPNocontextClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setCondition(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
void OMPClauseReader::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
@@ -12894,6 +12980,12 @@ void OMPClauseReader::VisitOMPOrderClause(OMPOrderClause *C) {
C->setKindKwLoc(Record.readSourceLocation());
}
+void OMPClauseReader::VisitOMPFilterClause(OMPFilterClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ C->setThreadID(Record.readSubExpr());
+ C->setLParenLoc(Record.readSourceLocation());
+}
+
OMPTraitInfo *ASTRecordReader::readOMPTraitInfo() {
OMPTraitInfo &TI = getContext().getNewOMPTraitInfo();
TI.Sets.resize(readUInt32());
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index 6bfb9bd783b5..ff79f91e5db1 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -328,6 +328,7 @@ namespace clang {
void VisitTypedefDecl(TypedefDecl *TD);
void VisitTypeAliasDecl(TypeAliasDecl *TD);
void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingIfExistsDecl(UnresolvedUsingIfExistsDecl *D);
RedeclarableResult VisitTagDecl(TagDecl *TD);
void VisitEnumDecl(EnumDecl *ED);
RedeclarableResult VisitRecordDeclImpl(RecordDecl *RD);
@@ -389,6 +390,7 @@ namespace clang {
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingEnumDecl(UsingEnumDecl *D);
void VisitUsingPackDecl(UsingPackDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitConstructorUsingShadowDecl(ConstructorUsingShadowDecl *D);
@@ -1651,6 +1653,17 @@ void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
mergeMergeable(D);
}
+void ASTDeclReader::VisitUsingEnumDecl(UsingEnumDecl *D) {
+ VisitNamedDecl(D);
+ D->setUsingLoc(readSourceLocation());
+ D->setEnumLoc(readSourceLocation());
+ D->Enum = readDeclAs<EnumDecl>();
+ D->FirstUsingShadow.setPointer(readDeclAs<UsingShadowDecl>());
+ if (auto *Pattern = readDeclAs<UsingEnumDecl>())
+ Reader.getContext().setInstantiatedFromUsingEnumDecl(D, Pattern);
+ mergeMergeable(D);
+}
+
void ASTDeclReader::VisitUsingPackDecl(UsingPackDecl *D) {
VisitNamedDecl(D);
D->InstantiatedFrom = readDeclAs<NamedDecl>();
@@ -1707,6 +1720,11 @@ void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
mergeMergeable(D);
}
+void ASTDeclReader::VisitUnresolvedUsingIfExistsDecl(
+ UnresolvedUsingIfExistsDecl *D) {
+ VisitNamedDecl(D);
+}
+
void ASTDeclReader::ReadCXXDefinitionData(
struct CXXRecordDecl::DefinitionData &Data, const CXXRecordDecl *D) {
#define FIELD(Name, Width, Merge) \
@@ -1748,6 +1766,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
Lambda.NumExplicitCaptures = Record.readInt();
Lambda.HasKnownInternalLinkage = Record.readInt();
Lambda.ManglingNumber = Record.readInt();
+ D->setDeviceLambdaManglingNumber(Record.readInt());
Lambda.ContextDecl = readDeclID();
Lambda.Captures = (Capture *)Reader.getContext().Allocate(
sizeof(Capture) * Lambda.NumCaptures);
@@ -1953,6 +1972,7 @@ ASTDeclReader::VisitCXXRecordDeclImpl(CXXRecordDecl *D) {
void ASTDeclReader::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
D->setExplicitSpecifier(Record.readExplicitSpec());
+ D->Ctor = readDeclAs<CXXConstructorDecl>();
VisitFunctionDecl(D);
D->setIsCopyDeductionCandidate(Record.readInt());
}
@@ -3047,7 +3067,7 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
return true;
}
-/// Determine whether the two declarations refer to the same entity.pr
+/// Determine whether the two declarations refer to the same entity.
static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
@@ -3241,10 +3261,19 @@ static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
return isSameQualifier(UX->getQualifier(), UY->getQualifier()) &&
UX->isAccessDeclaration() == UY->isAccessDeclaration();
}
- if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X))
+ if (const auto *UX = dyn_cast<UnresolvedUsingTypenameDecl>(X)) {
return isSameQualifier(
UX->getQualifier(),
cast<UnresolvedUsingTypenameDecl>(Y)->getQualifier());
+ }
+
+ // Using-pack declarations are only created by instantiation, and match if
+ // they're instantiated from matching UnresolvedUsing...Decls.
+ if (const auto *UX = dyn_cast<UsingPackDecl>(X)) {
+ return declaresSameEntity(
+ UX->getInstantiatedFromUsingDecl(),
+ cast<UsingPackDecl>(Y)->getInstantiatedFromUsingDecl());
+ }
// Namespace alias definitions with the same target match.
if (const auto *NAX = dyn_cast<NamespaceAliasDecl>(X)) {
@@ -3836,6 +3865,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_USING_SHADOW:
D = UsingShadowDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_USING_ENUM:
+ D = UsingEnumDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_CONSTRUCTOR_USING_SHADOW:
D = ConstructorUsingShadowDecl::CreateDeserialized(Context, ID);
break;
@@ -3848,6 +3880,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_UNRESOLVED_USING_TYPENAME:
D = UnresolvedUsingTypenameDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_UNRESOLVED_USING_IF_EXISTS:
+ D = UnresolvedUsingIfExistsDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_CXX_RECORD:
D = CXXRecordDecl::CreateDeserialized(Context, ID);
break;
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 0e1af53303b4..b100f946f558 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -185,11 +185,13 @@ void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
+ bool IsSideEntry = Record.readInt();
auto *LD = readDeclAs<LabelDecl>();
LD->setStmt(S);
S->setDecl(LD);
S->setSubStmt(Record.readSubStmt());
S->setIdentLoc(readSourceLocation());
+ S->setSideEntry(IsSideEntry);
}
void ASTStmtReader::VisitAttributedStmt(AttributedStmt *S) {
@@ -579,6 +581,16 @@ void ASTStmtReader::VisitConstantExpr(ConstantExpr *E) {
E->setSubExpr(Record.readSubExpr());
}
+void ASTStmtReader::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
+ VisitExpr(E);
+
+ E->setLocation(readSourceLocation());
+ E->setLParenLocation(readSourceLocation());
+ E->setRParenLocation(readSourceLocation());
+
+ E->setTypeSourceInfo(Record.readTypeSourceInfo());
+}
+
void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
bool HasFunctionName = Record.readInt();
@@ -1099,10 +1111,9 @@ void ASTStmtReader::VisitCastExpr(CastExpr *E) {
void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
bool hasFP_Features;
- BinaryOperator::Opcode opc;
VisitExpr(E);
E->setHasStoredFPFeatures(hasFP_Features = Record.readInt());
- E->setOpcode(opc = (BinaryOperator::Opcode)Record.readInt());
+ E->setOpcode((BinaryOperator::Opcode)Record.readInt());
E->setLHS(Record.readSubExpr());
E->setRHS(Record.readSubExpr());
E->setOperatorLoc(readSourceLocation());
@@ -2273,19 +2284,29 @@ void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
// OpenMP Directives.
//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitOMPCanonicalLoop(OMPCanonicalLoop *S) {
+ VisitStmt(S);
+ for (Stmt *&SubStmt : S->SubStmts)
+ SubStmt = Record.readSubStmt();
+}
+
void ASTStmtReader::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
Record.readOMPChildren(E->Data);
E->setLocStart(readSourceLocation());
E->setLocEnd(readSourceLocation());
}
-void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
+void ASTStmtReader::VisitOMPLoopBasedDirective(OMPLoopBasedDirective *D) {
VisitStmt(D);
// Field CollapsedNum was read in ReadStmtFromStream.
Record.skipInts(1);
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPLoopDirective(OMPLoopDirective *D) {
+ VisitOMPLoopBasedDirective(D);
+}
+
void ASTStmtReader::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2296,6 +2317,14 @@ void ASTStmtReader::VisitOMPSimdDirective(OMPSimdDirective *D) {
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPTileDirective(OMPTileDirective *D) {
+ VisitOMPLoopBasedDirective(D);
+}
+
+void ASTStmtReader::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
+ VisitOMPLoopBasedDirective(D);
+}
+
void ASTStmtReader::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
D->setHasCancel(Record.readBool());
@@ -2574,6 +2603,22 @@ void ASTStmtReader::VisitOMPTargetTeamsDistributeSimdDirective(
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPInteropDirective(OMPInteropDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
+void ASTStmtReader::VisitOMPDispatchDirective(OMPDispatchDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ D->setTargetCallLoc(Record.readSourceLocation());
+}
+
+void ASTStmtReader::VisitOMPMaskedDirective(OMPMaskedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -2771,6 +2816,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
/*StorageKind=*/Record[ASTStmtReader::NumExprFields]));
break;
+ case EXPR_SYCL_UNIQUE_STABLE_NAME:
+ S = SYCLUniqueStableNameExpr::CreateEmpty(Context);
+ break;
+
case EXPR_PREDEFINED:
S = PredefinedExpr::CreateEmpty(
Context,
@@ -3130,6 +3179,10 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
nullptr);
break;
+ case STMT_OMP_CANONICAL_LOOP:
+ S = OMPCanonicalLoop::createEmpty(Context);
+ break;
+
case STMT_OMP_PARALLEL_DIRECTIVE:
S =
OMPParallelDirective::CreateEmpty(Context,
@@ -3145,6 +3198,20 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_TILE_DIRECTIVE: {
+ unsigned NumLoops = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTileDirective::CreateEmpty(Context, NumClauses, NumLoops);
+ break;
+ }
+
+ case STMT_OMP_UNROLL_DIRECTIVE: {
+ assert(Record[ASTStmtReader::NumStmtFields] == 1 && "Unroll directive accepts only a single loop");
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPUnrollDirective::CreateEmpty(Context, NumClauses);
+ break;
+ }
+
case STMT_OMP_FOR_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3478,6 +3545,21 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_INTEROP_DIRECTIVE:
+ S = OMPInteropDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_DISPATCH_DIRECTIVE:
+ S = OMPDispatchDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
+ case STMT_OMP_MASKED_DIRECTIVE:
+ S = OMPMaskedDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case EXPR_CXX_OPERATOR_CALL:
S = CXXOperatorCallExpr::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 6bfa7b0e7d6d..66c207ad9243 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -95,6 +95,7 @@
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/LEB128.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/OnDiskHashTable.h"
#include "llvm/Support/Path.h"
@@ -1615,6 +1616,15 @@ static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
return Stream.EmitAbbrev(std::move(Abbrev));
}
+/// Emit key length and data length as ULEB-encoded data, and return them as a
+/// pair.
+static std::pair<unsigned, unsigned>
+emitULEBKeyDataLength(unsigned KeyLen, unsigned DataLen, raw_ostream &Out) {
+ llvm::encodeULEB128(KeyLen, Out);
+ llvm::encodeULEB128(DataLen, Out);
+ return std::make_pair(KeyLen, DataLen);
+}
+
namespace {
// Trait used for the on-disk hash table of header search information.
@@ -1657,19 +1667,14 @@ namespace {
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, key_type_ref key, data_type_ref Data) {
- using namespace llvm::support;
-
- endian::Writer LE(Out, little);
unsigned KeyLen = key.Filename.size() + 1 + 8 + 8;
- LE.write<uint16_t>(KeyLen);
unsigned DataLen = 1 + 2 + 4 + 4;
for (auto ModInfo : Data.KnownHeaders)
if (Writer.getLocalOrImportedSubmoduleID(ModInfo.getModule()))
DataLen += 4;
if (Data.Unresolved.getPointer())
DataLen += 4;
- LE.write<uint8_t>(DataLen);
- return std::make_pair(KeyLen, DataLen);
+ return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
void EmitKey(raw_ostream& Out, key_type_ref key, unsigned KeyLen) {
@@ -2036,7 +2041,7 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
Record.push_back(Expansion.isExpansionTokenRange());
// Compute the token length for this macro expansion.
- unsigned NextOffset = SourceMgr.getNextLocalOffset();
+ SourceLocation::UIntTy NextOffset = SourceMgr.getNextLocalOffset();
if (I + 1 != N)
NextOffset = SourceMgr.getLocalSLocEntry(I + 1).getOffset();
Record.push_back(NextOffset - SLoc->getOffset() - 1);
@@ -2471,11 +2476,11 @@ void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec,
}
}
-unsigned ASTWriter::getLocalOrImportedSubmoduleID(Module *Mod) {
+unsigned ASTWriter::getLocalOrImportedSubmoduleID(const Module *Mod) {
if (!Mod)
return 0;
- llvm::DenseMap<Module *, unsigned>::iterator Known = SubmoduleIDs.find(Mod);
+ auto Known = SubmoduleIDs.find(Mod);
if (Known != SubmoduleIDs.end())
return Known->second;
@@ -3008,11 +3013,7 @@ public:
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, Selector Sel,
data_type_ref Methods) {
- using namespace llvm::support;
-
- endian::Writer LE(Out, little);
unsigned KeyLen = 2 + (Sel.getNumArgs()? Sel.getNumArgs() * 4 : 4);
- LE.write<uint16_t>(KeyLen);
unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
for (const ObjCMethodList *Method = &Methods.Instance; Method;
Method = Method->getNext())
@@ -3022,8 +3023,7 @@ public:
Method = Method->getNext())
if (Method->getMethod())
DataLen += 4;
- LE.write<uint16_t>(DataLen);
- return std::make_pair(KeyLen, DataLen);
+ return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
void EmitKey(raw_ostream& Out, Selector Sel, unsigned) {
@@ -3320,6 +3320,15 @@ public:
std::pair<unsigned, unsigned>
EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
+ // Record the location of the identifier data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ Writer.SetIdentifierOffset(II, Out.tell());
+
+ // Emit the offset of the key/data length information to the interesting
+ // identifiers table if necessary.
+ if (InterestingIdentifierOffsets && isInterestingIdentifier(II))
+ InterestingIdentifierOffsets->push_back(Out.tell());
+
unsigned KeyLen = II->getLength() + 1;
unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
auto MacroOffset = Writer.getMacroDirectivesOffset(II);
@@ -3336,31 +3345,11 @@ public:
DataLen += 4;
}
}
-
- using namespace llvm::support;
-
- endian::Writer LE(Out, little);
-
- assert((uint16_t)DataLen == DataLen && (uint16_t)KeyLen == KeyLen);
- LE.write<uint16_t>(DataLen);
- // We emit the key length after the data length so that every
- // string is preceded by a 16-bit length. This matches the PTH
- // format for storing identifiers.
- LE.write<uint16_t>(KeyLen);
- return std::make_pair(KeyLen, DataLen);
+ return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
void EmitKey(raw_ostream& Out, const IdentifierInfo* II,
unsigned KeyLen) {
- // Record the location of the key data. This is used when generating
- // the mapping from persistent IDs to strings.
- Writer.SetIdentifierOffset(II, Out.tell());
-
- // Emit the offset of the key/data length information to the interesting
- // identifiers table if necessary.
- if (InterestingIdentifierOffsets && isInterestingIdentifier(II))
- InterestingIdentifierOffsets->push_back(Out.tell() - 4);
-
Out.write(II->getNameStart(), KeyLen);
}
@@ -3573,9 +3562,6 @@ public:
std::pair<unsigned, unsigned> EmitKeyDataLength(raw_ostream &Out,
DeclarationNameKey Name,
data_type_ref Lookup) {
- using namespace llvm::support;
-
- endian::Writer LE(Out, little);
unsigned KeyLen = 1;
switch (Name.getKind()) {
case DeclarationName::Identifier:
@@ -3595,15 +3581,11 @@ public:
case DeclarationName::CXXUsingDirective:
break;
}
- LE.write<uint16_t>(KeyLen);
// 4 bytes for each DeclID.
unsigned DataLen = 4 * (Lookup.second - Lookup.first);
- assert(uint16_t(DataLen) == DataLen &&
- "too many decls for serialized lookup result");
- LE.write<uint16_t>(DataLen);
- return std::make_pair(KeyLen, DataLen);
+ return emitULEBKeyDataLength(KeyLen, DataLen, Out);
}
void EmitKey(raw_ostream &Out, DeclarationNameKey Name, unsigned) {
@@ -3976,78 +3958,13 @@ void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
auto V = I.getValue();
Record.push_back(V.Supported ? 1 : 0);
Record.push_back(V.Enabled ? 1 : 0);
+ Record.push_back(V.WithPragma ? 1 : 0);
Record.push_back(V.Avail);
Record.push_back(V.Core);
Record.push_back(V.Opt);
}
Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
}
-
-void ASTWriter::WriteOpenCLExtensionTypes(Sema &SemaRef) {
- if (!SemaRef.Context.getLangOpts().OpenCL)
- return;
-
- // Sort the elements of the map OpenCLTypeExtMap by TypeIDs,
- // without copying them.
- const llvm::DenseMap<const Type *, std::set<std::string>> &OpenCLTypeExtMap =
- SemaRef.OpenCLTypeExtMap;
- using ElementTy = std::pair<TypeID, const std::set<std::string> *>;
- llvm::SmallVector<ElementTy, 8> StableOpenCLTypeExtMap;
- StableOpenCLTypeExtMap.reserve(OpenCLTypeExtMap.size());
-
- for (const auto &I : OpenCLTypeExtMap)
- StableOpenCLTypeExtMap.emplace_back(
- getTypeID(I.first->getCanonicalTypeInternal()), &I.second);
-
- auto CompareByTypeID = [](const ElementTy &E1, const ElementTy &E2) -> bool {
- return E1.first < E2.first;
- };
- llvm::sort(StableOpenCLTypeExtMap, CompareByTypeID);
-
- RecordData Record;
- for (const ElementTy &E : StableOpenCLTypeExtMap) {
- Record.push_back(E.first); // TypeID
- const std::set<std::string> *ExtSet = E.second;
- Record.push_back(static_cast<unsigned>(ExtSet->size()));
- for (const std::string &Ext : *ExtSet)
- AddString(Ext, Record);
- }
-
- Stream.EmitRecord(OPENCL_EXTENSION_TYPES, Record);
-}
-
-void ASTWriter::WriteOpenCLExtensionDecls(Sema &SemaRef) {
- if (!SemaRef.Context.getLangOpts().OpenCL)
- return;
-
- // Sort the elements of the map OpenCLDeclExtMap by DeclIDs,
- // without copying them.
- const llvm::DenseMap<const Decl *, std::set<std::string>> &OpenCLDeclExtMap =
- SemaRef.OpenCLDeclExtMap;
- using ElementTy = std::pair<DeclID, const std::set<std::string> *>;
- llvm::SmallVector<ElementTy, 8> StableOpenCLDeclExtMap;
- StableOpenCLDeclExtMap.reserve(OpenCLDeclExtMap.size());
-
- for (const auto &I : OpenCLDeclExtMap)
- StableOpenCLDeclExtMap.emplace_back(getDeclID(I.first), &I.second);
-
- auto CompareByDeclID = [](const ElementTy &E1, const ElementTy &E2) -> bool {
- return E1.first < E2.first;
- };
- llvm::sort(StableOpenCLDeclExtMap, CompareByDeclID);
-
- RecordData Record;
- for (const ElementTy &E : StableOpenCLDeclExtMap) {
- Record.push_back(E.first); // DeclID
- const std::set<std::string> *ExtSet = E.second;
- Record.push_back(static_cast<unsigned>(ExtSet->size()));
- for (const std::string &Ext : *ExtSet)
- AddString(Ext, Record);
- }
-
- Stream.EmitRecord(OPENCL_EXTENSION_DECLS, Record);
-}
-
void ASTWriter::WriteCUDAPragmas(Sema &SemaRef) {
if (SemaRef.ForceCUDAHostDeviceDepth > 0) {
RecordData::value_type Record[] = {SemaRef.ForceCUDAHostDeviceDepth};
@@ -4723,7 +4640,7 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// another module after it or have more than one entity inside it.
uint32_t None = std::numeric_limits<uint32_t>::max();
- auto writeBaseIDOrNone = [&](uint32_t BaseID, bool ShouldWrite) {
+ auto writeBaseIDOrNone = [&](auto BaseID, bool ShouldWrite) {
assert(BaseID < std::numeric_limits<uint32_t>::max() && "base id too high");
if (ShouldWrite)
LE.write<uint32_t>(BaseID);
@@ -4750,9 +4667,9 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
}
// Build a record containing all of the DeclsToCheckForDeferredDiags.
- RecordData DeclsToCheckForDeferredDiags;
+ SmallVector<serialization::DeclID, 64> DeclsToCheckForDeferredDiags;
for (auto *D : SemaRef.DeclsToCheckForDeferredDiags)
- AddDeclRef(D, DeclsToCheckForDeferredDiags);
+ DeclsToCheckForDeferredDiags.push_back(GetDeclRef(D));
RecordData DeclUpdatesOffsetsRecord;
@@ -4792,17 +4709,12 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
WriteIdentifierTable(PP, SemaRef.IdResolver, isModule);
WriteFPPragmaOptions(SemaRef.CurFPFeatureOverrides());
WriteOpenCLExtensions(SemaRef);
- WriteOpenCLExtensionTypes(SemaRef);
WriteCUDAPragmas(SemaRef);
// If we're emitting a module, write out the submodule information.
if (WritingModule)
WriteSubmodules(WritingModule);
- // We need to have information about submodules to correctly deserialize
- // decls from OpenCLExtensionDecls block
- WriteOpenCLExtensionDecls(SemaRef);
-
Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
// Write the record containing external, unnamed definitions.
@@ -5115,8 +5027,8 @@ void ASTWriter::AddAlignPackInfo(const Sema::AlignPackInfo &Info,
}
void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
- uint32_t Raw = Loc.getRawEncoding();
- Record.push_back((Raw << 1) | (Raw >> 31));
+ SourceLocation::UIntTy Raw = Loc.getRawEncoding();
+ Record.push_back((Raw << 1) | (Raw >> (8 * sizeof(Raw) - 1)));
}
void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record) {
@@ -5413,19 +5325,15 @@ void ASTRecordWriter::AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
case DeclarationName::CXXConversionFunctionName:
- AddTypeSourceInfo(DNLoc.NamedType.TInfo);
+ AddTypeSourceInfo(DNLoc.getNamedTypeInfo());
break;
case DeclarationName::CXXOperatorName:
- AddSourceLocation(SourceLocation::getFromRawEncoding(
- DNLoc.CXXOperatorName.BeginOpNameLoc));
- AddSourceLocation(
- SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.EndOpNameLoc));
+ AddSourceRange(DNLoc.getCXXOperatorNameRange());
break;
case DeclarationName::CXXLiteralOperatorName:
- AddSourceLocation(SourceLocation::getFromRawEncoding(
- DNLoc.CXXLiteralOperatorName.OpNameLoc));
+ AddSourceLocation(DNLoc.getCXXLiteralOperatorNameLoc());
break;
case DeclarationName::Identifier:
@@ -5667,6 +5575,7 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
Record->push_back(Lambda.NumExplicitCaptures);
Record->push_back(Lambda.HasKnownInternalLinkage);
Record->push_back(Lambda.ManglingNumber);
+ Record->push_back(D->getDeviceLambdaManglingNumber());
AddDeclRef(D->getLambdaContextDecl());
AddTypeSourceInfo(Lambda.MethodTyInfo);
for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
@@ -6141,6 +6050,20 @@ void OMPClauseWriter::VisitOMPSimdlenClause(OMPSimdlenClause *C) {
Record.AddSourceLocation(C->getLParenLoc());
}
+void OMPClauseWriter::VisitOMPSizesClause(OMPSizesClause *C) {
+ Record.push_back(C->getNumSizes());
+ for (Expr *Size : C->getSizesRefs())
+ Record.AddStmt(Size);
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPFullClause(OMPFullClause *C) {}
+
+void OMPClauseWriter::VisitOMPPartialClause(OMPPartialClause *C) {
+ Record.AddStmt(C->getFactor());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
void OMPClauseWriter::VisitOMPAllocatorClause(OMPAllocatorClause *C) {
Record.AddStmt(C->getAllocator());
Record.AddSourceLocation(C->getLParenLoc());
@@ -6228,7 +6151,45 @@ void OMPClauseWriter::VisitOMPSIMDClause(OMPSIMDClause *) {}
void OMPClauseWriter::VisitOMPNogroupClause(OMPNogroupClause *) {}
-void OMPClauseWriter::VisitOMPDestroyClause(OMPDestroyClause *) {}
+void OMPClauseWriter::VisitOMPInitClause(OMPInitClause *C) {
+ Record.push_back(C->varlist_size());
+ for (Expr *VE : C->varlists())
+ Record.AddStmt(VE);
+ Record.writeBool(C->getIsTarget());
+ Record.writeBool(C->getIsTargetSync());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getVarLoc());
+}
+
+void OMPClauseWriter::VisitOMPUseClause(OMPUseClause *C) {
+ Record.AddStmt(C->getInteropVar());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getVarLoc());
+}
+
+void OMPClauseWriter::VisitOMPDestroyClause(OMPDestroyClause *C) {
+ Record.AddStmt(C->getInteropVar());
+ Record.AddSourceLocation(C->getLParenLoc());
+ Record.AddSourceLocation(C->getVarLoc());
+}
+
+void OMPClauseWriter::VisitOMPNovariantsClause(OMPNovariantsClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getCondition());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPNocontextClause(OMPNocontextClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getCondition());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
+
+void OMPClauseWriter::VisitOMPFilterClause(OMPFilterClause *C) {
+ VisitOMPClauseWithPreInit(C);
+ Record.AddStmt(C->getThreadID());
+ Record.AddSourceLocation(C->getLParenLoc());
+}
void OMPClauseWriter::VisitOMPPrivateClause(OMPPrivateClause *C) {
Record.push_back(C->varlist_size());
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index 2cb44bf9038b..e9315f67d553 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -13,7 +13,6 @@
#include "ASTCommon.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
-#include "clang/AST/DeclContextInternals.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/AST/Expr.h"
@@ -69,6 +68,7 @@ namespace clang {
void VisitTypedefDecl(TypedefDecl *D);
void VisitTypeAliasDecl(TypeAliasDecl *D);
void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingIfExistsDecl(UnresolvedUsingIfExistsDecl *D);
void VisitTagDecl(TagDecl *D);
void VisitEnumDecl(EnumDecl *D);
void VisitRecordDecl(RecordDecl *D);
@@ -114,6 +114,7 @@ namespace clang {
void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingEnumDecl(UsingEnumDecl *D);
void VisitUsingPackDecl(UsingPackDecl *D);
void VisitUsingShadowDecl(UsingShadowDecl *D);
void VisitConstructorUsingShadowDecl(ConstructorUsingShadowDecl *D);
@@ -673,6 +674,7 @@ static void addExplicitSpecifier(ExplicitSpecifier ES,
void ASTDeclWriter::VisitCXXDeductionGuideDecl(CXXDeductionGuideDecl *D) {
addExplicitSpecifier(D->getExplicitSpecifier(), Record);
+ Record.AddDeclRef(D->Ctor);
VisitFunctionDecl(D);
Record.push_back(D->isCopyDeductionCandidate());
Code = serialization::DECL_CXX_DEDUCTION_GUIDE;
@@ -1277,6 +1279,16 @@ void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
Code = serialization::DECL_USING;
}
+void ASTDeclWriter::VisitUsingEnumDecl(UsingEnumDecl *D) {
+ VisitNamedDecl(D);
+ Record.AddSourceLocation(D->getUsingLoc());
+ Record.AddSourceLocation(D->getEnumLoc());
+ Record.AddDeclRef(D->getEnumDecl());
+ Record.AddDeclRef(D->FirstUsingShadow.getPointer());
+ Record.AddDeclRef(Context.getInstantiatedFromUsingEnumDecl(D));
+ Code = serialization::DECL_USING_ENUM;
+}
+
void ASTDeclWriter::VisitUsingPackDecl(UsingPackDecl *D) {
Record.push_back(D->NumExpansions);
VisitNamedDecl(D);
@@ -1333,6 +1345,12 @@ void ASTDeclWriter::VisitUnresolvedUsingTypenameDecl(
Code = serialization::DECL_UNRESOLVED_USING_TYPENAME;
}
+void ASTDeclWriter::VisitUnresolvedUsingIfExistsDecl(
+ UnresolvedUsingIfExistsDecl *D) {
+ VisitNamedDecl(D);
+ Code = serialization::DECL_UNRESOLVED_USING_IF_EXISTS;
+}
+
void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
VisitRecordDecl(D);
@@ -1390,7 +1408,7 @@ void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
}
void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
- Record.push_back(D->getTraillingAllocKind());
+ Record.push_back(D->getTrailingAllocKind());
addExplicitSpecifier(D->getExplicitSpecifier(), Record);
if (auto Inherited = D->getInheritedConstructor()) {
Record.AddDeclRef(Inherited.getShadowDecl());
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index d4f669ea0183..2bb5e4f3563d 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -115,6 +115,7 @@ void ASTStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
void ASTStmtWriter::VisitLabelStmt(LabelStmt *S) {
VisitStmt(S);
+ Record.push_back(S->isSideEntry());
Record.AddDeclRef(S->getDecl());
Record.AddStmt(S->getSubStmt());
Record.AddSourceLocation(S->getIdentLoc());
@@ -579,6 +580,17 @@ void ASTStmtWriter::VisitConstantExpr(ConstantExpr *E) {
Code = serialization::EXPR_CONSTANT;
}
+void ASTStmtWriter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
+ VisitExpr(E);
+
+ Record.AddSourceLocation(E->getLocation());
+ Record.AddSourceLocation(E->getLParenLocation());
+ Record.AddSourceLocation(E->getRParenLocation());
+ Record.AddTypeSourceInfo(E->getTypeSourceInfo());
+
+ Code = serialization::EXPR_SYCL_UNIQUE_STABLE_NAME;
+}
+
void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
VisitExpr(E);
@@ -2170,18 +2182,29 @@ void ASTStmtWriter::VisitSEHLeaveStmt(SEHLeaveStmt *S) {
// OpenMP Directives.
//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitOMPCanonicalLoop(OMPCanonicalLoop *S) {
+ VisitStmt(S);
+ for (Stmt *SubStmt : S->SubStmts)
+ Record.AddStmt(SubStmt);
+ Code = serialization::STMT_OMP_CANONICAL_LOOP;
+}
+
void ASTStmtWriter::VisitOMPExecutableDirective(OMPExecutableDirective *E) {
Record.writeOMPChildren(E->Data);
Record.AddSourceLocation(E->getBeginLoc());
Record.AddSourceLocation(E->getEndLoc());
}
-void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
+void ASTStmtWriter::VisitOMPLoopBasedDirective(OMPLoopBasedDirective *D) {
VisitStmt(D);
- Record.writeUInt32(D->getCollapsedNumber());
+ Record.writeUInt32(D->getLoopsNumber());
VisitOMPExecutableDirective(D);
}
+void ASTStmtWriter::VisitOMPLoopDirective(OMPLoopDirective *D) {
+ VisitOMPLoopBasedDirective(D);
+}
+
void ASTStmtWriter::VisitOMPParallelDirective(OMPParallelDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
@@ -2194,6 +2217,16 @@ void ASTStmtWriter::VisitOMPSimdDirective(OMPSimdDirective *D) {
Code = serialization::STMT_OMP_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPTileDirective(OMPTileDirective *D) {
+ VisitOMPLoopBasedDirective(D);
+ Code = serialization::STMT_OMP_TILE_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPUnrollDirective(OMPUnrollDirective *D) {
+ VisitOMPLoopBasedDirective(D);
+ Code = serialization::STMT_OMP_UNROLL_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPForDirective(OMPForDirective *D) {
VisitOMPLoopDirective(D);
Record.writeBool(D->hasCancel());
@@ -2525,6 +2558,25 @@ void ASTStmtWriter::VisitOMPTargetTeamsDistributeSimdDirective(
Code = serialization::STMT_OMP_TARGET_TEAMS_DISTRIBUTE_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPInteropDirective(OMPInteropDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_INTEROP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPDispatchDirective(OMPDispatchDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Record.AddSourceLocation(D->getTargetCallLoc());
+ Code = serialization::STMT_OMP_DISPATCH_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPMaskedDirective(OMPMaskedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_MASKED_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/clang/lib/Serialization/ModuleFileExtension.cpp b/clang/lib/Serialization/ModuleFileExtension.cpp
index e1ae8a494ab1..6b7fd1d54340 100644
--- a/clang/lib/Serialization/ModuleFileExtension.cpp
+++ b/clang/lib/Serialization/ModuleFileExtension.cpp
@@ -9,6 +9,8 @@
#include "llvm/ADT/Hashing.h"
using namespace clang;
+char ModuleFileExtension::ID = 0;
+
ModuleFileExtension::~ModuleFileExtension() { }
llvm::hash_code ModuleFileExtension::hashExtension(llvm::hash_code Code) const {
diff --git a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 59163c1f31fa..605b11874ef5 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -16,7 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
diff --git a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 7c264bba4b6a..2a5fe9d8ed92 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -19,7 +19,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/APSIntType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -179,7 +179,7 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// CHECK UPPER BOUND: Is byteOffset >= size(baseRegion)? If so,
// we are doing a load/store after the last valid offset.
const MemRegion *MR = rawOffset.getRegion();
- DefinedOrUnknownSVal Size = getDynamicSize(state, MR, svalBuilder);
+ DefinedOrUnknownSVal Size = getDynamicExtent(state, MR, svalBuilder);
if (!Size.getAs<NonLoc>())
break;
diff --git a/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
index 233ce57c3ac9..13781b336426 100644
--- a/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -16,7 +16,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
using namespace clang;
using namespace ento;
@@ -92,12 +92,8 @@ bool BuiltinFunctionChecker::evalCall(const CallEvent &Call,
if (Size.isUndef())
return true; // Return true to model purity.
- SValBuilder& svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal DynSize = getDynamicSize(state, R, svalBuilder);
- DefinedOrUnknownSVal DynSizeMatchesSizeArg =
- svalBuilder.evalEQ(state, DynSize, Size.castAs<DefinedOrUnknownSVal>());
- state = state->assume(DynSizeMatchesSizeArg, true);
- assert(state && "The region should not have any previous constraints");
+ state = setDynamicExtent(state, R, Size.castAs<DefinedOrUnknownSVal>(),
+ C.getSValBuilder());
C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
return true;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 30fd62f887c4..69b90be9aa7e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -19,7 +19,7 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
@@ -346,7 +346,7 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
// Get the size of the array.
const auto *superReg = cast<SubRegion>(ER->getSuperRegion());
DefinedOrUnknownSVal Size =
- getDynamicSize(state, superReg, C.getSValBuilder());
+ getDynamicExtent(state, superReg, C.getSValBuilder());
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
@@ -923,7 +923,7 @@ bool CStringChecker::IsFirstBufInBound(CheckerContext &C,
// Get the size of the array.
const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
- DefinedOrUnknownSVal SizeDV = getDynamicSize(state, superReg, svalBuilder);
+ DefinedOrUnknownSVal SizeDV = getDynamicExtent(state, superReg, svalBuilder);
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
@@ -1060,7 +1060,7 @@ bool CStringChecker::memsetAux(const Expr *DstBuffer, SVal CharVal,
if (Offset.isValid() && !Offset.hasSymbolicOffset() &&
Offset.getOffset() == 0) {
// Get the base region's size.
- DefinedOrUnknownSVal SizeDV = getDynamicSize(State, BR, svalBuilder);
+ DefinedOrUnknownSVal SizeDV = getDynamicExtent(State, BR, svalBuilder);
ProgramStateRef StateWholeReg, StateNotWholeReg;
std::tie(StateWholeReg, StateNotWholeReg) =
@@ -2039,7 +2039,7 @@ void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
RightStrRef = RightStrRef.substr(0, s2Term);
// Use StringRef's comparison methods to compute the actual result.
- int compareRes = IgnoreCase ? LeftStrRef.compare_lower(RightStrRef)
+ int compareRes = IgnoreCase ? LeftStrRef.compare_insensitive(RightStrRef)
: LeftStrRef.compare(RightStrRef);
// The strcmp function returns an integer greater than, equal to, or less
diff --git a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
index a498f252e693..2d2e14de3f2b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -17,7 +17,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
using namespace clang;
using namespace ento;
@@ -112,7 +112,7 @@ void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal Size = getDynamicSize(state, SR, svalBuilder);
+ DefinedOrUnknownSVal Size = getDynamicExtent(state, SR, svalBuilder);
const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
if (!SizeInt)
return;
diff --git a/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp b/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
index dc9cd717be9e..99e11a15c08d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/CheckPlacementNew.cpp
@@ -13,7 +13,7 @@
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/Support/FormatVariadic.h"
using namespace clang;
@@ -64,7 +64,7 @@ private:
SVal PlacementNewChecker::getExtentSizeOfPlace(const CXXNewExpr *NE,
CheckerContext &C) const {
const Expr *Place = NE->getPlacementArg(0);
- return getDynamicSizeWithOffset(C.getState(), C.getSVal(Place));
+ return getDynamicExtentWithOffset(C.getState(), C.getSVal(Place));
}
SVal PlacementNewChecker::getExtentSizeOfNewTarget(const CXXNewExpr *NE,
diff --git a/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
index 73c6517fd0eb..1a7f0d5ab74c 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ContainerModeling.cpp
@@ -763,14 +763,14 @@ bool isBeginCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
return false;
- return IdInfo->getName().endswith_lower("begin");
+ return IdInfo->getName().endswith_insensitive("begin");
}
bool isEndCall(const FunctionDecl *Func) {
const auto *IdInfo = Func->getIdentifier();
if (!IdInfo)
return false;
- return IdInfo->getName().endswith_lower("end");
+ return IdInfo->getName().endswith_insensitive("end");
}
const CXXRecordDecl *getCXXRecordDecl(ProgramStateRef State,
diff --git a/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 6bc186aa2755..8070d869f678 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -11,17 +11,18 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Lex/Lexer.h"
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/ParentMap.h"
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/SaveAndRestore.h"
@@ -260,8 +261,8 @@ public:
break;
}
- BR.EmitBasicReport(AC->getDecl(), Checker, BugType, "Dead store", os.str(),
- L, R, Fixits);
+ BR.EmitBasicReport(AC->getDecl(), Checker, BugType, categories::UnusedCode,
+ os.str(), L, R, Fixits);
}
void CheckVarDecl(const VarDecl *VD, const Expr *Ex, const Expr *Val,
@@ -408,15 +409,17 @@ public:
// Special case: check for initializations with constants.
//
// e.g. : int x = 0;
+ // struct A = {0, 1};
+ // struct B = {{0}, {1, 2}};
//
// If x is EVER assigned a new value later, don't issue
// a warning. This is because such initialization can be
// due to defensive programming.
- if (E->isEvaluatable(Ctx))
+ if (isConstant(E))
return;
if (const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// Special case: check for initialization from constant
// variables.
@@ -444,6 +447,29 @@ public:
}
}
}
+
+private:
+ /// Return true if the given init list can be interpreted as constant
+ bool isConstant(const InitListExpr *Candidate) const {
+ // We consider init list to be constant if each member of the list can be
+ // interpreted as constant.
+ return llvm::all_of(Candidate->inits(),
+ [this](const Expr *Init) { return isConstant(Init); });
+ }
+
+ /// Return true if the given expression can be interpreted as a constant.
+ bool isConstant(const Expr *E) const {
+ // It looks like E itself is a constant
+ if (E->isEvaluatable(Ctx))
+ return true;
+
+ // We should also allow defensive initialization of structs, i.e. { 0 }
+ if (const auto *ILE = dyn_cast<InitListExpr>(E->IgnoreParenCasts())) {
+ return isConstant(ILE);
+ }
+
+ return false;
+ }
};
} // end anonymous namespace
diff --git a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index adfc2f8cb8fe..4a9c7ce3c66d 100644
--- a/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -193,7 +193,7 @@ void DereferenceChecker::reportBug(DerefKind K, ProgramStateRef State,
}
auto report = std::make_unique<PathSensitiveBugReport>(
- *BT, buf.empty() ? BT->getDescription() : StringRef(buf), N);
+ *BT, buf.empty() ? BT->getDescription() : buf.str(), N);
bugreporter::trackExpressionValue(N, bugreporter::getDerefExpr(S), *report);
diff --git a/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index c0167b53ae26..2ce1bef6d228 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -14,7 +14,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/ScopedPrinter.h"
@@ -22,8 +22,8 @@ using namespace clang;
using namespace ento;
namespace {
-class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
- check::EndAnalysis> {
+class ExprInspectionChecker
+ : public Checker<eval::Call, check::DeadSymbols, check::EndAnalysis> {
mutable std::unique_ptr<BugType> BT;
// These stats are per-analysis, not per-branch, hence they shouldn't
@@ -44,6 +44,8 @@ class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
void analyzerExplain(const CallExpr *CE, CheckerContext &C) const;
void analyzerPrintState(const CallExpr *CE, CheckerContext &C) const;
void analyzerGetExtent(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerDumpExtent(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerDumpElementCount(const CallExpr *CE, CheckerContext &C) const;
void analyzerHashDump(const CallExpr *CE, CheckerContext &C) const;
void analyzerDenote(const CallExpr *CE, CheckerContext &C) const;
void analyzerExpress(const CallExpr *CE, CheckerContext &C) const;
@@ -55,17 +57,19 @@ class ExprInspectionChecker : public Checker<eval::Call, check::DeadSymbols,
// Optional parameter `ExprVal` for expression value to be marked interesting.
ExplodedNode *reportBug(llvm::StringRef Msg, CheckerContext &C,
Optional<SVal> ExprVal = None) const;
- ExplodedNode *reportBug(llvm::StringRef Msg, BugReporter &BR,
- ExplodedNode *N,
+ ExplodedNode *reportBug(llvm::StringRef Msg, BugReporter &BR, ExplodedNode *N,
Optional<SVal> ExprVal = None) const;
+ const Expr *getArgExpr(const CallExpr *CE, CheckerContext &C) const;
+ const MemRegion *getArgRegion(const CallExpr *CE, CheckerContext &C) const;
+
public:
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
ExprEngine &Eng) const;
};
-}
+} // namespace
REGISTER_SET_WITH_PROGRAMSTATE(MarkedSymbols, SymbolRef)
REGISTER_MAP_WITH_PROGRAMSTATE(DenotedSymbols, SymbolRef, const StringLiteral *)
@@ -90,6 +94,10 @@ bool ExprInspectionChecker::evalCall(const CallEvent &Call,
&ExprInspectionChecker::analyzerWarnOnDeadSymbol)
.StartsWith("clang_analyzer_explain",
&ExprInspectionChecker::analyzerExplain)
+ .Case("clang_analyzer_dumpExtent",
+ &ExprInspectionChecker::analyzerDumpExtent)
+ .Case("clang_analyzer_dumpElementCount",
+ &ExprInspectionChecker::analyzerDumpElementCount)
.StartsWith("clang_analyzer_dump",
&ExprInspectionChecker::analyzerDump)
.Case("clang_analyzer_getExtent",
@@ -131,7 +139,7 @@ static const char *getArgumentValueString(const CallExpr *CE,
ProgramStateRef StTrue, StFalse;
std::tie(StTrue, StFalse) =
- State->assume(AssertionVal.castAs<DefinedOrUnknownSVal>());
+ State->assume(AssertionVal.castAs<DefinedOrUnknownSVal>());
if (StTrue) {
if (StFalse)
@@ -155,8 +163,7 @@ ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
}
ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
- BugReporter &BR,
- ExplodedNode *N,
+ BugReporter &BR, ExplodedNode *N,
Optional<SVal> ExprVal) const {
if (!N)
return nullptr;
@@ -172,6 +179,30 @@ ExplodedNode *ExprInspectionChecker::reportBug(llvm::StringRef Msg,
return N;
}
+const Expr *ExprInspectionChecker::getArgExpr(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (CE->getNumArgs() == 0) {
+ reportBug("Missing argument", C);
+ return nullptr;
+ }
+ return CE->getArg(0);
+}
+
+const MemRegion *ExprInspectionChecker::getArgRegion(const CallExpr *CE,
+ CheckerContext &C) const {
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
+ return nullptr;
+
+ const MemRegion *MR = C.getSVal(Arg).getAsRegion();
+ if (!MR) {
+ reportBug("Cannot obtain the region", C);
+ return nullptr;
+ }
+
+ return MR;
+}
+
void ExprInspectionChecker::analyzerEval(const CallExpr *CE,
CheckerContext &C) const {
const LocationContext *LC = C.getPredecessor()->getLocationContext();
@@ -215,24 +246,22 @@ void ExprInspectionChecker::analyzerCheckInlined(const CallExpr *CE,
void ExprInspectionChecker::analyzerExplain(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0) {
- reportBug("Missing argument for explaining", C);
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
return;
- }
- SVal V = C.getSVal(CE->getArg(0));
+ SVal V = C.getSVal(Arg);
SValExplainer Ex(C.getASTContext());
reportBug(Ex.Visit(V), C);
}
void ExprInspectionChecker::analyzerDump(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0) {
- reportBug("Missing argument for dumping", C);
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
return;
- }
- SVal V = C.getSVal(CE->getArg(0));
+ SVal V = C.getSVal(Arg);
llvm::SmallString<32> Str;
llvm::raw_svector_ostream OS(Str);
@@ -242,24 +271,57 @@ void ExprInspectionChecker::analyzerDump(const CallExpr *CE,
void ExprInspectionChecker::analyzerGetExtent(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0) {
- reportBug("Missing region for obtaining extent", C);
+ const MemRegion *MR = getArgRegion(CE, C);
+ if (!MR)
return;
- }
-
- auto MR = dyn_cast_or_null<SubRegion>(C.getSVal(CE->getArg(0)).getAsRegion());
- if (!MR) {
- reportBug("Obtaining extent of a non-region", C);
- return;
- }
ProgramStateRef State = C.getState();
- DefinedOrUnknownSVal Size = getDynamicSize(State, MR, C.getSValBuilder());
+ DefinedOrUnknownSVal Size = getDynamicExtent(State, MR, C.getSValBuilder());
State = State->BindExpr(CE, C.getLocationContext(), Size);
C.addTransition(State);
}
+void ExprInspectionChecker::analyzerDumpExtent(const CallExpr *CE,
+ CheckerContext &C) const {
+ const MemRegion *MR = getArgRegion(CE, C);
+ if (!MR)
+ return;
+
+ DefinedOrUnknownSVal Size =
+ getDynamicExtent(C.getState(), MR, C.getSValBuilder());
+
+ SmallString<64> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << Size;
+ reportBug(Out.str(), C);
+}
+
+void ExprInspectionChecker::analyzerDumpElementCount(const CallExpr *CE,
+ CheckerContext &C) const {
+ const MemRegion *MR = getArgRegion(CE, C);
+ if (!MR)
+ return;
+
+ QualType ElementTy;
+ if (const auto *TVR = MR->getAs<TypedValueRegion>()) {
+ ElementTy = TVR->getValueType();
+ } else {
+ ElementTy =
+ MR->castAs<SymbolicRegion>()->getSymbol()->getType()->getPointeeType();
+ }
+
+ assert(!ElementTy->isPointerType());
+
+ DefinedOrUnknownSVal ElementCount =
+ getDynamicElementCount(C.getState(), MR, C.getSValBuilder(), ElementTy);
+
+ SmallString<128> Msg;
+ llvm::raw_svector_ostream Out(Msg);
+ Out << ElementCount;
+ reportBug(Out.str(), C);
+}
+
void ExprInspectionChecker::analyzerPrintState(const CallExpr *CE,
CheckerContext &C) const {
C.getState()->dump();
@@ -267,9 +329,11 @@ void ExprInspectionChecker::analyzerPrintState(const CallExpr *CE,
void ExprInspectionChecker::analyzerWarnOnDeadSymbol(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0)
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
return;
- SVal Val = C.getSVal(CE->getArg(0));
+
+ SVal Val = C.getSVal(Arg);
SymbolRef Sym = Val.getAsSymbol();
if (!Sym)
return;
@@ -306,7 +370,7 @@ void ExprInspectionChecker::checkDeadSymbols(SymbolReaper &SymReaper,
void ExprInspectionChecker::checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
ExprEngine &Eng) const {
- for (auto Item: ReachedStats) {
+ for (auto Item : ReachedStats) {
unsigned NumTimesReached = Item.second.NumTimesReached;
ExplodedNode *N = Item.second.ExampleNode;
@@ -373,9 +437,7 @@ public:
return None;
}
- Optional<std::string> VisitSymExpr(const SymExpr *S) {
- return lookup(S);
- }
+ Optional<std::string> VisitSymExpr(const SymExpr *S) { return lookup(S); }
Optional<std::string> VisitSymIntExpr(const SymIntExpr *S) {
if (Optional<std::string> Str = lookup(S))
@@ -394,7 +456,8 @@ public:
if (Optional<std::string> Str1 = Visit(S->getLHS()))
if (Optional<std::string> Str2 = Visit(S->getRHS()))
return (*Str1 + " " + BinaryOperator::getOpcodeStr(S->getOpcode()) +
- " " + *Str2).str();
+ " " + *Str2)
+ .str();
return None;
}
@@ -410,10 +473,9 @@ public:
void ExprInspectionChecker::analyzerExpress(const CallExpr *CE,
CheckerContext &C) const {
- if (CE->getNumArgs() == 0) {
- reportBug("clang_analyzer_express() requires a symbol", C);
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
return;
- }
SVal ArgVal = C.getSVal(CE->getArg(0));
SymbolRef Sym = ArgVal.getAsSymbol();
diff --git a/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
index 63fbe75fd498..8e02ef74c668 100644
--- a/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/GCDAntipatternChecker.cpp
@@ -80,7 +80,7 @@ static bool isTest(const Decl *D) {
if (const auto *CD = dyn_cast<ObjCContainerDecl>(OD->getParent())) {
std::string ContainerName = CD->getNameAsString();
StringRef CN(ContainerName);
- if (CN.contains_lower("test") || CN.contains_lower("mock"))
+ if (CN.contains_insensitive("test") || CN.contains_insensitive("mock"))
return true;
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index 65e52e139ee4..bcae73378028 100644
--- a/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -34,9 +34,9 @@ namespace {
class InnerPointerChecker
: public Checker<check::DeadSymbols, check::PostCall> {
- CallDescription AppendFn, AssignFn, ClearFn, CStrFn, DataFn, EraseFn,
- InsertFn, PopBackFn, PushBackFn, ReplaceFn, ReserveFn, ResizeFn,
- ShrinkToFitFn, SwapFn;
+ CallDescription AppendFn, AssignFn, AddressofFn, ClearFn, CStrFn, DataFn,
+ DataMemberFn, EraseFn, InsertFn, PopBackFn, PushBackFn, ReplaceFn,
+ ReserveFn, ResizeFn, ShrinkToFitFn, SwapFn;
public:
class InnerPointerBRVisitor : public BugReporterVisitor {
@@ -73,9 +73,10 @@ public:
InnerPointerChecker()
: AppendFn({"std", "basic_string", "append"}),
AssignFn({"std", "basic_string", "assign"}),
+ AddressofFn({"std", "addressof"}),
ClearFn({"std", "basic_string", "clear"}),
- CStrFn({"std", "basic_string", "c_str"}),
- DataFn({"std", "basic_string", "data"}),
+ CStrFn({"std", "basic_string", "c_str"}), DataFn({"std", "data"}, 1),
+ DataMemberFn({"std", "basic_string", "data"}),
EraseFn({"std", "basic_string", "erase"}),
InsertFn({"std", "basic_string", "insert"}),
PopBackFn({"std", "basic_string", "pop_back"}),
@@ -90,6 +91,9 @@ public:
/// pointers referring to the container object's inner buffer.
bool isInvalidatingMemberFunction(const CallEvent &Call) const;
+ /// Check whether the called function returns a raw inner pointer.
+ bool isInnerPointerAccessFunction(const CallEvent &Call) const;
+
/// Mark pointer symbols associated with the given memory region released
/// in the program state.
void markPtrSymbolsReleased(const CallEvent &Call, ProgramStateRef State,
@@ -130,6 +134,12 @@ bool InnerPointerChecker::isInvalidatingMemberFunction(
Call.isCalled(SwapFn));
}
+bool InnerPointerChecker::isInnerPointerAccessFunction(
+ const CallEvent &Call) const {
+ return (Call.isCalled(CStrFn) || Call.isCalled(DataFn) ||
+ Call.isCalled(DataMemberFn));
+}
+
void InnerPointerChecker::markPtrSymbolsReleased(const CallEvent &Call,
ProgramStateRef State,
const MemRegion *MR,
@@ -172,6 +182,11 @@ void InnerPointerChecker::checkFunctionArguments(const CallEvent &Call,
if (!ArgRegion)
continue;
+ // std::addressof function accepts a non-const reference as an argument,
+ // but doesn't modify it.
+ if (Call.isCalled(AddressofFn))
+ continue;
+
markPtrSymbolsReleased(Call, State, ArgRegion, C);
}
}
@@ -195,36 +210,49 @@ void InnerPointerChecker::checkPostCall(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
+ // TODO: Do we need these to be typed?
+ const TypedValueRegion *ObjRegion = nullptr;
+
if (const auto *ICall = dyn_cast<CXXInstanceCall>(&Call)) {
- // TODO: Do we need these to be typed?
- const auto *ObjRegion = dyn_cast_or_null<TypedValueRegion>(
+ ObjRegion = dyn_cast_or_null<TypedValueRegion>(
ICall->getCXXThisVal().getAsRegion());
- if (!ObjRegion)
- return;
- if (Call.isCalled(CStrFn) || Call.isCalled(DataFn)) {
- SVal RawPtr = Call.getReturnValue();
- if (SymbolRef Sym = RawPtr.getAsSymbol(/*IncludeBaseRegions=*/true)) {
- // Start tracking this raw pointer by adding it to the set of symbols
- // associated with this container object in the program state map.
+ // Check [string.require] / second point.
+ if (isInvalidatingMemberFunction(Call)) {
+ markPtrSymbolsReleased(Call, State, ObjRegion, C);
+ return;
+ }
+ }
- PtrSet::Factory &F = State->getStateManager().get_context<PtrSet>();
- const PtrSet *SetPtr = State->get<RawPtrMap>(ObjRegion);
- PtrSet Set = SetPtr ? *SetPtr : F.getEmptySet();
- assert(C.wasInlined || !Set.contains(Sym));
- Set = F.add(Set, Sym);
+ if (isInnerPointerAccessFunction(Call)) {
- State = State->set<RawPtrMap>(ObjRegion, Set);
- C.addTransition(State);
- }
- return;
+ if (isa<SimpleFunctionCall>(Call)) {
+ // NOTE: As of now, we only have one free access function: std::data.
+ // If we add more functions like this in the list, hardcoded
+ // argument index should be changed.
+ ObjRegion =
+ dyn_cast_or_null<TypedValueRegion>(Call.getArgSVal(0).getAsRegion());
}
- // Check [string.require] / second point.
- if (isInvalidatingMemberFunction(Call)) {
- markPtrSymbolsReleased(Call, State, ObjRegion, C);
+ if (!ObjRegion)
return;
+
+ SVal RawPtr = Call.getReturnValue();
+ if (SymbolRef Sym = RawPtr.getAsSymbol(/*IncludeBaseRegions=*/true)) {
+ // Start tracking this raw pointer by adding it to the set of symbols
+ // associated with this container object in the program state map.
+
+ PtrSet::Factory &F = State->getStateManager().get_context<PtrSet>();
+ const PtrSet *SetPtr = State->get<RawPtrMap>(ObjRegion);
+ PtrSet Set = SetPtr ? *SetPtr : F.getEmptySet();
+ assert(C.wasInlined || !Set.contains(Sym));
+ Set = F.add(Set, Sym);
+
+ State = State->set<RawPtrMap>(ObjRegion, Set);
+ C.addTransition(State);
}
+
+ return;
}
// Check [string.require] / first point.
diff --git a/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index ac0f24603dd9..496190149991 100644
--- a/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -29,8 +29,8 @@ bool isIterator(const CXXRecordDecl *CRD) {
return false;
const auto Name = CRD->getName();
- if (!(Name.endswith_lower("iterator") || Name.endswith_lower("iter") ||
- Name.endswith_lower("it")))
+ if (!(Name.endswith_insensitive("iterator") ||
+ Name.endswith_insensitive("iter") || Name.endswith_insensitive("it")))
return false;
bool HasCopyCtor = false, HasCopyAssign = true, HasDtor = false,
diff --git a/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
index dd014648eb6f..a47484497771 100644
--- a/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/IteratorRangeChecker.cpp
@@ -228,7 +228,7 @@ void IteratorRangeChecker::verifyRandomIncrOrDecr(CheckerContext &C,
Value = State->getRawSVal(*ValAsLoc);
}
- if (Value.isUnknown())
+ if (Value.isUnknownOrUndef())
return;
// Incremention or decremention by 0 is never a bug.
diff --git a/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
index 837213875a60..b72d72580c28 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MIGChecker.cpp
@@ -284,8 +284,9 @@ void MIGChecker::checkReturnAux(const ReturnStmt *RS, CheckerContext &C) const {
N);
R->addRange(RS->getSourceRange());
- bugreporter::trackExpressionValue(N, RS->getRetValue(), *R,
- bugreporter::TrackingKind::Thorough, false);
+ bugreporter::trackExpressionValue(
+ N, RS->getRetValue(), *R,
+ {bugreporter::TrackingKind::Thorough, /*EnableNullFPSuppression=*/false});
C.emitReport(std::move(R));
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 7ac7a38dacf3..5d6bd381d3cc 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -16,7 +16,7 @@
#include "MPIChecker.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
namespace clang {
namespace ento {
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index f117d5505ecb..a6470da09c45 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -63,7 +63,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
@@ -509,10 +509,6 @@ private:
ProgramStateRef State,
AllocationFamily Family);
- LLVM_NODISCARD
- static ProgramStateRef addExtentSize(CheckerContext &C, const CXXNewExpr *NE,
- ProgramStateRef State, SVal Target);
-
// Check if this malloc() for special flags. At present that means M_ZERO or
// __GFP_ZERO (in which case, treat it like calloc).
LLVM_NODISCARD
@@ -1424,7 +1420,6 @@ MallocChecker::processNewAllocation(const CXXAllocatorCall &Call,
// existing binding.
SVal Target = Call.getObjectUnderConstruction();
State = MallocUpdateRefState(C, NE, State, Family, Target);
- State = addExtentSize(C, NE, State, Target);
State = ProcessZeroAllocCheck(Call, 0, State, Target);
return State;
}
@@ -1439,52 +1434,6 @@ void MallocChecker::checkNewAllocator(const CXXAllocatorCall &Call,
}
}
-// Sets the extent value of the MemRegion allocated by
-// new expression NE to its size in Bytes.
-//
-ProgramStateRef MallocChecker::addExtentSize(CheckerContext &C,
- const CXXNewExpr *NE,
- ProgramStateRef State,
- SVal Target) {
- if (!State)
- return nullptr;
- SValBuilder &svalBuilder = C.getSValBuilder();
- SVal ElementCount;
- const SubRegion *Region;
- if (NE->isArray()) {
- const Expr *SizeExpr = *NE->getArraySize();
- ElementCount = C.getSVal(SizeExpr);
- // Store the extent size for the (symbolic)region
- // containing the elements.
- Region = Target.getAsRegion()
- ->castAs<SubRegion>()
- ->StripCasts()
- ->castAs<SubRegion>();
- } else {
- ElementCount = svalBuilder.makeIntVal(1, true);
- Region = Target.getAsRegion()->castAs<SubRegion>();
- }
-
- // Set the region's extent equal to the Size in Bytes.
- QualType ElementType = NE->getAllocatedType();
- ASTContext &AstContext = C.getASTContext();
- CharUnits TypeSize = AstContext.getTypeSizeInChars(ElementType);
-
- if (ElementCount.getAs<NonLoc>()) {
- DefinedOrUnknownSVal DynSize = getDynamicSize(State, Region, svalBuilder);
-
- // size in Bytes = ElementCount*TypeSize
- SVal SizeInBytes = svalBuilder.evalBinOpNN(
- State, BO_Mul, ElementCount.castAs<NonLoc>(),
- svalBuilder.makeArrayIndex(TypeSize.getQuantity()),
- svalBuilder.getArrayIndexType());
- DefinedOrUnknownSVal DynSizeMatchesSize = svalBuilder.evalEQ(
- State, DynSize, SizeInBytes.castAs<DefinedOrUnknownSVal>());
- State = State->assume(DynSizeMatchesSize, true);
- }
- return State;
-}
-
static bool isKnownDeallocObjCMethodName(const ObjCMethodCall &Call) {
// If the first selector piece is one of the names below, assume that the
// object takes ownership of the memory, promising to eventually deallocate it
@@ -1588,21 +1537,9 @@ ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
// Fill the region with the initialization value.
State = State->bindDefaultInitial(RetVal, Init, LCtx);
- // Set the region's extent equal to the Size parameter.
- const SymbolicRegion *R =
- dyn_cast_or_null<SymbolicRegion>(RetVal.getAsRegion());
- if (!R)
- return nullptr;
- if (Optional<DefinedOrUnknownSVal> DefinedSize =
- Size.getAs<DefinedOrUnknownSVal>()) {
- DefinedOrUnknownSVal DynSize = getDynamicSize(State, R, svalBuilder);
-
- DefinedOrUnknownSVal DynSizeMatchesSize =
- svalBuilder.evalEQ(State, DynSize, *DefinedSize);
-
- State = State->assume(DynSizeMatchesSize, true);
- assert(State);
- }
+ // Set the region's extent.
+ State = setDynamicExtent(State, RetVal.getAsRegion(),
+ Size.castAs<DefinedOrUnknownSVal>(), svalBuilder);
return MallocUpdateRefState(C, CE, State, Family);
}
@@ -2186,7 +2123,7 @@ void MallocChecker::HandleMismatchedDealloc(CheckerContext &C,
os.str(), N);
R->markInteresting(Sym);
R->addRange(Range);
- R->addVisitor(std::make_unique<MallocBugVisitor>(Sym));
+ R->addVisitor<MallocBugVisitor>(Sym);
C.emitReport(std::move(R));
}
}
@@ -2279,7 +2216,7 @@ void MallocChecker::HandleUseAfterFree(CheckerContext &C, SourceRange Range,
R->markInteresting(Sym);
R->addRange(Range);
- R->addVisitor(std::make_unique<MallocBugVisitor>(Sym));
+ R->addVisitor<MallocBugVisitor>(Sym);
if (AF == AF_InnerBuffer)
R->addVisitor(allocation_state::getInnerPointerBRVisitor(Sym));
@@ -2315,7 +2252,7 @@ void MallocChecker::HandleDoubleFree(CheckerContext &C, SourceRange Range,
R->markInteresting(Sym);
if (PrevSym)
R->markInteresting(PrevSym);
- R->addVisitor(std::make_unique<MallocBugVisitor>(Sym));
+ R->addVisitor<MallocBugVisitor>(Sym);
C.emitReport(std::move(R));
}
}
@@ -2341,7 +2278,7 @@ void MallocChecker::HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
*BT_DoubleDelete, "Attempt to delete released memory", N);
R->markInteresting(Sym);
- R->addVisitor(std::make_unique<MallocBugVisitor>(Sym));
+ R->addVisitor<MallocBugVisitor>(Sym);
C.emitReport(std::move(R));
}
}
@@ -2371,7 +2308,7 @@ void MallocChecker::HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
R->addRange(Range);
if (Sym) {
R->markInteresting(Sym);
- R->addVisitor(std::make_unique<MallocBugVisitor>(Sym));
+ R->addVisitor<MallocBugVisitor>(Sym);
}
C.emitReport(std::move(R));
}
@@ -2641,7 +2578,7 @@ void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
*BT_Leak[*CheckKind], os.str(), N, LocUsedForUniqueing,
AllocNode->getLocationContext()->getDecl());
R->markInteresting(Sym);
- R->addVisitor(std::make_unique<MallocBugVisitor>(Sym, true));
+ R->addVisitor<MallocBugVisitor>(Sym, true);
C.emitReport(std::move(R));
}
@@ -3208,9 +3145,10 @@ static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
static bool isReferenceCountingPointerDestructor(const CXXDestructorDecl *DD) {
if (const IdentifierInfo *II = DD->getParent()->getIdentifier()) {
StringRef N = II->getName();
- if (N.contains_lower("ptr") || N.contains_lower("pointer")) {
- if (N.contains_lower("ref") || N.contains_lower("cnt") ||
- N.contains_lower("intrusive") || N.contains_lower("shared")) {
+ if (N.contains_insensitive("ptr") || N.contains_insensitive("pointer")) {
+ if (N.contains_insensitive("ref") || N.contains_insensitive("cnt") ||
+ N.contains_insensitive("intrusive") ||
+ N.contains_insensitive("shared")) {
return true;
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 71f593cb2b56..4b5206a102b8 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -139,6 +139,10 @@ static bool typesCompatible(ASTContext &C, QualType A, QualType B) {
if (B->isVoidPointerType() && A->getAs<PointerType>())
return true;
+ // sizeof(pointer type) is compatible with void*
+ if (A->isVoidPointerType() && B->getAs<PointerType>())
+ return true;
+
while (true) {
A = A.getCanonicalType();
B = B.getCanonicalType();
diff --git a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index a38298a7abed..cbe938982000 100644
--- a/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -202,7 +202,7 @@ public:
};
private:
- mutable std::unique_ptr<BugType> BT;
+ BugType BT{this, "Use-after-move", categories::CXXMoveSemantics};
// Check if the given form of potential misuse of a given object
// should be reported. If so, get it reported. The callback from which
@@ -393,11 +393,6 @@ ExplodedNode *MoveChecker::reportBug(const MemRegion *Region,
MisuseKind MK) const {
if (ExplodedNode *N = misuseCausesCrash(MK) ? C.generateErrorNode()
: C.generateNonFatalErrorNode()) {
-
- if (!BT)
- BT.reset(new BugType(this, "Use-after-move",
- "C++ move semantics"));
-
// Uniqueing report to the same object.
PathDiagnosticLocation LocUsedForUniqueing;
const ExplodedNode *MoveNode = getMoveLocation(N, Region, C);
@@ -431,7 +426,7 @@ ExplodedNode *MoveChecker::reportBug(const MemRegion *Region,
}
auto R = std::make_unique<PathSensitiveBugReport>(
- *BT, OS.str(), N, LocUsedForUniqueing,
+ BT, OS.str(), N, LocUsedForUniqueing,
MoveNode->getLocationContext()->getDecl());
R->addVisitor(std::make_unique<MovedBugVisitor>(*this, Region, RD, MK));
C.emitReport(std::move(R));
@@ -477,7 +472,7 @@ void MoveChecker::checkPostCall(const CallEvent &Call,
const MemRegion *BaseRegion = ArgRegion->getBaseRegion();
// Skip temp objects because of their short lifetime.
if (BaseRegion->getAs<CXXTempObjectRegion>() ||
- AFC->getArgExpr(0)->isRValue())
+ AFC->getArgExpr(0)->isPRValue())
return;
// If it has already been reported do not need to modify the state.
diff --git a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
index 80b705fb7392..c5437b16c688 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NonnullGlobalConstantsChecker.cpp
@@ -89,7 +89,7 @@ void NonnullGlobalConstantsChecker::checkLocation(SVal location, bool isLoad,
}
/// \param V loaded lvalue.
-/// \return whether {@code val} is a string-like const global.
+/// \return whether @c val is a string-like const global.
bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
Optional<loc::MemRegionVal> RegionVal = V.getAs<loc::MemRegionVal>();
if (!RegionVal)
@@ -127,7 +127,7 @@ bool NonnullGlobalConstantsChecker::isGlobalConstString(SVal V) const {
return false;
}
-/// \return whether {@code type} is extremely unlikely to be null
+/// \return whether @c type is extremely unlikely to be null
bool NonnullGlobalConstantsChecker::isNonnullType(QualType Ty) const {
if (Ty->isPointerType() && Ty->getPointeeType()->isCharType())
diff --git a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index bc7a8a3b12a1..fe8f7e7bf69e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -170,7 +170,7 @@ private:
auto R = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
if (Region) {
R->markInteresting(Region);
- R->addVisitor(std::make_unique<NullabilityBugVisitor>(Region));
+ R->addVisitor<NullabilityBugVisitor>(Region);
}
if (ValueExpr) {
R->addRange(ValueExpr->getSourceRange());
diff --git a/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp b/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
index 270b66dab020..0a8379d9ab99 100644
--- a/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/OSObjectCStyleCast.cpp
@@ -32,7 +32,21 @@ public:
void checkASTCodeBody(const Decl *D, AnalysisManager &AM,
BugReporter &BR) const;
};
+} // namespace
+
+namespace clang {
+namespace ast_matchers {
+AST_MATCHER_P(StringLiteral, mentionsBoundType, std::string, BindingID) {
+ return Builder->removeBindings([this, &Node](const BoundNodesMap &Nodes) {
+ const auto &BN = Nodes.getNode(this->BindingID);
+ if (const auto *ND = BN.get<NamedDecl>()) {
+ return ND->getName() != Node.getString();
+ }
+ return true;
+ });
}
+} // end namespace ast_matchers
+} // end namespace clang
static void emitDiagnostics(const BoundNodes &Nodes,
BugReporter &BR,
@@ -63,22 +77,41 @@ static decltype(auto) hasTypePointingTo(DeclarationMatcher DeclM) {
return hasType(pointerType(pointee(hasDeclaration(DeclM))));
}
-void OSObjectCStyleCastChecker::checkASTCodeBody(const Decl *D, AnalysisManager &AM,
+void OSObjectCStyleCastChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &AM,
BugReporter &BR) const {
AnalysisDeclContext *ADC = AM.getAnalysisDeclContext(D);
auto DynamicCastM = callExpr(callee(functionDecl(hasName("safeMetaCast"))));
-
- auto OSObjTypeM = hasTypePointingTo(cxxRecordDecl(isDerivedFrom("OSMetaClassBase")));
+ // 'allocClassWithName' allocates an object with the given type.
+ // The type is actually provided as a string argument (type's name).
+ // This makes the following pattern possible:
+ //
+ // Foo *object = (Foo *)allocClassWithName("Foo");
+ //
+ // While OSRequiredCast can be used here, it is still not a useful warning.
+ auto AllocClassWithNameM = callExpr(
+ callee(functionDecl(hasName("allocClassWithName"))),
+ // Here we want to make sure that the string argument matches the
+ // type in the cast expression.
+ hasArgument(0, stringLiteral(mentionsBoundType(WarnRecordDecl))));
+
+ auto OSObjTypeM =
+ hasTypePointingTo(cxxRecordDecl(isDerivedFrom("OSMetaClassBase")));
auto OSObjSubclassM = hasTypePointingTo(
- cxxRecordDecl(isDerivedFrom("OSObject")).bind(WarnRecordDecl));
-
- auto CastM = cStyleCastExpr(
- allOf(hasSourceExpression(allOf(OSObjTypeM, unless(DynamicCastM))),
- OSObjSubclassM)).bind(WarnAtNode);
-
- auto Matches = match(stmt(forEachDescendant(CastM)), *D->getBody(), AM.getASTContext());
+ cxxRecordDecl(isDerivedFrom("OSObject")).bind(WarnRecordDecl));
+
+ auto CastM =
+ cStyleCastExpr(
+ allOf(OSObjSubclassM,
+ hasSourceExpression(
+ allOf(OSObjTypeM,
+ unless(anyOf(DynamicCastM, AllocClassWithNameM))))))
+ .bind(WarnAtNode);
+
+ auto Matches =
+ match(stmt(forEachDescendant(CastM)), *D->getBody(), AM.getASTContext());
for (BoundNodes Match : Matches)
emitDiagnostics(Match, BR, ADC, this);
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index 7fd6e2abef4c..c8eab3288094 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -8,7 +8,7 @@
//
// This file defines ObjCAutoreleaseWriteChecker which warns against writes
// into autoreleased out parameters which cause crashes.
-// An example of a problematic write is a write to {@code error} in the example
+// An example of a problematic write is a write to @c error in the example
// below:
//
// - (BOOL) mymethod:(NSError *__autoreleasing *)error list:(NSArray*) list {
diff --git a/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 8c2008a7ceb4..13985af76b00 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -147,8 +147,9 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
auto R = std::make_unique<PathSensitiveBugReport>(
*BT, "Index is out of bounds", N);
R->addRange(IdxExpr->getSourceRange());
- bugreporter::trackExpressionValue(
- N, IdxExpr, *R, bugreporter::TrackingKind::Thorough, false);
+ bugreporter::trackExpressionValue(N, IdxExpr, *R,
+ {bugreporter::TrackingKind::Thorough,
+ /*EnableNullFPSuppression=*/false});
C.emitReport(std::move(R));
return;
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 96f0d9bb3c3d..40472ccfe7e6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -193,6 +193,11 @@ public:
CharUnits PaddingSum;
CharUnits Offset = ASTContext.toCharUnitsFromBits(RL.getFieldOffset(0));
for (const FieldDecl *FD : RD->fields()) {
+ // Skip field that is a subobject of zero size, marked with
+ // [[no_unique_address]] or an empty bitfield, because its address can be
+ // set the same as the other fields addresses.
+ if (FD->isZeroSize(ASTContext))
+ continue;
// This checker only cares about the padded size of the
// field, and not the data size. If the field is a record
// with tail padding, then we won't put that number in our
@@ -249,7 +254,7 @@ public:
RetVal.Field = FD;
auto &Ctx = FD->getASTContext();
auto Info = Ctx.getTypeInfoInChars(FD->getType());
- RetVal.Size = Info.Width;
+ RetVal.Size = FD->isZeroSize(Ctx) ? CharUnits::Zero() : Info.Width;
RetVal.Align = Info.Align;
assert(llvm::isPowerOf2_64(RetVal.Align.getQuantity()));
if (auto Max = FD->getMaxAlignment())
diff --git a/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index 88e80c481a5a..ee71b55a39e6 100644
--- a/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -339,7 +339,16 @@ void PthreadLockChecker::printState(raw_ostream &Out, ProgramStateRef State,
}
}
- // TODO: Dump destroyed mutex symbols?
+ DestroyRetValTy DRV = State->get<DestroyRetVal>();
+ if (!DRV.isEmpty()) {
+ Out << Sep << "Mutexes in unresolved possibly destroyed state:" << NL;
+ for (auto I : DRV) {
+ I.first->dumpToStream(Out);
+ Out << ": ";
+ I.second->dumpToStream(Out);
+ Out << NL;
+ }
+ }
}
void PthreadLockChecker::AcquirePthreadLock(const CallEvent &Call,
@@ -638,8 +647,10 @@ void PthreadLockChecker::checkDeadSymbols(SymbolReaper &SymReaper,
for (auto I : State->get<LockMap>()) {
// Stop tracking dead mutex regions as well.
- if (!SymReaper.isLiveRegion(I.first))
+ if (!SymReaper.isLiveRegion(I.first)) {
State = State->remove<LockMap>(I.first);
+ State = State->remove<DestroyRetVal>(I.first);
+ }
}
// TODO: We probably need to clean up the lock stack as well.
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 1d903530201f..64ac6bc4c06b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -13,6 +13,8 @@
#include "RetainCountDiagnostics.h"
#include "RetainCountChecker.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
using namespace clang;
using namespace ento;
@@ -89,7 +91,7 @@ static std::string getPrettyTypeName(QualType QT) {
return QT.getAsString();
}
-/// Write information about the type state change to {@code os},
+/// Write information about the type state change to @c os,
/// return whether the note should be generated.
static bool shouldGenerateNote(llvm::raw_string_ostream &os,
const RefVal *PrevT,
@@ -164,8 +166,8 @@ static bool shouldGenerateNote(llvm::raw_string_ostream &os,
return true;
}
-/// Finds argument index of the out paramter in the call {@code S}
-/// corresponding to the symbol {@code Sym}.
+/// Finds argument index of the out paramter in the call @c S
+/// corresponding to the symbol @c Sym.
/// If none found, returns None.
static Optional<unsigned> findArgIdxOfSymbol(ProgramStateRef CurrSt,
const LocationContext *LCtx,
@@ -337,11 +339,15 @@ public:
class RefLeakReportVisitor : public RefCountReportVisitor {
public:
- RefLeakReportVisitor(SymbolRef sym) : RefCountReportVisitor(sym) {}
+ RefLeakReportVisitor(SymbolRef Sym, const MemRegion *LastBinding)
+ : RefCountReportVisitor(Sym), LastBinding(LastBinding) {}
PathDiagnosticPieceRef getEndPath(BugReporterContext &BRC,
const ExplodedNode *N,
PathSensitiveBugReport &BR) override;
+
+private:
+ const MemRegion *LastBinding;
};
} // end namespace retaincountchecker
@@ -610,6 +616,41 @@ static Optional<std::string> describeRegion(const MemRegion *MR) {
return None;
}
+using Bindings = llvm::SmallVector<std::pair<const MemRegion *, SVal>, 4>;
+
+class VarBindingsCollector : public StoreManager::BindingsHandler {
+ SymbolRef Sym;
+ Bindings &Result;
+
+public:
+ VarBindingsCollector(SymbolRef Sym, Bindings &ToFill)
+ : Sym(Sym), Result(ToFill) {}
+
+ bool HandleBinding(StoreManager &SMgr, Store Store, const MemRegion *R,
+ SVal Val) override {
+ SymbolRef SymV = Val.getAsLocSymbol();
+ if (!SymV || SymV != Sym)
+ return true;
+
+ if (isa<NonParamVarRegion>(R))
+ Result.emplace_back(R, Val);
+
+ return true;
+ }
+};
+
+Bindings getAllVarBindingsForSymbol(ProgramStateManager &Manager,
+ const ExplodedNode *Node, SymbolRef Sym) {
+ Bindings Result;
+ VarBindingsCollector Collector{Sym, Result};
+ while (Result.empty() && Node) {
+ Manager.iterBindings(Node->getState(), Collector);
+ Node = Node->getFirstPred();
+ }
+
+ return Result;
+}
+
namespace {
// Find the first node in the current function context that referred to the
// tracked symbol and the memory location that value was stored to. Note, the
@@ -729,14 +770,6 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
// assigned to different variables, etc.
BR.markInteresting(Sym);
- // We are reporting a leak. Walk up the graph to get to the first node where
- // the symbol appeared, and also get the first VarDecl that tracked object
- // is stored to.
- AllocationInfo AllocI = GetAllocationSite(BRC.getStateManager(), EndN, Sym);
-
- const MemRegion* FirstBinding = AllocI.R;
- BR.markInteresting(AllocI.InterestingMethodContext);
-
PathDiagnosticLocation L = cast<RefLeakReport>(BR).getEndOfPath();
std::string sbuf;
@@ -744,7 +777,7 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
os << "Object leaked: ";
- Optional<std::string> RegionDescription = describeRegion(FirstBinding);
+ Optional<std::string> RegionDescription = describeRegion(LastBinding);
if (RegionDescription) {
os << "object allocated and stored into '" << *RegionDescription << '\'';
} else {
@@ -753,7 +786,7 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
}
// Get the retain count.
- const RefVal* RV = getRefBinding(EndN->getState(), Sym);
+ const RefVal *RV = getRefBinding(EndN->getState(), Sym);
assert(RV);
if (RV->getKind() == RefVal::ErrorLeakReturned) {
@@ -794,14 +827,15 @@ RefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
" Foundation";
} else if (RV->getObjKind() == ObjKind::OS) {
std::string FuncName = FD->getNameAsString();
- os << "whose name ('" << FuncName
- << "') starts with '" << StringRef(FuncName).substr(0, 3) << "'";
+ os << "whose name ('" << FuncName << "') starts with '"
+ << StringRef(FuncName).substr(0, 3) << "'";
}
}
}
} else {
os << " is not referenced later in this execution path and has a retain "
- "count of +" << RV->getCount();
+ "count of +"
+ << RV->getCount();
}
return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
@@ -812,7 +846,7 @@ RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
: PathSensitiveBugReport(D, D.getDescription(), n), Sym(sym),
isLeak(isLeak) {
if (!isLeak)
- addVisitor(std::make_unique<RefCountReportVisitor>(sym));
+ addVisitor<RefCountReportVisitor>(sym);
}
RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
@@ -820,19 +854,19 @@ RefCountReport::RefCountReport(const RefCountBug &D, const LangOptions &LOpts,
StringRef endText)
: PathSensitiveBugReport(D, D.getDescription(), endText, n) {
- addVisitor(std::make_unique<RefCountReportVisitor>(sym));
+ addVisitor<RefCountReportVisitor>(sym);
}
-void RefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
- const SourceManager& SMgr = Ctx.getSourceManager();
+void RefLeakReport::deriveParamLocation(CheckerContext &Ctx) {
+ const SourceManager &SMgr = Ctx.getSourceManager();
- if (!sym->getOriginRegion())
+ if (!Sym->getOriginRegion())
return;
- auto *Region = dyn_cast<DeclRegion>(sym->getOriginRegion());
+ auto *Region = dyn_cast<DeclRegion>(Sym->getOriginRegion());
if (Region) {
const Decl *PDecl = Region->getDecl();
- if (PDecl && isa<ParmVarDecl>(PDecl)) {
+ if (isa_and_nonnull<ParmVarDecl>(PDecl)) {
PathDiagnosticLocation ParamLocation =
PathDiagnosticLocation::create(PDecl, SMgr);
Location = ParamLocation;
@@ -842,8 +876,7 @@ void RefLeakReport::deriveParamLocation(CheckerContext &Ctx, SymbolRef sym) {
}
}
-void RefLeakReport::deriveAllocLocation(CheckerContext &Ctx,
- SymbolRef sym) {
+void RefLeakReport::deriveAllocLocation(CheckerContext &Ctx) {
// Most bug reports are cached at the location where they occurred.
// With leaks, we want to unique them by the location where they were
// allocated, and only report a single path. To do this, we need to find
@@ -854,13 +887,13 @@ void RefLeakReport::deriveAllocLocation(CheckerContext &Ctx,
// same SourceLocation.
const ExplodedNode *AllocNode = nullptr;
- const SourceManager& SMgr = Ctx.getSourceManager();
+ const SourceManager &SMgr = Ctx.getSourceManager();
AllocationInfo AllocI =
- GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+ GetAllocationSite(Ctx.getStateManager(), getErrorNode(), Sym);
AllocNode = AllocI.N;
- AllocBinding = AllocI.R;
+ AllocFirstBinding = AllocI.R;
markInteresting(AllocI.InterestingMethodContext);
// Get the SourceLocation for the allocation site.
@@ -870,13 +903,12 @@ void RefLeakReport::deriveAllocLocation(CheckerContext &Ctx,
AllocStmt = AllocNode->getStmtForDiagnostics();
if (!AllocStmt) {
- AllocBinding = nullptr;
+ AllocFirstBinding = nullptr;
return;
}
- PathDiagnosticLocation AllocLocation =
- PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
- AllocNode->getLocationContext());
+ PathDiagnosticLocation AllocLocation = PathDiagnosticLocation::createBegin(
+ AllocStmt, SMgr, AllocNode->getLocationContext());
Location = AllocLocation;
// Set uniqieing info, which will be used for unique the bug reports. The
@@ -891,7 +923,8 @@ void RefLeakReport::createDescription(CheckerContext &Ctx) {
llvm::raw_string_ostream os(Description);
os << "Potential leak of an object";
- Optional<std::string> RegionDescription = describeRegion(AllocBinding);
+ Optional<std::string> RegionDescription =
+ describeRegion(AllocBindingToReport);
if (RegionDescription) {
os << " stored into '" << *RegionDescription << '\'';
} else {
@@ -901,16 +934,75 @@ void RefLeakReport::createDescription(CheckerContext &Ctx) {
}
}
+void RefLeakReport::findBindingToReport(CheckerContext &Ctx,
+ ExplodedNode *Node) {
+ if (!AllocFirstBinding)
+ // If we don't have any bindings, we won't be able to find any
+ // better binding to report.
+ return;
+
+ // If the original region still contains the leaking symbol...
+ if (Node->getState()->getSVal(AllocFirstBinding).getAsSymbol() == Sym) {
+ // ...it is the best binding to report.
+ AllocBindingToReport = AllocFirstBinding;
+ return;
+ }
+
+ // At this point, we know that the original region doesn't contain the leaking
+ // symbol when the actual leak happens. It means that it can be confusing for the
+ // user to see such description in the message.
+ //
+ // Let's consider the following example:
+ // Object *Original = allocate(...);
+ // Object *New = Original;
+ // Original = allocate(...);
+ // Original->release();
+ //
+ // Complaining about a leaking object "stored into Original" might cause
+ // justified confusion because 'Original' is actually released.
+ // We should complain about 'New' instead.
+ Bindings AllVarBindings =
+ getAllVarBindingsForSymbol(Ctx.getStateManager(), Node, Sym);
+
+ // While looking for the last var bindings, we can still find
+ // `AllocFirstBinding` to be one of them. In situations like this,
+ // it would still be the easiest case to explain to our users.
+ if (!AllVarBindings.empty() &&
+ llvm::count_if(AllVarBindings,
+ [this](const std::pair<const MemRegion *, SVal> Binding) {
+ return Binding.first == AllocFirstBinding;
+ }) == 0) {
+ // Let's pick one of them at random (if there is something to pick from).
+ AllocBindingToReport = AllVarBindings[0].first;
+
+ // Because 'AllocBindingToReport' is not the same as
+ // 'AllocFirstBinding', we need to explain how the leaking object
+ // got from one to another.
+ //
+ // NOTE: We use the actual SVal stored in AllocBindingToReport here because
+ // trackStoredValue compares SVal's and it can get trickier for
+ // something like derived regions if we want to construct SVal from
+ // Sym. Instead, we take the value that is definitely stored in that
+ // region, thus guaranteeing that trackStoredValue will work.
+ bugreporter::trackStoredValue(AllVarBindings[0].second.castAs<KnownSVal>(),
+ AllocBindingToReport, *this);
+ } else {
+ AllocBindingToReport = AllocFirstBinding;
+ }
+}
+
RefLeakReport::RefLeakReport(const RefCountBug &D, const LangOptions &LOpts,
- ExplodedNode *n, SymbolRef sym,
+ ExplodedNode *N, SymbolRef Sym,
CheckerContext &Ctx)
- : RefCountReport(D, LOpts, n, sym, /*isLeak=*/true) {
+ : RefCountReport(D, LOpts, N, Sym, /*isLeak=*/true) {
+
+ deriveAllocLocation(Ctx);
+ findBindingToReport(Ctx, N);
- deriveAllocLocation(Ctx, sym);
- if (!AllocBinding)
- deriveParamLocation(Ctx, sym);
+ if (!AllocFirstBinding)
+ deriveParamLocation(Ctx);
createDescription(Ctx);
- addVisitor(std::make_unique<RefLeakReportVisitor>(sym));
+ addVisitor<RefLeakReportVisitor>(Sym, AllocBindingToReport);
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
index 286a8ae2ef7d..d05900895c6a 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
+++ b/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.h
@@ -68,17 +68,20 @@ public:
};
class RefLeakReport : public RefCountReport {
- const MemRegion* AllocBinding;
- const Stmt *AllocStmt;
+ const MemRegion *AllocFirstBinding = nullptr;
+ const MemRegion *AllocBindingToReport = nullptr;
+ const Stmt *AllocStmt = nullptr;
PathDiagnosticLocation Location;
// Finds the function declaration where a leak warning for the parameter
// 'sym' should be raised.
- void deriveParamLocation(CheckerContext &Ctx, SymbolRef sym);
- // Finds the location where a leak warning for 'sym' should be raised.
- void deriveAllocLocation(CheckerContext &Ctx, SymbolRef sym);
+ void deriveParamLocation(CheckerContext &Ctx);
+ // Finds the location where the leaking object is allocated.
+ void deriveAllocLocation(CheckerContext &Ctx);
// Produces description of a leak warning which is printed on the console.
void createDescription(CheckerContext &Ctx);
+ // Finds the binding that we should use in a leak warning.
+ void findBindingToReport(CheckerContext &Ctx, ExplodedNode *Node);
public:
RefLeakReport(const RefCountBug &D, const LangOptions &LOpts, ExplodedNode *n,
diff --git a/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index 1a94ccdc2825..885750218b9e 100644
--- a/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -16,7 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
using namespace clang;
diff --git a/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
index d9dc72ddaa21..2cf6c6ff47f1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/RunLoopAutoreleaseLeakChecker.cpp
@@ -57,8 +57,8 @@ public:
} // end anonymous namespace
-/// \return Whether {@code A} occurs before {@code B} in traversal of
-/// {@code Parent}.
+/// \return Whether @c A occurs before @c B in traversal of
+/// @c Parent.
/// Conceptually a very incomplete/unsound approximation of happens-before
/// relationship (A is likely to be evaluated before B),
/// but useful enough in this case.
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h b/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
index 92c386bbb2b0..6a40f8eda5fa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtr.h
@@ -22,6 +22,9 @@ namespace smartptr {
/// Returns true if the event call is on smart pointer.
bool isStdSmartPtrCall(const CallEvent &Call);
+bool isStdSmartPtr(const CXXRecordDecl *RD);
+bool isStdSmartPtr(const Expr *E);
+
/// Returns whether the smart pointer is null or not.
bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion);
diff --git a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index 6ee7bd9252b3..09e885e8133f 100644
--- a/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -25,16 +25,20 @@
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/ErrorHandling.h"
#include <string>
using namespace clang;
using namespace ento;
namespace {
+
class SmartPtrModeling
: public Checker<eval::Call, check::DeadSymbols, check::RegionChanges,
check::LiveSymbols> {
@@ -60,7 +64,7 @@ public:
private:
void handleReset(const CallEvent &Call, CheckerContext &C) const;
void handleRelease(const CallEvent &Call, CheckerContext &C) const;
- void handleSwap(const CallEvent &Call, CheckerContext &C) const;
+ void handleSwapMethod(const CallEvent &Call, CheckerContext &C) const;
void handleGet(const CallEvent &Call, CheckerContext &C) const;
bool handleAssignOp(const CallEvent &Call, CheckerContext &C) const;
bool handleMoveCtr(const CallEvent &Call, CheckerContext &C,
@@ -68,19 +72,56 @@ private:
bool updateMovedSmartPointers(CheckerContext &C, const MemRegion *ThisRegion,
const MemRegion *OtherSmartPtrRegion) const;
void handleBoolConversion(const CallEvent &Call, CheckerContext &C) const;
+ bool handleComparisionOp(const CallEvent &Call, CheckerContext &C) const;
+ bool handleOstreamOperator(const CallEvent &Call, CheckerContext &C) const;
+ bool handleSwap(ProgramStateRef State, SVal First, SVal Second,
+ CheckerContext &C) const;
+ std::pair<SVal, ProgramStateRef>
+ retrieveOrConjureInnerPtrVal(ProgramStateRef State,
+ const MemRegion *ThisRegion, const Expr *E,
+ QualType Type, CheckerContext &C) const;
using SmartPtrMethodHandlerFn =
void (SmartPtrModeling::*)(const CallEvent &Call, CheckerContext &) const;
CallDescriptionMap<SmartPtrMethodHandlerFn> SmartPtrMethodHandlers{
{{"reset"}, &SmartPtrModeling::handleReset},
{{"release"}, &SmartPtrModeling::handleRelease},
- {{"swap", 1}, &SmartPtrModeling::handleSwap},
+ {{"swap", 1}, &SmartPtrModeling::handleSwapMethod},
{{"get"}, &SmartPtrModeling::handleGet}};
+ const CallDescription StdSwapCall{{"std", "swap"}, 2};
+ const CallDescription StdMakeUniqueCall{{"std", "make_unique"}};
+ const CallDescription StdMakeUniqueForOverwriteCall{
+ {"std", "make_unique_for_overwrite"}};
};
} // end of anonymous namespace
REGISTER_MAP_WITH_PROGRAMSTATE(TrackedRegionMap, const MemRegion *, SVal)
+// Checks if RD has name in Names and is in std namespace
+static bool hasStdClassWithName(const CXXRecordDecl *RD,
+ ArrayRef<llvm::StringLiteral> Names) {
+ if (!RD || !RD->getDeclContext()->isStdNamespace())
+ return false;
+ if (RD->getDeclName().isIdentifier()) {
+ StringRef Name = RD->getName();
+ return llvm::any_of(Names, [&Name](StringRef GivenName) -> bool {
+ return Name == GivenName;
+ });
+ }
+ return false;
+}
+
+constexpr llvm::StringLiteral STD_PTR_NAMES[] = {"shared_ptr", "unique_ptr",
+ "weak_ptr"};
+
+static bool isStdSmartPtr(const CXXRecordDecl *RD) {
+ return hasStdClassWithName(RD, STD_PTR_NAMES);
+}
+
+static bool isStdSmartPtr(const Expr *E) {
+ return isStdSmartPtr(E->getType()->getAsCXXRecordDecl());
+}
+
// Define the inter-checker API.
namespace clang {
namespace ento {
@@ -89,18 +130,24 @@ bool isStdSmartPtrCall(const CallEvent &Call) {
const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(Call.getDecl());
if (!MethodDecl || !MethodDecl->getParent())
return false;
+ return isStdSmartPtr(MethodDecl->getParent());
+}
- const auto *RecordDecl = MethodDecl->getParent();
- if (!RecordDecl || !RecordDecl->getDeclContext()->isStdNamespace())
+bool isStdSmartPtr(const CXXRecordDecl *RD) {
+ if (!RD || !RD->getDeclContext()->isStdNamespace())
return false;
- if (RecordDecl->getDeclName().isIdentifier()) {
- StringRef Name = RecordDecl->getName();
+ if (RD->getDeclName().isIdentifier()) {
+ StringRef Name = RD->getName();
return Name == "shared_ptr" || Name == "unique_ptr" || Name == "weak_ptr";
}
return false;
}
+bool isStdSmartPtr(const Expr *E) {
+ return isStdSmartPtr(E->getType()->getAsCXXRecordDecl());
+}
+
bool isNullSmartPtr(const ProgramStateRef State, const MemRegion *ThisRegion) {
const auto *InnerPointVal = State->get<TrackedRegionMap>(ThisRegion);
return InnerPointVal &&
@@ -135,28 +182,47 @@ static ProgramStateRef updateSwappedRegion(ProgramStateRef State,
return State;
}
-// Helper method to get the inner pointer type of specialized smart pointer
-// Returns empty type if not found valid inner pointer type.
-static QualType getInnerPointerType(const CallEvent &Call, CheckerContext &C) {
- const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(Call.getDecl());
- if (!MethodDecl || !MethodDecl->getParent())
- return {};
-
- const auto *RecordDecl = MethodDecl->getParent();
- if (!RecordDecl || !RecordDecl->isInStdNamespace())
+static QualType getInnerPointerType(CheckerContext C, const CXXRecordDecl *RD) {
+ if (!RD || !RD->isInStdNamespace())
return {};
- const auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RecordDecl);
+ const auto *TSD = dyn_cast<ClassTemplateSpecializationDecl>(RD);
if (!TSD)
return {};
auto TemplateArgs = TSD->getTemplateArgs().asArray();
- if (TemplateArgs.size() == 0)
+ if (TemplateArgs.empty())
return {};
auto InnerValueType = TemplateArgs[0].getAsType();
return C.getASTContext().getPointerType(InnerValueType.getCanonicalType());
}
+// This is for use with standalone functions like std::make_unique,
+// std::make_unique_for_overwrite, etc. It reads the template parameter and
+// returns the pointer type corresponding to it.
+static QualType getPointerTypeFromTemplateArg(const CallEvent &Call,
+ CheckerContext &C) {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!FD || !FD->isFunctionTemplateSpecialization())
+ return {};
+ const auto &TemplateArgs = FD->getTemplateSpecializationArgs()->asArray();
+ if (TemplateArgs.size() == 0)
+ return {};
+ auto ValueType = TemplateArgs[0].getAsType();
+ return C.getASTContext().getPointerType(ValueType.getCanonicalType());
+}
+
+// Helper method to get the inner pointer type of specialized smart pointer
+// Returns empty type if not found valid inner pointer type.
+static QualType getInnerPointerType(const CallEvent &Call, CheckerContext &C) {
+ const auto *MethodDecl = dyn_cast_or_null<CXXMethodDecl>(Call.getDecl());
+ if (!MethodDecl || !MethodDecl->getParent())
+ return {};
+
+ const auto *RecordDecl = MethodDecl->getParent();
+ return getInnerPointerType(C, RecordDecl);
+}
+
// Helper method to pretty print region and avoid extra spacing.
static void checkAndPrettyPrintRegion(llvm::raw_ostream &OS,
const MemRegion *Region) {
@@ -175,9 +241,107 @@ bool SmartPtrModeling::isBoolConversionMethod(const CallEvent &Call) const {
return CD && CD->getConversionType()->isBooleanType();
}
+constexpr llvm::StringLiteral BASIC_OSTREAM_NAMES[] = {"basic_ostream"};
+
+bool isStdBasicOstream(const Expr *E) {
+ const auto *RD = E->getType()->getAsCXXRecordDecl();
+ return hasStdClassWithName(RD, BASIC_OSTREAM_NAMES);
+}
+
+static bool isStdFunctionCall(const CallEvent &Call) {
+ return Call.getDecl() && Call.getDecl()->getDeclContext()->isStdNamespace();
+}
+
+bool isStdOstreamOperatorCall(const CallEvent &Call) {
+ if (Call.getNumArgs() != 2 || !isStdFunctionCall(Call))
+ return false;
+ const auto *FC = dyn_cast<SimpleFunctionCall>(&Call);
+ if (!FC)
+ return false;
+ const FunctionDecl *FD = FC->getDecl();
+ if (!FD->isOverloadedOperator())
+ return false;
+ const OverloadedOperatorKind OOK = FD->getOverloadedOperator();
+ if (OOK != clang::OO_LessLess)
+ return false;
+ return isStdSmartPtr(Call.getArgExpr(1)) &&
+ isStdBasicOstream(Call.getArgExpr(0));
+}
+
+static bool isPotentiallyComparisionOpCall(const CallEvent &Call) {
+ if (Call.getNumArgs() != 2 || !isStdFunctionCall(Call))
+ return false;
+ return smartptr::isStdSmartPtr(Call.getArgExpr(0)) ||
+ smartptr::isStdSmartPtr(Call.getArgExpr(1));
+}
+
bool SmartPtrModeling::evalCall(const CallEvent &Call,
CheckerContext &C) const {
+
ProgramStateRef State = C.getState();
+
+ // If any one of the args is a std smart pointer, then
+ // we can try this function.
+ if (ModelSmartPtrDereference && isPotentiallyComparisionOpCall(Call))
+ if (handleComparisionOp(Call, C))
+ return true;
+
+ if (ModelSmartPtrDereference && isStdOstreamOperatorCall(Call))
+ return handleOstreamOperator(Call, C);
+
+ if (Call.isCalled(StdSwapCall)) {
+ // Check the first arg, if it is of std::unique_ptr type.
+ assert(Call.getNumArgs() == 2 && "std::swap should have two arguments");
+ const Expr *FirstArg = Call.getArgExpr(0);
+ if (!smartptr::isStdSmartPtr(FirstArg->getType()->getAsCXXRecordDecl()))
+ return false;
+ return handleSwap(State, Call.getArgSVal(0), Call.getArgSVal(1), C);
+ }
+
+ if (Call.isCalled(StdMakeUniqueCall) ||
+ Call.isCalled(StdMakeUniqueForOverwriteCall)) {
+ if (!ModelSmartPtrDereference)
+ return false;
+
+ const Optional<SVal> ThisRegionOpt = Call.getReturnValueUnderConstruction();
+ if (!ThisRegionOpt)
+ return false;
+
+ const auto PtrVal = C.getSValBuilder().getConjuredHeapSymbolVal(
+ Call.getOriginExpr(), C.getLocationContext(),
+ getPointerTypeFromTemplateArg(Call, C), C.blockCount());
+
+ const MemRegion *ThisRegion = ThisRegionOpt->getAsRegion();
+ State = State->set<TrackedRegionMap>(ThisRegion, PtrVal);
+ State = State->assume(PtrVal, true);
+
+ // TODO: ExprEngine should do this for us.
+ // For a bit more context:
+ // 1) Why do we need this? Since we are modelling a "function"
+ // that returns a constructed object we need to store this information in
+ // the program state.
+ //
+ // 2) Why does this work?
+ // `updateObjectsUnderConstruction` does exactly as it sounds.
+ //
+ // 3) How should it look like when moved to the Engine?
+ // It would be nice if we can just
+ // pretend we don't need to know about this - ie, completely automatic work.
+ // However, realistically speaking, I think we would need to "signal" the
+ // ExprEngine evalCall handler that we are constructing an object with this
+ // function call (constructors obviously construct, hence can be
+ // automatically deduced).
+ auto &Engine = State->getStateManager().getOwningEngine();
+ State = Engine.updateObjectsUnderConstruction(
+ *ThisRegionOpt, nullptr, State, C.getLocationContext(),
+ Call.getConstructionContext(), {});
+
+ // We don't leave a note here since it is guaranteed the
+ // unique_ptr from this call is non-null (hence is safe to de-reference).
+ C.addTransition(State);
+ return true;
+ }
+
if (!smartptr::isStdSmartPtrCall(Call))
return false;
@@ -272,6 +436,108 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
return C.isDifferent();
}
+std::pair<SVal, ProgramStateRef> SmartPtrModeling::retrieveOrConjureInnerPtrVal(
+ ProgramStateRef State, const MemRegion *ThisRegion, const Expr *E,
+ QualType Type, CheckerContext &C) const {
+ const auto *Ptr = State->get<TrackedRegionMap>(ThisRegion);
+ if (Ptr)
+ return {*Ptr, State};
+ auto Val = C.getSValBuilder().conjureSymbolVal(E, C.getLocationContext(),
+ Type, C.blockCount());
+ State = State->set<TrackedRegionMap>(ThisRegion, Val);
+ return {Val, State};
+}
+
+bool SmartPtrModeling::handleComparisionOp(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *FC = dyn_cast<SimpleFunctionCall>(&Call);
+ if (!FC)
+ return false;
+ const FunctionDecl *FD = FC->getDecl();
+ if (!FD->isOverloadedOperator())
+ return false;
+ const OverloadedOperatorKind OOK = FD->getOverloadedOperator();
+ if (!(OOK == OO_EqualEqual || OOK == OO_ExclaimEqual || OOK == OO_Less ||
+ OOK == OO_LessEqual || OOK == OO_Greater || OOK == OO_GreaterEqual ||
+ OOK == OO_Spaceship))
+ return false;
+
+ // There are some special cases about which we can infer about
+ // the resulting answer.
+ // For reference, there is a discussion at https://reviews.llvm.org/D104616.
+ // Also, the cppreference page is good to look at
+ // https://en.cppreference.com/w/cpp/memory/unique_ptr/operator_cmp.
+
+ auto makeSValFor = [&C, this](ProgramStateRef State, const Expr *E,
+ SVal S) -> std::pair<SVal, ProgramStateRef> {
+ if (S.isZeroConstant()) {
+ return {S, State};
+ }
+ const MemRegion *Reg = S.getAsRegion();
+ assert(Reg &&
+ "this pointer of std::unique_ptr should be obtainable as MemRegion");
+ QualType Type = getInnerPointerType(C, E->getType()->getAsCXXRecordDecl());
+ return retrieveOrConjureInnerPtrVal(State, Reg, E, Type, C);
+ };
+
+ SVal First = Call.getArgSVal(0);
+ SVal Second = Call.getArgSVal(1);
+ const auto *FirstExpr = Call.getArgExpr(0);
+ const auto *SecondExpr = Call.getArgExpr(1);
+
+ const auto *ResultExpr = Call.getOriginExpr();
+ const auto *LCtx = C.getLocationContext();
+ auto &Bldr = C.getSValBuilder();
+ ProgramStateRef State = C.getState();
+
+ SVal FirstPtrVal, SecondPtrVal;
+ std::tie(FirstPtrVal, State) = makeSValFor(State, FirstExpr, First);
+ std::tie(SecondPtrVal, State) = makeSValFor(State, SecondExpr, Second);
+ BinaryOperatorKind BOK =
+ operationKindFromOverloadedOperator(OOK, true).GetBinaryOpUnsafe();
+ auto RetVal = Bldr.evalBinOp(State, BOK, FirstPtrVal, SecondPtrVal,
+ Call.getResultType());
+
+ if (OOK != OO_Spaceship) {
+ ProgramStateRef TrueState, FalseState;
+ std::tie(TrueState, FalseState) =
+ State->assume(*RetVal.getAs<DefinedOrUnknownSVal>());
+ if (TrueState)
+ C.addTransition(
+ TrueState->BindExpr(ResultExpr, LCtx, Bldr.makeTruthVal(true)));
+ if (FalseState)
+ C.addTransition(
+ FalseState->BindExpr(ResultExpr, LCtx, Bldr.makeTruthVal(false)));
+ } else {
+ C.addTransition(State->BindExpr(ResultExpr, LCtx, RetVal));
+ }
+ return true;
+}
+
+bool SmartPtrModeling::handleOstreamOperator(const CallEvent &Call,
+ CheckerContext &C) const {
+ // operator<< does not modify the smart pointer.
+ // And we don't really have much of modelling of basic_ostream.
+ // So, we are better off:
+ // 1) Invalidating the mem-region of the ostream object at hand.
+ // 2) Setting the SVal of the basic_ostream as the return value.
+ // Not very satisfying, but it gets the job done, and is better
+ // than the default handling. :)
+
+ ProgramStateRef State = C.getState();
+ const auto StreamVal = Call.getArgSVal(0);
+ const MemRegion *StreamThisRegion = StreamVal.getAsRegion();
+ if (!StreamThisRegion)
+ return false;
+ State =
+ State->invalidateRegions({StreamThisRegion}, Call.getOriginExpr(),
+ C.blockCount(), C.getLocationContext(), false);
+ State =
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(), StreamVal);
+ C.addTransition(State);
+ return true;
+}
+
void SmartPtrModeling::checkDeadSymbols(SymbolReaper &SymReaper,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -395,43 +661,52 @@ void SmartPtrModeling::handleRelease(const CallEvent &Call,
// pointer.
}
-void SmartPtrModeling::handleSwap(const CallEvent &Call,
- CheckerContext &C) const {
+void SmartPtrModeling::handleSwapMethod(const CallEvent &Call,
+ CheckerContext &C) const {
// To model unique_ptr::swap() method.
const auto *IC = dyn_cast<CXXInstanceCall>(&Call);
if (!IC)
return;
- const MemRegion *ThisRegion = IC->getCXXThisVal().getAsRegion();
- if (!ThisRegion)
- return;
+ auto State = C.getState();
+ handleSwap(State, IC->getCXXThisVal(), Call.getArgSVal(0), C);
+}
- const auto *ArgRegion = Call.getArgSVal(0).getAsRegion();
- if (!ArgRegion)
- return;
+bool SmartPtrModeling::handleSwap(ProgramStateRef State, SVal First,
+ SVal Second, CheckerContext &C) const {
+ const MemRegion *FirstThisRegion = First.getAsRegion();
+ if (!FirstThisRegion)
+ return false;
+ const MemRegion *SecondThisRegion = Second.getAsRegion();
+ if (!SecondThisRegion)
+ return false;
- auto State = C.getState();
- const auto *ThisRegionInnerPointerVal =
- State->get<TrackedRegionMap>(ThisRegion);
- const auto *ArgRegionInnerPointerVal =
- State->get<TrackedRegionMap>(ArgRegion);
+ const auto *FirstInnerPtrVal = State->get<TrackedRegionMap>(FirstThisRegion);
+ const auto *SecondInnerPtrVal =
+ State->get<TrackedRegionMap>(SecondThisRegion);
- // Swap the tracked region values.
- State = updateSwappedRegion(State, ThisRegion, ArgRegionInnerPointerVal);
- State = updateSwappedRegion(State, ArgRegion, ThisRegionInnerPointerVal);
+ State = updateSwappedRegion(State, FirstThisRegion, SecondInnerPtrVal);
+ State = updateSwappedRegion(State, SecondThisRegion, FirstInnerPtrVal);
- C.addTransition(
- State, C.getNoteTag([ThisRegion, ArgRegion](PathSensitiveBugReport &BR,
- llvm::raw_ostream &OS) {
- if (&BR.getBugType() != smartptr::getNullDereferenceBugType() ||
- !BR.isInteresting(ThisRegion))
- return;
- BR.markInteresting(ArgRegion);
- OS << "Swapped null smart pointer";
- checkAndPrettyPrintRegion(OS, ArgRegion);
- OS << " with smart pointer";
- checkAndPrettyPrintRegion(OS, ThisRegion);
- }));
+ C.addTransition(State, C.getNoteTag([FirstThisRegion, SecondThisRegion](
+ PathSensitiveBugReport &BR,
+ llvm::raw_ostream &OS) {
+ if (&BR.getBugType() != smartptr::getNullDereferenceBugType())
+ return;
+ if (BR.isInteresting(FirstThisRegion) &&
+ !BR.isInteresting(SecondThisRegion)) {
+ BR.markInteresting(SecondThisRegion);
+ BR.markNotInteresting(FirstThisRegion);
+ }
+ if (BR.isInteresting(SecondThisRegion) &&
+ !BR.isInteresting(FirstThisRegion)) {
+ BR.markInteresting(FirstThisRegion);
+ BR.markNotInteresting(SecondThisRegion);
+ }
+ // TODO: We need to emit some note here probably!!
+ }));
+
+ return true;
}
void SmartPtrModeling::handleGet(const CallEvent &Call,
@@ -446,15 +721,8 @@ void SmartPtrModeling::handleGet(const CallEvent &Call,
return;
SVal InnerPointerVal;
- if (const auto *InnerValPtr = State->get<TrackedRegionMap>(ThisRegion)) {
- InnerPointerVal = *InnerValPtr;
- } else {
- const auto *CallExpr = Call.getOriginExpr();
- InnerPointerVal = C.getSValBuilder().conjureSymbolVal(
- CallExpr, C.getLocationContext(), Call.getResultType(), C.blockCount());
- State = State->set<TrackedRegionMap>(ThisRegion, InnerPointerVal);
- }
-
+ std::tie(InnerPointerVal, State) = retrieveOrConjureInnerPtrVal(
+ State, ThisRegion, Call.getOriginExpr(), Call.getResultType(), C);
State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
InnerPointerVal);
// TODO: Add NoteTag, for how the raw pointer got using 'get' method.
diff --git a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index d1c366a94fac..e758b465af1b 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -56,7 +56,11 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+
+#include <string>
using namespace clang;
using namespace clang::ento;
@@ -87,6 +91,10 @@ class StdLibraryFunctionsChecker
typedef uint32_t ArgNo;
static const ArgNo Ret;
+ /// Returns the string representation of an argument index.
+ /// E.g.: (1) -> '1st arg', (2) -> '2nd arg'
+ static SmallString<8> getArgDesc(ArgNo);
+
class ValueConstraint;
// Pointer to the ValueConstraint. We need a copyable, polymorphic and
@@ -126,8 +134,24 @@ class StdLibraryFunctionsChecker
}
ArgNo getArgNo() const { return ArgN; }
+ // Return those arguments that should be tracked when we report a bug. By
+ // default it is the argument that is constrained, however, in some special
+ // cases we need to track other arguments as well. E.g. a buffer size might
+ // be encoded in another argument.
+ virtual std::vector<ArgNo> getArgsToTrack() const { return {ArgN}; }
+
virtual StringRef getName() const = 0;
+ // Give a description that explains the constraint to the user. Used when
+ // the bug is reported.
+ virtual std::string describe(ProgramStateRef State,
+ const Summary &Summary) const {
+ // There are some descendant classes that are not used as argument
+ // constraints, e.g. ComparisonConstraint. In that case we can safely
+ // ignore the implementation of this function.
+ llvm_unreachable("Not implemented");
+ }
+
protected:
ArgNo ArgN; // Argument to which we apply the constraint.
@@ -158,6 +182,9 @@ class StdLibraryFunctionsChecker
RangeConstraint(ArgNo ArgN, RangeKind Kind, const IntRangeVector &Ranges)
: ValueConstraint(ArgN), Kind(Kind), Ranges(Ranges) {}
+ std::string describe(ProgramStateRef State,
+ const Summary &Summary) const override;
+
const IntRangeVector &getRanges() const { return Ranges; }
private:
@@ -225,6 +252,8 @@ class StdLibraryFunctionsChecker
bool CannotBeNull = true;
public:
+ std::string describe(ProgramStateRef State,
+ const Summary &Summary) const override;
StringRef getName() const override { return "NonNull"; }
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
@@ -286,6 +315,18 @@ class StdLibraryFunctionsChecker
: ValueConstraint(Buffer), SizeArgN(BufSize),
SizeMultiplierArgN(BufSizeMultiplier) {}
+ std::vector<ArgNo> getArgsToTrack() const override {
+ std::vector<ArgNo> Result{ArgN};
+ if (SizeArgN)
+ Result.push_back(*SizeArgN);
+ if (SizeMultiplierArgN)
+ Result.push_back(*SizeMultiplierArgN);
+ return Result;
+ }
+
+ std::string describe(ProgramStateRef State,
+ const Summary &Summary) const override;
+
ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
const Summary &Summary,
CheckerContext &C) const override {
@@ -297,24 +338,22 @@ class StdLibraryFunctionsChecker
const SVal SizeV = [this, &State, &Call, &Summary, &SvalBuilder]() {
if (ConcreteSize) {
return SVal(SvalBuilder.makeIntVal(*ConcreteSize));
- } else if (SizeArgN) {
- // The size argument.
- SVal SizeV = getArgSVal(Call, *SizeArgN);
- // Multiply with another argument if given.
- if (SizeMultiplierArgN) {
- SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
- SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
- Summary.getArgType(*SizeArgN));
- }
- return SizeV;
- } else {
- llvm_unreachable("The constraint must be either a concrete value or "
- "encoded in an arguement.");
}
+ assert(SizeArgN && "The constraint must be either a concrete value or "
+ "encoded in an argument.");
+ // The size argument.
+ SVal SizeV = getArgSVal(Call, *SizeArgN);
+ // Multiply with another argument if given.
+ if (SizeMultiplierArgN) {
+ SVal SizeMulV = getArgSVal(Call, *SizeMultiplierArgN);
+ SizeV = SvalBuilder.evalBinOp(State, BO_Mul, SizeV, SizeMulV,
+ Summary.getArgType(*SizeArgN));
+ }
+ return SizeV;
}();
// The dynamic size of the buffer argument, got from the analyzer engine.
- SVal BufDynSize = getDynamicSizeWithOffset(State, BufV);
+ SVal BufDynSize = getDynamicExtentWithOffset(State, BufV);
SVal Feasible = SvalBuilder.evalBinOp(State, Op, SizeV, BufDynSize,
SvalBuilder.getContext().BoolTy);
@@ -508,6 +547,7 @@ class StdLibraryFunctionsChecker
mutable FunctionSummaryMapType FunctionSummaryMap;
mutable std::unique_ptr<BugType> BT_InvalidArg;
+ mutable bool SummariesInitialized = false;
static SVal getArgSVal(const CallEvent &Call, ArgNo ArgN) {
return ArgN == Ret ? Call.getReturnValue() : Call.getArgSVal(ArgN);
@@ -538,24 +578,30 @@ private:
void initFunctionSummaries(CheckerContext &C) const;
void reportBug(const CallEvent &Call, ExplodedNode *N,
- const ValueConstraint *VC, CheckerContext &C) const {
+ const ValueConstraint *VC, const Summary &Summary,
+ CheckerContext &C) const {
if (!ChecksEnabled[CK_StdCLibraryFunctionArgsChecker])
return;
- // TODO Add more detailed diagnostic.
std::string Msg =
(Twine("Function argument constraint is not satisfied, constraint: ") +
- VC->getName().data() + ", ArgN: " + Twine(VC->getArgNo()))
+ VC->getName().data())
.str();
if (!BT_InvalidArg)
BT_InvalidArg = std::make_unique<BugType>(
CheckNames[CK_StdCLibraryFunctionArgsChecker],
"Unsatisfied argument constraints", categories::LogicError);
auto R = std::make_unique<PathSensitiveBugReport>(*BT_InvalidArg, Msg, N);
- bugreporter::trackExpressionValue(N, Call.getArgExpr(VC->getArgNo()), *R);
+
+ for (ArgNo ArgN : VC->getArgsToTrack())
+ bugreporter::trackExpressionValue(N, Call.getArgExpr(ArgN), *R);
// Highlight the range of the argument that was violated.
R->addRange(Call.getArgSourceRange(VC->getArgNo()));
+ // Describe the argument constraint in a note.
+ R->addNote(VC->describe(C.getState(), Summary), R->getLocation(),
+ Call.getArgSourceRange(VC->getArgNo()));
+
C.emitReport(std::move(R));
}
};
@@ -565,6 +611,85 @@ const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
} // end of anonymous namespace
+static BasicValueFactory &getBVF(ProgramStateRef State) {
+ ProgramStateManager &Mgr = State->getStateManager();
+ SValBuilder &SVB = Mgr.getSValBuilder();
+ return SVB.getBasicValueFactory();
+}
+
+std::string StdLibraryFunctionsChecker::NotNullConstraint::describe(
+ ProgramStateRef State, const Summary &Summary) const {
+ SmallString<48> Result;
+ Result += "The ";
+ Result += getArgDesc(ArgN);
+ Result += " should not be NULL";
+ return Result.c_str();
+}
+
+std::string StdLibraryFunctionsChecker::RangeConstraint::describe(
+ ProgramStateRef State, const Summary &Summary) const {
+
+ BasicValueFactory &BVF = getBVF(State);
+
+ QualType T = Summary.getArgType(getArgNo());
+ SmallString<48> Result;
+ Result += "The ";
+ Result += getArgDesc(ArgN);
+ Result += " should be ";
+
+ // Range kind as a string.
+ Kind == OutOfRange ? Result += "out of" : Result += "within";
+
+ // Get the range values as a string.
+ Result += " the range ";
+ if (Ranges.size() > 1)
+ Result += "[";
+ unsigned I = Ranges.size();
+ for (const std::pair<RangeInt, RangeInt> &R : Ranges) {
+ Result += "[";
+ const llvm::APSInt &Min = BVF.getValue(R.first, T);
+ const llvm::APSInt &Max = BVF.getValue(R.second, T);
+ Min.toString(Result);
+ Result += ", ";
+ Max.toString(Result);
+ Result += "]";
+ if (--I > 0)
+ Result += ", ";
+ }
+ if (Ranges.size() > 1)
+ Result += "]";
+
+ return Result.c_str();
+}
+
+SmallString<8>
+StdLibraryFunctionsChecker::getArgDesc(StdLibraryFunctionsChecker::ArgNo ArgN) {
+ SmallString<8> Result;
+ Result += std::to_string(ArgN + 1);
+ Result += llvm::getOrdinalSuffix(ArgN + 1);
+ Result += " arg";
+ return Result;
+}
+
+std::string StdLibraryFunctionsChecker::BufferSizeConstraint::describe(
+ ProgramStateRef State, const Summary &Summary) const {
+ SmallString<96> Result;
+ Result += "The size of the ";
+ Result += getArgDesc(ArgN);
+ Result += " should be equal to or less than the value of ";
+ if (ConcreteSize) {
+ ConcreteSize->toString(Result);
+ } else if (SizeArgN) {
+ Result += "the ";
+ Result += getArgDesc(*SizeArgN);
+ if (SizeMultiplierArgN) {
+ Result += " times the ";
+ Result += getArgDesc(*SizeMultiplierArgN);
+ }
+ }
+ return Result.c_str();
+}
+
ProgramStateRef StdLibraryFunctionsChecker::RangeConstraint::applyAsOutOfRange(
ProgramStateRef State, const CallEvent &Call,
const Summary &Summary) const {
@@ -692,7 +817,7 @@ void StdLibraryFunctionsChecker::checkPreCall(const CallEvent &Call,
// The argument constraint is not satisfied.
if (FailureSt && !SuccessSt) {
if (ExplodedNode *N = C.generateErrorNode(NewState))
- reportBug(Call, N, Constraint.get(), C);
+ reportBug(Call, N, Constraint.get(), Summary, C);
break;
} else {
// We will apply the constraint even if we cannot reason about the
@@ -823,7 +948,7 @@ StdLibraryFunctionsChecker::findFunctionSummary(const CallEvent &Call,
void StdLibraryFunctionsChecker::initFunctionSummaries(
CheckerContext &C) const {
- if (!FunctionSummaryMap.empty())
+ if (SummariesInitialized)
return;
SValBuilder &SVB = C.getSValBuilder();
@@ -841,7 +966,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
llvm::Optional<QualType> operator()(StringRef Name) {
IdentifierInfo &II = ACtx.Idents.get(Name);
auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
- if (LookupRes.size() == 0)
+ if (LookupRes.empty())
return None;
// Prioritze typedef declarations.
@@ -993,7 +1118,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
return false;
IdentifierInfo &II = ACtx.Idents.get(Name);
auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
- if (LookupRes.size() == 0)
+ if (LookupRes.empty())
return false;
for (Decl *D : LookupRes) {
if (auto *FD = dyn_cast<FunctionDecl>(D)) {
@@ -2441,6 +2566,35 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// Functions for testing.
if (ChecksEnabled[CK_StdCLibraryFunctionsTesterChecker]) {
addToFunctionSummaryMap(
+ "__not_null", Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure).ArgConstraint(NotNull(ArgNo(0))));
+
+ // Test range values.
+ addToFunctionSummaryMap(
+ "__single_val_1", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, SingleValue(1))));
+ addToFunctionSummaryMap(
+ "__range_1_2", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, Range(1, 2))));
+ addToFunctionSummaryMap("__range_1_2__4_5",
+ Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(
+ 0U, WithinRange, Range({1, 2}, {4, 5}))));
+
+ // Test range kind.
+ addToFunctionSummaryMap(
+ "__within", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, WithinRange, SingleValue(1))));
+ addToFunctionSummaryMap(
+ "__out_of", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(EvalCallAsPure)
+ .ArgConstraint(ArgumentCondition(0U, OutOfRange, SingleValue(1))));
+
+ addToFunctionSummaryMap(
"__two_constrained_args",
Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
@@ -2485,6 +2639,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{VoidPtrRestrictTy}, RetType{VoidTy}),
Summary(EvalCallAsPure));
}
+
+ SummariesInitialized = true;
}
void ento::registerStdCLibraryFunctionsChecker(CheckerManager &mgr) {
diff --git a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 6b176b3c4e2b..dd65f8c035aa 100644
--- a/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -25,6 +25,10 @@ using namespace clang;
using namespace ento;
using namespace std::placeholders;
+//===----------------------------------------------------------------------===//
+// Definition of state data structures.
+//===----------------------------------------------------------------------===//
+
namespace {
struct FnDescription;
@@ -146,6 +150,14 @@ struct StreamState {
}
};
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// StreamChecker class and utility functions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
class StreamChecker;
using FnCheck = std::function<void(const StreamChecker *, const FnDescription *,
const CallEvent &, CheckerContext &)>;
@@ -219,6 +231,8 @@ public:
/// If true, evaluate special testing stream functions.
bool TestMode = false;
+ const BugType *getBT_StreamEof() const { return &BT_StreamEof; }
+
private:
CallDescriptionMap<FnDescription> FnDescriptions = {
{{"fopen"}, {nullptr, &StreamChecker::evalFopen, ArgNone}},
@@ -306,7 +320,8 @@ private:
/// If it can only be NULL a fatal error is emitted and nullptr returned.
/// Otherwise the return value is a new state where the stream is constrained
/// to be non-null.
- ProgramStateRef ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
+ ProgramStateRef ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
+ CheckerContext &C,
ProgramStateRef State) const;
/// Check that the stream is the opened state.
@@ -336,7 +351,8 @@ private:
/// There will be always a state transition into the passed State,
/// by the new non-fatal error node or (if failed) a normal transition,
/// to ensure uniform handling.
- void reportFEofWarning(CheckerContext &C, ProgramStateRef State) const;
+ void reportFEofWarning(SymbolRef StreamSym, CheckerContext &C,
+ ProgramStateRef State) const;
/// Emit resource leak warnings for the given symbols.
/// Createn a non-fatal error node for these, and returns it (if any warnings
@@ -362,14 +378,14 @@ private:
/// Generate a message for BugReporterVisitor if the stored symbol is
/// marked as interesting by the actual bug report.
+ // FIXME: Use lambda instead.
struct NoteFn {
- const CheckerNameRef CheckerName;
+ const BugType *BT_ResourceLeak;
SymbolRef StreamSym;
std::string Message;
std::string operator()(PathSensitiveBugReport &BR) const {
- if (BR.isInteresting(StreamSym) &&
- CheckerName == BR.getBugType().getCheckerName())
+ if (BR.isInteresting(StreamSym) && &BR.getBugType() == BT_ResourceLeak)
return Message;
return "";
@@ -378,7 +394,20 @@ private:
const NoteTag *constructNoteTag(CheckerContext &C, SymbolRef StreamSym,
const std::string &Message) const {
- return C.getNoteTag(NoteFn{getCheckerName(), StreamSym, Message});
+ return C.getNoteTag(NoteFn{&BT_ResourceLeak, StreamSym, Message});
+ }
+
+ const NoteTag *constructSetEofNoteTag(CheckerContext &C,
+ SymbolRef StreamSym) const {
+ return C.getNoteTag([this, StreamSym](PathSensitiveBugReport &BR) {
+ if (!BR.isInteresting(StreamSym) ||
+ &BR.getBugType() != this->getBT_StreamEof())
+ return "";
+
+ BR.markNotInteresting(StreamSym);
+
+ return "Assuming stream reaches end-of-file here";
+ });
}
/// Searches for the ExplodedNode where the file descriptor was acquired for
@@ -390,6 +419,9 @@ private:
} // end anonymous namespace
+// This map holds the state of a stream.
+// The stream is identified with a SymbolRef that is created when a stream
+// opening function is modeled by the checker.
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
inline void assertStreamStateOpened(const StreamState *SS) {
@@ -418,6 +450,10 @@ const ExplodedNode *StreamChecker::getAcquisitionSite(const ExplodedNode *N,
return nullptr;
}
+//===----------------------------------------------------------------------===//
+// Methods of StreamChecker.
+//===----------------------------------------------------------------------===//
+
void StreamChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
const FnDescription *Desc = lookupFn(Call);
@@ -472,7 +508,8 @@ void StreamChecker::preFreopen(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
// Do not allow NULL as passed stream pointer but allow a closed stream.
ProgramStateRef State = C.getState();
- State = ensureStreamNonNull(getStreamArg(Desc, Call), C, State);
+ State = ensureStreamNonNull(getStreamArg(Desc, Call),
+ Call.getArgExpr(Desc->StreamArgNo), C, State);
if (!State)
return;
@@ -549,7 +586,8 @@ void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
SVal StreamVal = getStreamArg(Desc, Call);
- State = ensureStreamNonNull(StreamVal, C, State);
+ State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
+ State);
if (!State)
return;
State = ensureStreamOpened(StreamVal, C, State);
@@ -563,7 +601,7 @@ void StreamChecker::preFread(const FnDescription *Desc, const CallEvent &Call,
if (Sym && State->get<StreamMap>(Sym)) {
const StreamState *SS = State->get<StreamMap>(Sym);
if (SS->ErrorState & ErrorFEof)
- reportFEofWarning(C, State);
+ reportFEofWarning(Sym, C, State);
} else {
C.addTransition(State);
}
@@ -573,7 +611,8 @@ void StreamChecker::preFwrite(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
SVal StreamVal = getStreamArg(Desc, Call);
- State = ensureStreamNonNull(StreamVal, C, State);
+ State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
+ State);
if (!State)
return;
State = ensureStreamOpened(StreamVal, C, State);
@@ -605,11 +644,11 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
if (!NMembVal)
return;
- const StreamState *SS = State->get<StreamMap>(StreamSym);
- if (!SS)
+ const StreamState *OldSS = State->get<StreamMap>(StreamSym);
+ if (!OldSS)
return;
- assertStreamStateOpened(SS);
+ assertStreamStateOpened(OldSS);
// C'99 standard, §7.19.8.1.3, the return value of fread:
// The fread function returns the number of elements successfully read, which
@@ -628,7 +667,7 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
// Generate a transition for the success state.
// If we know the state to be FEOF at fread, do not add a success state.
- if (!IsFread || (SS->ErrorState != ErrorFEof)) {
+ if (!IsFread || (OldSS->ErrorState != ErrorFEof)) {
ProgramStateRef StateNotFailed =
State->BindExpr(CE, C.getLocationContext(), *NMembVal);
if (StateNotFailed) {
@@ -657,21 +696,26 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
StreamErrorState NewES;
if (IsFread)
- NewES = (SS->ErrorState == ErrorFEof) ? ErrorFEof : ErrorFEof | ErrorFError;
+ NewES =
+ (OldSS->ErrorState == ErrorFEof) ? ErrorFEof : ErrorFEof | ErrorFError;
else
NewES = ErrorFError;
// If a (non-EOF) error occurs, the resulting value of the file position
// indicator for the stream is indeterminate.
- StreamState NewState = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
- StateFailed = StateFailed->set<StreamMap>(StreamSym, NewState);
- C.addTransition(StateFailed);
+ StreamState NewSS = StreamState::getOpened(Desc, NewES, !NewES.isFEof());
+ StateFailed = StateFailed->set<StreamMap>(StreamSym, NewSS);
+ if (IsFread && OldSS->ErrorState != ErrorFEof)
+ C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
+ else
+ C.addTransition(StateFailed);
}
void StreamChecker::preFseek(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
SVal StreamVal = getStreamArg(Desc, Call);
- State = ensureStreamNonNull(StreamVal, C, State);
+ State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
+ State);
if (!State)
return;
State = ensureStreamOpened(StreamVal, C, State);
@@ -722,7 +766,7 @@ void StreamChecker::evalFseek(const FnDescription *Desc, const CallEvent &Call,
StreamState::getOpened(Desc, ErrorNone | ErrorFEof | ErrorFError, true));
C.addTransition(StateNotFailed);
- C.addTransition(StateFailed);
+ C.addTransition(StateFailed, constructSetEofNoteTag(C, StreamSym));
}
void StreamChecker::evalClearerr(const FnDescription *Desc,
@@ -790,7 +834,8 @@ void StreamChecker::preDefault(const FnDescription *Desc, const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
SVal StreamVal = getStreamArg(Desc, Call);
- State = ensureStreamNonNull(StreamVal, C, State);
+ State = ensureStreamNonNull(StreamVal, Call.getArgExpr(Desc->StreamArgNo), C,
+ State);
if (!State)
return;
State = ensureStreamOpened(StreamVal, C, State);
@@ -814,7 +859,8 @@ void StreamChecker::evalSetFeofFerror(const FnDescription *Desc,
}
ProgramStateRef
-StreamChecker::ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
+StreamChecker::ensureStreamNonNull(SVal StreamVal, const Expr *StreamE,
+ CheckerContext &C,
ProgramStateRef State) const {
auto Stream = StreamVal.getAs<DefinedSVal>();
if (!Stream)
@@ -827,8 +873,11 @@ StreamChecker::ensureStreamNonNull(SVal StreamVal, CheckerContext &C,
if (!StateNotNull && StateNull) {
if (ExplodedNode *N = C.generateErrorNode(StateNull)) {
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
- BT_FileNull, "Stream pointer might be NULL.", N));
+ auto R = std::make_unique<PathSensitiveBugReport>(
+ BT_FileNull, "Stream pointer might be NULL.", N);
+ if (StreamE)
+ bugreporter::trackExpressionValue(N, StreamE, *R);
+ C.emitReport(std::move(R));
}
return nullptr;
}
@@ -950,14 +999,16 @@ StreamChecker::ensureFseekWhenceCorrect(SVal WhenceVal, CheckerContext &C,
return State;
}
-void StreamChecker::reportFEofWarning(CheckerContext &C,
+void StreamChecker::reportFEofWarning(SymbolRef StreamSym, CheckerContext &C,
ProgramStateRef State) const {
if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
- C.emitReport(std::make_unique<PathSensitiveBugReport>(
+ auto R = std::make_unique<PathSensitiveBugReport>(
BT_StreamEof,
"Read function called when stream is in EOF state. "
"Function has no effect.",
- N));
+ N);
+ R->markInteresting(StreamSym);
+ C.emitReport(std::move(R));
return;
}
C.addTransition(State);
@@ -1048,6 +1099,10 @@ ProgramStateRef StreamChecker::checkPointerEscape(
return State;
}
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
void ento::registerStreamChecker(CheckerManager &Mgr) {
Mgr.registerChecker<StreamChecker>();
}
@@ -1063,4 +1118,4 @@ void ento::registerStreamTesterChecker(CheckerManager &Mgr) {
bool ento::shouldRegisterStreamTesterChecker(const CheckerManager &Mgr) {
return true;
-}
+} \ No newline at end of file
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
index e457513d8de4..816a547cadc3 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -86,9 +86,9 @@ UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
auto R = std::make_unique<PathSensitiveBugReport>(*BT, os.str(), N);
if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
R->addRange(Ex->getSourceRange());
- R->addVisitor(std::make_unique<FindLastStoreBRVisitor>(
- *V, VR, /*EnableNullFPSuppression*/ false,
- bugreporter::TrackingKind::Thorough));
+ bugreporter::trackStoredValue(*V, VR, *R,
+ {bugreporter::TrackingKind::Thorough,
+ /*EnableNullFPSuppression*/ false});
R->disablePathPruning();
// need location of block
C.emitReport(std::move(R));
diff --git a/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 392da4818098..477d910bc653 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -16,7 +16,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
diff --git a/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
index 74eec81ffb3e..d231be64c2e1 100644
--- a/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -169,7 +169,7 @@ void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
if (SM.isInSystemHeader(SL) || SM.isInExternCSystemHeader(SL))
continue;
- B.EmitBasicReport(D, this, "Unreachable code", "Dead code",
+ B.EmitBasicReport(D, this, "Unreachable code", categories::UnusedCode,
"This statement is never executed", DL, SR);
}
}
diff --git a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index d76b2a06aba5..96501215c689 100644
--- a/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -20,7 +20,7 @@
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/raw_ostream.h"
@@ -285,21 +285,11 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
return;
}
- // VLASizeChecker is responsible for defining the extent of the array being
- // declared. We do this by multiplying the array length by the element size,
- // then matching that with the array region's extent symbol.
-
+ // VLASizeChecker is responsible for defining the extent of the array.
if (VD) {
- // Assume that the array's size matches the region size.
- const LocationContext *LC = C.getLocationContext();
- DefinedOrUnknownSVal DynSize =
- getDynamicSize(State, State->getRegion(VD, LC), SVB);
-
- DefinedOrUnknownSVal SizeIsKnown = SVB.evalEQ(State, DynSize, *ArraySizeNL);
- State = State->assume(SizeIsKnown, true);
-
- // Assume should not fail at this point.
- assert(State);
+ State =
+ setDynamicExtent(State, State->getRegion(VD, C.getLocationContext()),
+ ArraySize.castAs<DefinedOrUnknownSVal>(), SVB);
}
// Remember our assumptions!
diff --git a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
index d1f5ac02278f..40cdaef1bfa7 100644
--- a/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
#include <cassert>
#include <cstdint>
#include <utility>
@@ -176,28 +177,73 @@ const PointerToMemberData *BasicValueFactory::getPointerToMemberData(
return D;
}
+LLVM_ATTRIBUTE_UNUSED bool hasNoRepeatedElements(
+ llvm::ImmutableList<const CXXBaseSpecifier *> BaseSpecList) {
+ llvm::SmallPtrSet<QualType, 16> BaseSpecSeen;
+ for (const CXXBaseSpecifier *BaseSpec : BaseSpecList) {
+ QualType BaseType = BaseSpec->getType();
+ // Check whether inserted
+ if (!BaseSpecSeen.insert(BaseType).second)
+ return false;
+ }
+ return true;
+}
+
const PointerToMemberData *BasicValueFactory::accumCXXBase(
llvm::iterator_range<CastExpr::path_const_iterator> PathRange,
- const nonloc::PointerToMember &PTM) {
+ const nonloc::PointerToMember &PTM, const CastKind &kind) {
+ assert((kind == CK_DerivedToBaseMemberPointer ||
+ kind == CK_BaseToDerivedMemberPointer ||
+ kind == CK_ReinterpretMemberPointer) &&
+ "accumCXXBase called with wrong CastKind");
nonloc::PointerToMember::PTMDataType PTMDT = PTM.getPTMData();
const NamedDecl *ND = nullptr;
- llvm::ImmutableList<const CXXBaseSpecifier *> PathList;
+ llvm::ImmutableList<const CXXBaseSpecifier *> BaseSpecList;
if (PTMDT.isNull() || PTMDT.is<const NamedDecl *>()) {
if (PTMDT.is<const NamedDecl *>())
ND = PTMDT.get<const NamedDecl *>();
- PathList = CXXBaseListFactory.getEmptyList();
- } else { // const PointerToMemberData *
+ BaseSpecList = CXXBaseListFactory.getEmptyList();
+ } else {
const PointerToMemberData *PTMD = PTMDT.get<const PointerToMemberData *>();
ND = PTMD->getDeclaratorDecl();
- PathList = PTMD->getCXXBaseList();
+ BaseSpecList = PTMD->getCXXBaseList();
}
- for (const auto &I : llvm::reverse(PathRange))
- PathList = prependCXXBase(I, PathList);
- return getPointerToMemberData(ND, PathList);
+ assert(hasNoRepeatedElements(BaseSpecList) &&
+ "CXXBaseSpecifier list of PointerToMemberData must not have repeated "
+ "elements");
+
+ if (kind == CK_DerivedToBaseMemberPointer) {
+ // Here we pop off matching CXXBaseSpecifiers from BaseSpecList.
+ // Because, CK_DerivedToBaseMemberPointer comes from a static_cast and
+ // serves to remove a matching implicit cast. Note that static_cast's that
+ // are no-ops do not count since they produce an empty PathRange, a nice
+ // thing about Clang AST.
+
+ // Now we know that there are no repetitions in BaseSpecList.
+ // So, popping the first element from it corresponding to each element in
+ // PathRange is equivalent to only including elements that are in
+ // BaseSpecList but not in PathRange.
+ auto ReducedBaseSpecList = CXXBaseListFactory.getEmptyList();
+ for (const CXXBaseSpecifier *BaseSpec : BaseSpecList) {
+ auto IsSameAsBaseSpec = [&BaseSpec](const CXXBaseSpecifier *I) -> bool {
+ return BaseSpec->getType() == I->getType();
+ };
+ if (llvm::none_of(PathRange, IsSameAsBaseSpec))
+ ReducedBaseSpecList =
+ CXXBaseListFactory.add(BaseSpec, ReducedBaseSpecList);
+ }
+
+ return getPointerToMemberData(ND, ReducedBaseSpecList);
+ }
+ // FIXME: Reinterpret casts on member-pointers are not handled properly by
+ // this code
+ for (const CXXBaseSpecifier *I : llvm::reverse(PathRange))
+ BaseSpecList = prependCXXBase(I, BaseSpecList);
+ return getPointerToMemberData(ND, BaseSpecList);
}
const llvm::APSInt*
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index bf38891b370a..d6f69ae03afe 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -1988,14 +1988,6 @@ PathDiagnosticBuilder::generate(const PathDiagnosticConsumer *PDC) const {
const SourceManager &SM = getSourceManager();
const AnalyzerOptions &Opts = getAnalyzerOptions();
- StringRef ErrorTag = ErrorNode->getLocation().getTag()->getTagDescription();
-
- // See whether we need to silence the checker/package.
- // FIXME: This will not work if the report was emitted with an incorrect tag.
- for (const std::string &CheckerOrPackage : Opts.SilencedCheckersAndPackages) {
- if (ErrorTag.startswith(CheckerOrPackage))
- return nullptr;
- }
if (!PDC->shouldGenerateDiagnostics())
return generateEmptyDiagnosticForReport(R, getSourceManager());
@@ -2257,10 +2249,24 @@ void PathSensitiveBugReport::markInteresting(SymbolRef sym,
insertToInterestingnessMap(InterestingSymbols, sym, TKind);
+ // FIXME: No tests exist for this code and it is questionable:
+ // How to handle multiple metadata for the same region?
if (const auto *meta = dyn_cast<SymbolMetadata>(sym))
markInteresting(meta->getRegion(), TKind);
}
+void PathSensitiveBugReport::markNotInteresting(SymbolRef sym) {
+ if (!sym)
+ return;
+ InterestingSymbols.erase(sym);
+
+ // The metadata part of markInteresting is not reversed here.
+ // Just making the same region not interesting is incorrect
+ // in specific cases.
+ if (const auto *meta = dyn_cast<SymbolMetadata>(sym))
+ markNotInteresting(meta->getRegion());
+}
+
void PathSensitiveBugReport::markInteresting(const MemRegion *R,
bugreporter::TrackingKind TKind) {
if (!R)
@@ -2273,6 +2279,17 @@ void PathSensitiveBugReport::markInteresting(const MemRegion *R,
markInteresting(SR->getSymbol(), TKind);
}
+void PathSensitiveBugReport::markNotInteresting(const MemRegion *R) {
+ if (!R)
+ return;
+
+ R = R->getBaseRegion();
+ InterestingRegions.erase(R);
+
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R))
+ markNotInteresting(SR->getSymbol());
+}
+
void PathSensitiveBugReport::markInteresting(SVal V,
bugreporter::TrackingKind TKind) {
markInteresting(V.getAsRegion(), TKind);
@@ -2738,8 +2755,8 @@ static void CompactMacroExpandedPieces(PathPieces &path,
}
/// Generate notes from all visitors.
-/// Notes associated with {@code ErrorNode} are generated using
-/// {@code getEndPath}, and the rest are generated with {@code VisitNode}.
+/// Notes associated with @c ErrorNode are generated using
+/// @c getEndPath, and the rest are generated with @c VisitNode.
static std::unique_ptr<VisitorsDiagnosticsTy>
generateVisitorsDiagnostics(PathSensitiveBugReport *R,
const ExplodedNode *ErrorNode,
@@ -2749,7 +2766,7 @@ generateVisitorsDiagnostics(PathSensitiveBugReport *R,
PathSensitiveBugReport::VisitorList visitors;
// Run visitors on all nodes starting from the node *before* the last one.
- // The last node is reserved for notes generated with {@code getEndPath}.
+ // The last node is reserved for notes generated with @c getEndPath.
const ExplodedNode *NextNode = ErrorNode->getFirstPred();
while (NextNode) {
@@ -2811,12 +2828,12 @@ Optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
// Register refutation visitors first, if they mark the bug invalid no
// further analysis is required
- R->addVisitor(std::make_unique<LikelyFalsePositiveSuppressionBRVisitor>());
+ R->addVisitor<LikelyFalsePositiveSuppressionBRVisitor>();
// Register additional node visitors.
- R->addVisitor(std::make_unique<NilReceiverBRVisitor>());
- R->addVisitor(std::make_unique<ConditionBRVisitor>());
- R->addVisitor(std::make_unique<TagVisitor>());
+ R->addVisitor<NilReceiverBRVisitor>();
+ R->addVisitor<ConditionBRVisitor>();
+ R->addVisitor<TagVisitor>();
BugReporterContext BRC(Reporter);
@@ -2829,7 +2846,7 @@ Optional<PathDiagnosticBuilder> PathDiagnosticBuilder::findValidReport(
// If crosscheck is enabled, remove all visitors, add the refutation
// visitor and check again
R->clearVisitors();
- R->addVisitor(std::make_unique<FalsePositiveRefutationBRVisitor>());
+ R->addVisitor<FalsePositiveRefutationBRVisitor>();
// We don't overwrite the notes inserted by other visitors because the
// refutation manager does not add any new note to the path
@@ -3041,6 +3058,14 @@ void BugReporter::FlushReport(BugReportEquivClass& EQ) {
if (!report)
return;
+ // See whether we need to silence the checker/package.
+ for (const std::string &CheckerOrPackage :
+ getAnalyzerOptions().SilencedCheckersAndPackages) {
+ if (report->getBugType().getCheckerName().startswith(
+ CheckerOrPackage))
+ return;
+ }
+
ArrayRef<PathDiagnosticConsumer*> Consumers = getPathDiagnosticConsumers();
std::unique_ptr<DiagnosticForConsumerMapTy> Diagnostics =
generateDiagnosticForConsumerMap(report, Consumers, bugReports);
diff --git a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index bc72f4f8c1e3..d06a2d493303 100644
--- a/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -65,6 +65,7 @@
using namespace clang;
using namespace ento;
+using namespace bugreporter;
//===----------------------------------------------------------------------===//
// Utility functions.
@@ -153,6 +154,28 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return E;
}
+static const MemRegion *
+getLocationRegionIfReference(const Expr *E, const ExplodedNode *N,
+ bool LookingForReference = true) {
+ if (const auto *DR = dyn_cast<DeclRefExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (LookingForReference && !VD->getType()->isReferenceType())
+ return nullptr;
+ return N->getState()
+ ->getLValue(VD, N->getLocationContext())
+ .getAsRegion();
+ }
+ }
+
+ // FIXME: This does not handle other kinds of null references,
+ // for example, references from FieldRegions:
+ // struct Wrapper { int &ref; };
+ // Wrapper w = { *(int *)0 };
+ // w.ref = 1;
+
+ return nullptr;
+}
+
/// Comparing internal representations of symbolic values (via
/// SVal::operator==()) is a valid way to check if the value was updated,
/// unless it's a LazyCompoundVal that may have a different internal
@@ -830,10 +853,10 @@ public:
bool EnableNullFPSuppression, PathSensitiveBugReport &BR,
const SVal V) {
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
- if (EnableNullFPSuppression &&
- Options.ShouldSuppressNullReturnPaths && V.getAs<Loc>())
- BR.addVisitor(std::make_unique<MacroNullReturnSuppressionVisitor>(
- R->getAs<SubRegion>(), V));
+ if (EnableNullFPSuppression && Options.ShouldSuppressNullReturnPaths &&
+ V.getAs<Loc>())
+ BR.addVisitor<MacroNullReturnSuppressionVisitor>(R->getAs<SubRegion>(),
+ V);
}
void* getTag() const {
@@ -883,7 +906,7 @@ namespace {
///
/// This visitor is intended to be used when another visitor discovers that an
/// interesting value comes from an inlined function call.
-class ReturnVisitor : public BugReporterVisitor {
+class ReturnVisitor : public TrackingBugReporterVisitor {
const StackFrameContext *CalleeSFC;
enum {
Initial,
@@ -897,10 +920,11 @@ class ReturnVisitor : public BugReporterVisitor {
bugreporter::TrackingKind TKind;
public:
- ReturnVisitor(const StackFrameContext *Frame, bool Suppressed,
- AnalyzerOptions &Options, bugreporter::TrackingKind TKind)
- : CalleeSFC(Frame), EnableNullFPSuppression(Suppressed),
- Options(Options), TKind(TKind) {}
+ ReturnVisitor(TrackerRef ParentTracker, const StackFrameContext *Frame,
+ bool Suppressed, AnalyzerOptions &Options,
+ bugreporter::TrackingKind TKind)
+ : TrackingBugReporterVisitor(ParentTracker), CalleeSFC(Frame),
+ EnableNullFPSuppression(Suppressed), Options(Options), TKind(TKind) {}
static void *getTag() {
static int Tag = 0;
@@ -913,92 +937,6 @@ public:
ID.AddBoolean(EnableNullFPSuppression);
}
- /// Adds a ReturnVisitor if the given statement represents a call that was
- /// inlined.
- ///
- /// This will search back through the ExplodedGraph, starting from the given
- /// node, looking for when the given statement was processed. If it turns out
- /// the statement is a call that was inlined, we add the visitor to the
- /// bug report, so it can print a note later.
- static void addVisitorIfNecessary(const ExplodedNode *Node, const Stmt *S,
- PathSensitiveBugReport &BR,
- bool InEnableNullFPSuppression,
- bugreporter::TrackingKind TKind) {
- if (!CallEvent::isCallStmt(S))
- return;
-
- // First, find when we processed the statement.
- // If we work with a 'CXXNewExpr' that is going to be purged away before
- // its call take place. We would catch that purge in the last condition
- // as a 'StmtPoint' so we have to bypass it.
- const bool BypassCXXNewExprEval = isa<CXXNewExpr>(S);
-
- // This is moving forward when we enter into another context.
- const StackFrameContext *CurrentSFC = Node->getStackFrame();
-
- do {
- // If that is satisfied we found our statement as an inlined call.
- if (Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>())
- if (CEE->getCalleeContext()->getCallSite() == S)
- break;
-
- // Try to move forward to the end of the call-chain.
- Node = Node->getFirstPred();
- if (!Node)
- break;
-
- const StackFrameContext *PredSFC = Node->getStackFrame();
-
- // If that is satisfied we found our statement.
- // FIXME: This code currently bypasses the call site for the
- // conservatively evaluated allocator.
- if (!BypassCXXNewExprEval)
- if (Optional<StmtPoint> SP = Node->getLocationAs<StmtPoint>())
- // See if we do not enter into another context.
- if (SP->getStmt() == S && CurrentSFC == PredSFC)
- break;
-
- CurrentSFC = PredSFC;
- } while (Node->getStackFrame() == CurrentSFC);
-
- // Next, step over any post-statement checks.
- while (Node && Node->getLocation().getAs<PostStmt>())
- Node = Node->getFirstPred();
- if (!Node)
- return;
-
- // Finally, see if we inlined the call.
- Optional<CallExitEnd> CEE = Node->getLocationAs<CallExitEnd>();
- if (!CEE)
- return;
-
- const StackFrameContext *CalleeContext = CEE->getCalleeContext();
- if (CalleeContext->getCallSite() != S)
- return;
-
- // Check the return value.
- ProgramStateRef State = Node->getState();
- SVal RetVal = Node->getSVal(S);
-
- // Handle cases where a reference is returned and then immediately used.
- if (cast<Expr>(S)->isGLValue())
- if (Optional<Loc> LValue = RetVal.getAs<Loc>())
- RetVal = State->getSVal(*LValue);
-
- // See if the return value is NULL. If so, suppress the report.
- AnalyzerOptions &Options = State->getAnalysisManager().options;
-
- bool EnableNullFPSuppression = false;
- if (InEnableNullFPSuppression &&
- Options.ShouldSuppressNullReturnPaths)
- if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
- EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
-
- BR.addVisitor(std::make_unique<ReturnVisitor>(CalleeContext,
- EnableNullFPSuppression,
- Options, TKind));
- }
-
PathDiagnosticPieceRef visitNodeInitial(const ExplodedNode *N,
BugReporterContext &BRC,
PathSensitiveBugReport &BR) {
@@ -1045,8 +983,7 @@ public:
RetE = RetE->IgnoreParenCasts();
// Let's track the return value.
- bugreporter::trackExpressionValue(
- N, RetE, BR, TKind, EnableNullFPSuppression);
+ getParentTracker().track(RetE, N, {TKind, EnableNullFPSuppression});
// Build an appropriate message based on the return value.
SmallString<64> Msg;
@@ -1162,7 +1099,9 @@ public:
if (!State->isNull(*ArgV).isConstrainedTrue())
continue;
- if (trackExpressionValue(N, ArgE, BR, TKind, EnableNullFPSuppression))
+ if (getParentTracker()
+ .track(ArgE, N, {TKind, EnableNullFPSuppression})
+ .FoundSomethingToTrack)
ShouldInvalidate = false;
// If we /can't/ track the null pointer, we should err on the side of
@@ -1198,16 +1137,52 @@ public:
} // end of anonymous namespace
//===----------------------------------------------------------------------===//
-// Implementation of FindLastStoreBRVisitor.
+// StoreSiteFinder
//===----------------------------------------------------------------------===//
-void FindLastStoreBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+/// Finds last store into the given region,
+/// which is different from a given symbolic value.
+class StoreSiteFinder final : public TrackingBugReporterVisitor {
+ const MemRegion *R;
+ SVal V;
+ bool Satisfied = false;
+
+ TrackingOptions Options;
+ const StackFrameContext *OriginSFC;
+
+public:
+ /// \param V We're searching for the store where \c R received this value.
+ /// \param R The region we're tracking.
+ /// \param TKind May limit the amount of notes added to the bug report.
+ /// \param OriginSFC Only adds notes when the last store happened in a
+ /// different stackframe to this one. Disregarded if the tracking kind
+ /// is thorough.
+ /// This is useful, because for non-tracked regions, notes about
+ /// changes to its value in a nested stackframe could be pruned, and
+ /// this visitor can prevent that without polluting the bugpath too
+ /// much.
+ StoreSiteFinder(bugreporter::TrackerRef ParentTracker, KnownSVal V,
+ const MemRegion *R, TrackingOptions Options,
+ const StackFrameContext *OriginSFC = nullptr)
+ : TrackingBugReporterVisitor(ParentTracker), R(R), V(V), Options(Options),
+ OriginSFC(OriginSFC) {
+ assert(R);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const override;
+
+ PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
+ BugReporterContext &BRC,
+ PathSensitiveBugReport &BR) override;
+};
+
+void StoreSiteFinder::Profile(llvm::FoldingSetNodeID &ID) const {
static int tag = 0;
ID.AddPointer(&tag);
ID.AddPointer(R);
ID.Add(V);
- ID.AddInteger(static_cast<int>(TKind));
- ID.AddBoolean(EnableNullFPSuppression);
+ ID.AddInteger(static_cast<int>(Options.Kind));
+ ID.AddBoolean(Options.EnableNullFPSuppression);
}
/// Returns true if \p N represents the DeclStmt declaring and initializing
@@ -1239,127 +1214,152 @@ static bool isInitializationOfVar(const ExplodedNode *N, const VarRegion *VR) {
return FrameSpace->getStackFrame() == LCtx->getStackFrame();
}
+static bool isObjCPointer(const MemRegion *R) {
+ if (R->isBoundable())
+ if (const auto *TR = dyn_cast<TypedValueRegion>(R))
+ return TR->getValueType()->isObjCObjectPointerType();
+
+ return false;
+}
+
+static bool isObjCPointer(const ValueDecl *D) {
+ return D->getType()->isObjCObjectPointerType();
+}
+
/// Show diagnostics for initializing or declaring a region \p R with a bad value.
-static void showBRDiagnostics(const char *action, llvm::raw_svector_ostream &os,
- const MemRegion *R, SVal V, const DeclStmt *DS) {
- if (R->canPrintPretty()) {
- R->printPretty(os);
- os << " ";
- }
-
- if (V.getAs<loc::ConcreteInt>()) {
- bool b = false;
- if (R->isBoundable()) {
- if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
- if (TR->getValueType()->isObjCObjectPointerType()) {
- os << action << "nil";
- b = true;
- }
- }
- }
- if (!b)
- os << action << "a null pointer value";
-
- } else if (auto CVal = V.getAs<nonloc::ConcreteInt>()) {
- os << action << CVal->getValue();
- } else if (DS) {
- if (V.isUndef()) {
- if (isa<VarRegion>(R)) {
+static void showBRDiagnostics(llvm::raw_svector_ostream &OS, StoreInfo SI) {
+ const bool HasPrefix = SI.Dest->canPrintPretty();
+
+ if (HasPrefix) {
+ SI.Dest->printPretty(OS);
+ OS << " ";
+ }
+
+ const char *Action = nullptr;
+
+ switch (SI.StoreKind) {
+ case StoreInfo::Initialization:
+ Action = HasPrefix ? "initialized to " : "Initializing to ";
+ break;
+ case StoreInfo::BlockCapture:
+ Action = HasPrefix ? "captured by block as " : "Captured by block as ";
+ break;
+ default:
+ llvm_unreachable("Unexpected store kind");
+ }
+
+ if (SI.Value.getAs<loc::ConcreteInt>()) {
+ OS << Action << (isObjCPointer(SI.Dest) ? "nil" : "a null pointer value");
+
+ } else if (auto CVal = SI.Value.getAs<nonloc::ConcreteInt>()) {
+ OS << Action << CVal->getValue();
+
+ } else if (SI.Origin && SI.Origin->canPrintPretty()) {
+ OS << Action << "the value of ";
+ SI.Origin->printPretty(OS);
+
+ } else if (SI.StoreKind == StoreInfo::Initialization) {
+    // We don't need to check here; all these conditions were
+    // already checked by StoreSiteFinder when it figured out that
+    // this is an initialization.
+ const auto *DS =
+ cast<DeclStmt>(SI.StoreSite->getLocationAs<PostStmt>()->getStmt());
+
+ if (SI.Value.isUndef()) {
+ if (isa<VarRegion>(SI.Dest)) {
const auto *VD = cast<VarDecl>(DS->getSingleDecl());
+
if (VD->getInit()) {
- os << (R->canPrintPretty() ? "initialized" : "Initializing")
- << " to a garbage value";
+ OS << (HasPrefix ? "initialized" : "Initializing")
+ << " to a garbage value";
} else {
- os << (R->canPrintPretty() ? "declared" : "Declaring")
- << " without an initial value";
+ OS << (HasPrefix ? "declared" : "Declaring")
+ << " without an initial value";
}
}
} else {
- os << (R->canPrintPretty() ? "initialized" : "Initialized")
- << " here";
+ OS << (HasPrefix ? "initialized" : "Initialized") << " here";
}
}
}
/// Display diagnostics for passing bad region as a parameter.
-static void showBRParamDiagnostics(llvm::raw_svector_ostream& os,
- const VarRegion *VR,
- SVal V) {
+static void showBRParamDiagnostics(llvm::raw_svector_ostream &OS,
+ StoreInfo SI) {
+ const auto *VR = cast<VarRegion>(SI.Dest);
const auto *Param = cast<ParmVarDecl>(VR->getDecl());
- os << "Passing ";
+ OS << "Passing ";
+
+ if (SI.Value.getAs<loc::ConcreteInt>()) {
+ OS << (isObjCPointer(Param) ? "nil object reference"
+ : "null pointer value");
+
+ } else if (SI.Value.isUndef()) {
+ OS << "uninitialized value";
+
+ } else if (auto CI = SI.Value.getAs<nonloc::ConcreteInt>()) {
+ OS << "the value " << CI->getValue();
+
+ } else if (SI.Origin && SI.Origin->canPrintPretty()) {
+ SI.Origin->printPretty(OS);
- if (V.getAs<loc::ConcreteInt>()) {
- if (Param->getType()->isObjCObjectPointerType())
- os << "nil object reference";
- else
- os << "null pointer value";
- } else if (V.isUndef()) {
- os << "uninitialized value";
- } else if (auto CI = V.getAs<nonloc::ConcreteInt>()) {
- os << "the value " << CI->getValue();
} else {
- os << "value";
+ OS << "value";
}
// Printed parameter indexes are 1-based, not 0-based.
unsigned Idx = Param->getFunctionScopeIndex() + 1;
- os << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
+ OS << " via " << Idx << llvm::getOrdinalSuffix(Idx) << " parameter";
if (VR->canPrintPretty()) {
- os << " ";
- VR->printPretty(os);
+ OS << " ";
+ VR->printPretty(OS);
}
}
/// Show default diagnostics for storing bad region.
-static void showBRDefaultDiagnostics(llvm::raw_svector_ostream &os,
- const MemRegion *R, SVal V) {
- if (V.getAs<loc::ConcreteInt>()) {
- bool b = false;
- if (R->isBoundable()) {
- if (const auto *TR = dyn_cast<TypedValueRegion>(R)) {
- if (TR->getValueType()->isObjCObjectPointerType()) {
- os << "nil object reference stored";
- b = true;
- }
- }
- }
- if (!b) {
- if (R->canPrintPretty())
- os << "Null pointer value stored";
- else
- os << "Storing null pointer value";
- }
-
- } else if (V.isUndef()) {
- if (R->canPrintPretty())
- os << "Uninitialized value stored";
+static void showBRDefaultDiagnostics(llvm::raw_svector_ostream &OS,
+ StoreInfo SI) {
+ const bool HasSuffix = SI.Dest->canPrintPretty();
+
+ if (SI.Value.getAs<loc::ConcreteInt>()) {
+ OS << (isObjCPointer(SI.Dest) ? "nil object reference stored"
+ : (HasSuffix ? "Null pointer value stored"
+ : "Storing null pointer value"));
+
+ } else if (SI.Value.isUndef()) {
+ OS << (HasSuffix ? "Uninitialized value stored"
+ : "Storing uninitialized value");
+
+ } else if (auto CV = SI.Value.getAs<nonloc::ConcreteInt>()) {
+ if (HasSuffix)
+ OS << "The value " << CV->getValue() << " is assigned";
else
- os << "Storing uninitialized value";
+ OS << "Assigning " << CV->getValue();
- } else if (auto CV = V.getAs<nonloc::ConcreteInt>()) {
- if (R->canPrintPretty())
- os << "The value " << CV->getValue() << " is assigned";
- else
- os << "Assigning " << CV->getValue();
+ } else if (SI.Origin && SI.Origin->canPrintPretty()) {
+ if (HasSuffix) {
+ OS << "The value of ";
+ SI.Origin->printPretty(OS);
+ OS << " is assigned";
+ } else {
+ OS << "Assigning the value of ";
+ SI.Origin->printPretty(OS);
+ }
} else {
- if (R->canPrintPretty())
- os << "Value assigned";
- else
- os << "Assigning value";
+ OS << (HasSuffix ? "Value assigned" : "Assigning value");
}
- if (R->canPrintPretty()) {
- os << " to ";
- R->printPretty(os);
+ if (HasSuffix) {
+ OS << " to ";
+ SI.Dest->printPretty(OS);
}
}
-PathDiagnosticPieceRef
-FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) {
+PathDiagnosticPieceRef StoreSiteFinder::VisitNode(const ExplodedNode *Succ,
+ BugReporterContext &BRC,
+ PathSensitiveBugReport &BR) {
if (Satisfied)
return nullptr;
@@ -1451,11 +1451,78 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
if (!IsParam)
InitE = InitE->IgnoreParenCasts();
- bugreporter::trackExpressionValue(
- StoreSite, InitE, BR, TKind, EnableNullFPSuppression);
+ getParentTracker().track(InitE, StoreSite, Options);
}
- if (TKind == TrackingKind::Condition &&
+ // Let's try to find the region where the value came from.
+ const MemRegion *OldRegion = nullptr;
+
+ // If we have init expression, it might be simply a reference
+ // to a variable, so we can use it.
+ if (InitE) {
+    // That region might still not be exactly what we are looking for.
+    // In situations like `int &ref = val;`, we can't say that
+    // `ref` is initialized with `val`; rather, it refers to `val`.
+ //
+    // In order to mitigate situations like this, we check whether the last
+    // stored value in that region is the value that we track.
+ //
+ // TODO: support other situations better.
+ if (const MemRegion *Candidate =
+ getLocationRegionIfReference(InitE, Succ, false)) {
+ const StoreManager &SM = BRC.getStateManager().getStoreManager();
+
+ // Here we traverse the graph up to find the last node where the
+ // candidate region is still in the store.
+ for (const ExplodedNode *N = StoreSite; N; N = N->getFirstPred()) {
+ if (SM.includedInBindings(N->getState()->getStore(), Candidate)) {
+ // And if it was bound to the target value, we can use it.
+ if (N->getState()->getSVal(Candidate) == V) {
+ OldRegion = Candidate;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+ // Otherwise, if the current region does indeed contain the value
+ // we are looking for, we can look for a region where this value
+ // was before.
+ //
+ // It can be useful for situations like:
+ // new = identity(old)
+ // where the analyzer knows that 'identity' returns the value of its
+ // first argument.
+ //
+ // NOTE: If the region R is not a simple var region, it can contain
+ // V in one of its subregions.
+ if (!OldRegion && StoreSite->getState()->getSVal(R) == V) {
+ // Let's go up the graph to find the node where the region is
+ // bound to V.
+ const ExplodedNode *NodeWithoutBinding = StoreSite->getFirstPred();
+ for (;
+ NodeWithoutBinding && NodeWithoutBinding->getState()->getSVal(R) == V;
+ NodeWithoutBinding = NodeWithoutBinding->getFirstPred()) {
+ }
+
+ if (NodeWithoutBinding) {
+ // Let's try to find a unique binding for the value in that node.
+ // We want to use this to find unique bindings because of the following
+ // situations:
+ // b = a;
+ // c = identity(b);
+ //
+ // Telling the user that the value of 'a' is assigned to 'c', while
+ // correct, can be confusing.
+ StoreManager::FindUniqueBinding FB(V.getAsLocSymbol());
+ BRC.getStateManager().iterBindings(NodeWithoutBinding->getState(), FB);
+ if (FB)
+ OldRegion = FB.getRegion();
+ }
+ }
+
+ if (Options.Kind == TrackingKind::Condition && OriginSFC &&
!OriginSFC->isParentOf(StoreSite->getStackFrame()))
return nullptr;
@@ -1463,60 +1530,41 @@ FindLastStoreBRVisitor::VisitNode(const ExplodedNode *Succ,
SmallString<256> sbuf;
llvm::raw_svector_ostream os(sbuf);
+ StoreInfo SI = {StoreInfo::Assignment, // default kind
+ StoreSite,
+ InitE,
+ V,
+ R,
+ OldRegion};
+
if (Optional<PostStmt> PS = StoreSite->getLocationAs<PostStmt>()) {
const Stmt *S = PS->getStmt();
- const char *action = nullptr;
const auto *DS = dyn_cast<DeclStmt>(S);
const auto *VR = dyn_cast<VarRegion>(R);
if (DS) {
- action = R->canPrintPretty() ? "initialized to " :
- "Initializing to ";
+ SI.StoreKind = StoreInfo::Initialization;
} else if (isa<BlockExpr>(S)) {
- action = R->canPrintPretty() ? "captured by block as " :
- "Captured by block as ";
+ SI.StoreKind = StoreInfo::BlockCapture;
if (VR) {
// See if we can get the BlockVarRegion.
ProgramStateRef State = StoreSite->getState();
SVal V = StoreSite->getSVal(S);
if (const auto *BDR =
- dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
+ dyn_cast_or_null<BlockDataRegion>(V.getAsRegion())) {
if (const VarRegion *OriginalR = BDR->getOriginalRegion(VR)) {
- if (auto KV = State->getSVal(OriginalR).getAs<KnownSVal>())
- BR.addVisitor(std::make_unique<FindLastStoreBRVisitor>(
- *KV, OriginalR, EnableNullFPSuppression, TKind, OriginSFC));
+ getParentTracker().track(State->getSVal(OriginalR), OriginalR,
+ Options, OriginSFC);
}
}
}
}
- if (action)
- showBRDiagnostics(action, os, R, V, DS);
-
- } else if (StoreSite->getLocation().getAs<CallEnter>()) {
- if (const auto *VR = dyn_cast<VarRegion>(R))
- showBRParamDiagnostics(os, VR, V);
+ } else if (SI.StoreSite->getLocation().getAs<CallEnter>() &&
+ isa<VarRegion>(SI.Dest)) {
+ SI.StoreKind = StoreInfo::CallArgument;
}
- if (os.str().empty())
- showBRDefaultDiagnostics(os, R, V);
-
- if (TKind == bugreporter::TrackingKind::Condition)
- os << WillBeUsedForACondition;
-
- // Construct a new PathDiagnosticPiece.
- ProgramPoint P = StoreSite->getLocation();
- PathDiagnosticLocation L;
- if (P.getAs<CallEnter>() && InitE)
- L = PathDiagnosticLocation(InitE, BRC.getSourceManager(),
- P.getLocationContext());
-
- if (!L.isValid() || !L.asLocation().isValid())
- L = PathDiagnosticLocation::create(P, BRC.getSourceManager());
-
- if (!L.isValid() || !L.asLocation().isValid())
- return nullptr;
-
- return std::make_shared<PathDiagnosticEventPiece>(L, os.str());
+ return getParentTracker().handle(SI, BRC, Options);
}
//===----------------------------------------------------------------------===//
@@ -1705,14 +1753,17 @@ namespace {
/// An error is emitted at line 3. This visitor realizes that the branch
/// on line 2 is a control dependency of line 3, and tracks it's condition via
/// trackExpressionValue().
-class TrackControlDependencyCondBRVisitor final : public BugReporterVisitor {
+class TrackControlDependencyCondBRVisitor final
+ : public TrackingBugReporterVisitor {
const ExplodedNode *Origin;
ControlDependencyCalculator ControlDeps;
llvm::SmallSet<const CFGBlock *, 32> VisitedBlocks;
public:
- TrackControlDependencyCondBRVisitor(const ExplodedNode *O)
- : Origin(O), ControlDeps(&O->getCFG()) {}
+ TrackControlDependencyCondBRVisitor(TrackerRef ParentTracker,
+ const ExplodedNode *O)
+ : TrackingBugReporterVisitor(ParentTracker), Origin(O),
+ ControlDeps(&O->getCFG()) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int x = 0;
@@ -1810,9 +1861,9 @@ TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
// isn't sufficient, because a new visitor is created for each tracked
// expression, hence the BugReport level set.
if (BR.addTrackedCondition(N)) {
- bugreporter::trackExpressionValue(
- N, Condition, BR, bugreporter::TrackingKind::Condition,
- /*EnableNullFPSuppression=*/false);
+ getParentTracker().track(Condition, N,
+ {bugreporter::TrackingKind::Condition,
+ /*EnableNullFPSuppression=*/false});
return constructDebugPieceForTrackedCondition(Condition, N, BRC);
}
}
@@ -1825,28 +1876,7 @@ TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
// Implementation of trackExpressionValue.
//===----------------------------------------------------------------------===//
-static const MemRegion *getLocationRegionIfReference(const Expr *E,
- const ExplodedNode *N) {
- if (const auto *DR = dyn_cast<DeclRefExpr>(E)) {
- if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- if (!VD->getType()->isReferenceType())
- return nullptr;
- ProgramStateManager &StateMgr = N->getState()->getStateManager();
- MemRegionManager &MRMgr = StateMgr.getRegionManager();
- return MRMgr.getVarRegion(VD, N->getLocationContext());
- }
- }
-
- // FIXME: This does not handle other kinds of null references,
- // for example, references from FieldRegions:
- // struct Wrapper { int &ref; };
- // Wrapper w = { *(int *)0 };
- // w.ref = 1;
-
- return nullptr;
-}
-
-/// \return A subexpression of {@code Ex} which represents the
+/// \return A subexpression of @c Ex which represents the
/// expression-of-interest.
static const Expr *peelOffOuterExpr(const Expr *Ex,
const ExplodedNode *N) {
@@ -1924,152 +1954,472 @@ static const ExplodedNode* findNodeForExpression(const ExplodedNode *N,
return N;
}
-bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
- const Expr *E,
- PathSensitiveBugReport &report,
- bugreporter::TrackingKind TKind,
- bool EnableNullFPSuppression) {
+//===----------------------------------------------------------------------===//
+// Tracker implementation
+//===----------------------------------------------------------------------===//
- if (!E || !InputNode)
- return false;
+PathDiagnosticPieceRef StoreHandler::constructNote(StoreInfo SI,
+ BugReporterContext &BRC,
+ StringRef NodeText) {
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = SI.StoreSite->getLocation();
+ PathDiagnosticLocation L;
+ if (P.getAs<CallEnter>() && SI.SourceOfTheValue)
+ L = PathDiagnosticLocation(SI.SourceOfTheValue, BRC.getSourceManager(),
+ P.getLocationContext());
- const Expr *Inner = peelOffOuterExpr(E, InputNode);
- const ExplodedNode *LVNode = findNodeForExpression(InputNode, Inner);
- if (!LVNode)
- return false;
+ if (!L.isValid() || !L.asLocation().isValid())
+ L = PathDiagnosticLocation::create(P, BRC.getSourceManager());
- ProgramStateRef LVState = LVNode->getState();
- const StackFrameContext *SFC = LVNode->getStackFrame();
-
- // We only track expressions if we believe that they are important. Chances
- // are good that control dependencies to the tracking point are also improtant
- // because of this, let's explain why we believe control reached this point.
- // TODO: Shouldn't we track control dependencies of every bug location, rather
- // than only tracked expressions?
- if (LVState->getAnalysisManager().getAnalyzerOptions().ShouldTrackConditions)
- report.addVisitor(std::make_unique<TrackControlDependencyCondBRVisitor>(
- InputNode));
-
- // The message send could be nil due to the receiver being nil.
- // At this point in the path, the receiver should be live since we are at the
- // message send expr. If it is nil, start tracking it.
- if (const Expr *Receiver = NilReceiverBRVisitor::getNilReceiver(Inner, LVNode))
- trackExpressionValue(
- LVNode, Receiver, report, TKind, EnableNullFPSuppression);
-
- // Track the index if this is an array subscript.
- if (const auto *Arr = dyn_cast<ArraySubscriptExpr>(Inner))
- trackExpressionValue(
- LVNode, Arr->getIdx(), report, TKind, /*EnableNullFPSuppression*/false);
-
- // See if the expression we're interested refers to a variable.
- // If so, we can track both its contents and constraints on its value.
- if (ExplodedGraph::isInterestingLValueExpr(Inner)) {
- SVal LVal = LVNode->getSVal(Inner);
-
- const MemRegion *RR = getLocationRegionIfReference(Inner, LVNode);
- bool LVIsNull = LVState->isNull(LVal).isConstrainedTrue();
-
- // If this is a C++ reference to a null pointer, we are tracking the
- // pointer. In addition, we should find the store at which the reference
- // got initialized.
- if (RR && !LVIsNull)
- if (auto KV = LVal.getAs<KnownSVal>())
- report.addVisitor(std::make_unique<FindLastStoreBRVisitor>(
- *KV, RR, EnableNullFPSuppression, TKind, SFC));
-
- // In case of C++ references, we want to differentiate between a null
- // reference and reference to null pointer.
- // If the LVal is null, check if we are dealing with null reference.
- // For those, we want to track the location of the reference.
- const MemRegion *R = (RR && LVIsNull) ? RR :
- LVNode->getSVal(Inner).getAsRegion();
-
- if (R) {
-
- // Mark both the variable region and its contents as interesting.
- SVal V = LVState->getRawSVal(loc::MemRegionVal(R));
- report.addVisitor(
- std::make_unique<NoStoreFuncVisitor>(cast<SubRegion>(R), TKind));
-
- MacroNullReturnSuppressionVisitor::addMacroVisitorIfNecessary(
- LVNode, R, EnableNullFPSuppression, report, V);
-
- report.markInteresting(V, TKind);
- report.addVisitor(std::make_unique<UndefOrNullArgVisitor>(R));
-
- // If the contents are symbolic and null, find out when they became null.
- if (V.getAsLocSymbol(/*IncludeBaseRegions=*/true))
- if (LVState->isNull(V).isConstrainedTrue())
- report.addVisitor(std::make_unique<TrackConstraintBRVisitor>(
- V.castAs<DefinedSVal>(), false));
-
- // Add visitor, which will suppress inline defensive checks.
- if (auto DV = V.getAs<DefinedSVal>())
- if (!DV->isZeroConstant() && EnableNullFPSuppression) {
- // Note that LVNode may be too late (i.e., too far from the InputNode)
- // because the lvalue may have been computed before the inlined call
- // was evaluated. InputNode may as well be too early here, because
- // the symbol is already dead; this, however, is fine because we can
- // still find the node in which it collapsed to null previously.
- report.addVisitor(
- std::make_unique<SuppressInlineDefensiveChecksVisitor>(
- *DV, InputNode));
- }
+ if (!L.isValid() || !L.asLocation().isValid())
+ return nullptr;
- if (auto KV = V.getAs<KnownSVal>())
- report.addVisitor(std::make_unique<FindLastStoreBRVisitor>(
- *KV, R, EnableNullFPSuppression, TKind, SFC));
- return true;
+ return std::make_shared<PathDiagnosticEventPiece>(L, NodeText);
+}
+
+class DefaultStoreHandler final : public StoreHandler {
+public:
+ using StoreHandler::StoreHandler;
+
+ PathDiagnosticPieceRef handle(StoreInfo SI, BugReporterContext &BRC,
+ TrackingOptions Opts) override {
+ // Okay, we've found the binding. Emit an appropriate message.
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream OS(Buffer);
+
+ switch (SI.StoreKind) {
+ case StoreInfo::Initialization:
+ case StoreInfo::BlockCapture:
+ showBRDiagnostics(OS, SI);
+ break;
+ case StoreInfo::CallArgument:
+ showBRParamDiagnostics(OS, SI);
+ break;
+ case StoreInfo::Assignment:
+ showBRDefaultDiagnostics(OS, SI);
+ break;
+ }
+
+ if (Opts.Kind == bugreporter::TrackingKind::Condition)
+ OS << WillBeUsedForACondition;
+
+ return constructNote(SI, BRC, OS.str());
+ }
+};
+
+class ControlDependencyHandler final : public ExpressionHandler {
+public:
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *Inner, const ExplodedNode *InputNode,
+ const ExplodedNode *LVNode,
+ TrackingOptions Opts) override {
+ PathSensitiveBugReport &Report = getParentTracker().getReport();
+
+ // We only track expressions if we believe that they are important. Chances
+ // are good that control dependencies to the tracking point are also
+ // important because of this, let's explain why we believe control reached
+ // this point.
+ // TODO: Shouldn't we track control dependencies of every bug location,
+ // rather than only tracked expressions?
+ if (LVNode->getState()
+ ->getAnalysisManager()
+ .getAnalyzerOptions()
+ .ShouldTrackConditions) {
+ Report.addVisitor<TrackControlDependencyCondBRVisitor>(
+ &getParentTracker(), InputNode);
+ return {/*FoundSomethingToTrack=*/true};
}
+
+ return {};
}
+};
+
+class NilReceiverHandler final : public ExpressionHandler {
+public:
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *Inner, const ExplodedNode *InputNode,
+ const ExplodedNode *LVNode,
+ TrackingOptions Opts) override {
+ // The message send could be nil due to the receiver being nil.
+ // At this point in the path, the receiver should be live since we are at
+ // the message send expr. If it is nil, start tracking it.
+ if (const Expr *Receiver =
+ NilReceiverBRVisitor::getNilReceiver(Inner, LVNode))
+ return getParentTracker().track(Receiver, LVNode, Opts);
+
+ return {};
+ }
+};
+
+class ArrayIndexHandler final : public ExpressionHandler {
+public:
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *Inner, const ExplodedNode *InputNode,
+ const ExplodedNode *LVNode,
+ TrackingOptions Opts) override {
+ // Track the index if this is an array subscript.
+ if (const auto *Arr = dyn_cast<ArraySubscriptExpr>(Inner))
+ return getParentTracker().track(
+ Arr->getIdx(), LVNode,
+ {Opts.Kind, /*EnableNullFPSuppression*/ false});
+
+ return {};
+ }
+};
+
+// TODO: extract it into more handlers
+class InterestingLValueHandler final : public ExpressionHandler {
+public:
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *Inner, const ExplodedNode *InputNode,
+ const ExplodedNode *LVNode,
+ TrackingOptions Opts) override {
+ ProgramStateRef LVState = LVNode->getState();
+ const StackFrameContext *SFC = LVNode->getStackFrame();
+ PathSensitiveBugReport &Report = getParentTracker().getReport();
+ Tracker::Result Result;
+
+ // See if the expression we're interested refers to a variable.
+ // If so, we can track both its contents and constraints on its value.
+ if (ExplodedGraph::isInterestingLValueExpr(Inner)) {
+ SVal LVal = LVNode->getSVal(Inner);
+
+ const MemRegion *RR = getLocationRegionIfReference(Inner, LVNode);
+ bool LVIsNull = LVState->isNull(LVal).isConstrainedTrue();
+
+ // If this is a C++ reference to a null pointer, we are tracking the
+ // pointer. In addition, we should find the store at which the reference
+ // got initialized.
+ if (RR && !LVIsNull)
+ Result.combineWith(getParentTracker().track(LVal, RR, Opts, SFC));
+
+ // In case of C++ references, we want to differentiate between a null
+ // reference and reference to null pointer.
+ // If the LVal is null, check if we are dealing with null reference.
+ // For those, we want to track the location of the reference.
+ const MemRegion *R =
+ (RR && LVIsNull) ? RR : LVNode->getSVal(Inner).getAsRegion();
+
+ if (R) {
+
+ // Mark both the variable region and its contents as interesting.
+ SVal V = LVState->getRawSVal(loc::MemRegionVal(R));
+ Report.addVisitor<NoStoreFuncVisitor>(cast<SubRegion>(R), Opts.Kind);
+
+ // When we got here, we do have something to track, and we will
+ // interrupt.
+ Result.FoundSomethingToTrack = true;
+ Result.WasInterrupted = true;
+
+ MacroNullReturnSuppressionVisitor::addMacroVisitorIfNecessary(
+ LVNode, R, Opts.EnableNullFPSuppression, Report, V);
+
+ Report.markInteresting(V, Opts.Kind);
+ Report.addVisitor<UndefOrNullArgVisitor>(R);
+
+ // If the contents are symbolic and null, find out when they became
+ // null.
+ if (V.getAsLocSymbol(/*IncludeBaseRegions=*/true))
+ if (LVState->isNull(V).isConstrainedTrue())
+ Report.addVisitor<TrackConstraintBRVisitor>(V.castAs<DefinedSVal>(),
+ false);
+
+ // Add visitor, which will suppress inline defensive checks.
+ if (auto DV = V.getAs<DefinedSVal>())
+ if (!DV->isZeroConstant() && Opts.EnableNullFPSuppression)
+ // Note that LVNode may be too late (i.e., too far from the
+ // InputNode) because the lvalue may have been computed before the
+ // inlined call was evaluated. InputNode may as well be too early
+ // here, because the symbol is already dead; this, however, is fine
+ // because we can still find the node in which it collapsed to null
+ // previously.
+ Report.addVisitor<SuppressInlineDefensiveChecksVisitor>(*DV,
+ InputNode);
+ getParentTracker().track(V, R, Opts, SFC);
+ }
+ }
+
+ return Result;
+ }
+};
+
+/// Adds a ReturnVisitor if the given statement represents a call that was
+/// inlined.
+///
+/// This will search back through the ExplodedGraph, starting from the given
+/// node, looking for when the given statement was processed. If it turns out
+/// the statement is a call that was inlined, we add the visitor to the
+/// bug report, so it can print a note later.
+class InlinedFunctionCallHandler final : public ExpressionHandler {
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *E, const ExplodedNode *InputNode,
+ const ExplodedNode *ExprNode,
+ TrackingOptions Opts) override {
+ if (!CallEvent::isCallStmt(E))
+ return {};
+
+ // First, find when we processed the statement.
+ // If we work with a 'CXXNewExpr' that is going to be purged away before
+ // its call take place. We would catch that purge in the last condition
+ // as a 'StmtPoint' so we have to bypass it.
+ const bool BypassCXXNewExprEval = isa<CXXNewExpr>(E);
- // If the expression is not an "lvalue expression", we can still
- // track the constraints on its contents.
- SVal V = LVState->getSValAsScalarOrLoc(Inner, LVNode->getLocationContext());
+ // This is moving forward when we enter into another context.
+ const StackFrameContext *CurrentSFC = ExprNode->getStackFrame();
+
+ do {
+ // If that is satisfied we found our statement as an inlined call.
+ if (Optional<CallExitEnd> CEE = ExprNode->getLocationAs<CallExitEnd>())
+ if (CEE->getCalleeContext()->getCallSite() == E)
+ break;
+
+ // Try to move forward to the end of the call-chain.
+ ExprNode = ExprNode->getFirstPred();
+ if (!ExprNode)
+ break;
+
+ const StackFrameContext *PredSFC = ExprNode->getStackFrame();
+
+ // If that is satisfied we found our statement.
+ // FIXME: This code currently bypasses the call site for the
+ // conservatively evaluated allocator.
+ if (!BypassCXXNewExprEval)
+ if (Optional<StmtPoint> SP = ExprNode->getLocationAs<StmtPoint>())
+ // See if we do not enter into another context.
+ if (SP->getStmt() == E && CurrentSFC == PredSFC)
+ break;
+
+ CurrentSFC = PredSFC;
+ } while (ExprNode->getStackFrame() == CurrentSFC);
+
+ // Next, step over any post-statement checks.
+ while (ExprNode && ExprNode->getLocation().getAs<PostStmt>())
+ ExprNode = ExprNode->getFirstPred();
+ if (!ExprNode)
+ return {};
+
+ // Finally, see if we inlined the call.
+ Optional<CallExitEnd> CEE = ExprNode->getLocationAs<CallExitEnd>();
+ if (!CEE)
+ return {};
+
+ const StackFrameContext *CalleeContext = CEE->getCalleeContext();
+ if (CalleeContext->getCallSite() != E)
+ return {};
+
+ // Check the return value.
+ ProgramStateRef State = ExprNode->getState();
+ SVal RetVal = ExprNode->getSVal(E);
+
+ // Handle cases where a reference is returned and then immediately used.
+ if (cast<Expr>(E)->isGLValue())
+ if (Optional<Loc> LValue = RetVal.getAs<Loc>())
+ RetVal = State->getSVal(*LValue);
+
+ // See if the return value is NULL. If so, suppress the report.
+ AnalyzerOptions &Options = State->getAnalysisManager().options;
+
+ bool EnableNullFPSuppression = false;
+ if (Opts.EnableNullFPSuppression && Options.ShouldSuppressNullReturnPaths)
+ if (Optional<Loc> RetLoc = RetVal.getAs<Loc>())
+ EnableNullFPSuppression = State->isNull(*RetLoc).isConstrainedTrue();
- ReturnVisitor::addVisitorIfNecessary(
- LVNode, Inner, report, EnableNullFPSuppression, TKind);
+ PathSensitiveBugReport &Report = getParentTracker().getReport();
+ Report.addVisitor<ReturnVisitor>(&getParentTracker(), CalleeContext,
+ EnableNullFPSuppression, Options,
+ Opts.Kind);
+ return {true};
+ }
+};
- // Is it a symbolic value?
- if (auto L = V.getAs<loc::MemRegionVal>()) {
- // FIXME: this is a hack for fixing a later crash when attempting to
- // dereference a void* pointer.
- // We should not try to dereference pointers at all when we don't care
- // what is written inside the pointer.
- bool CanDereference = true;
- if (const auto *SR = L->getRegionAs<SymbolicRegion>()) {
- if (SR->getSymbol()->getType()->getPointeeType()->isVoidType())
+class DefaultExpressionHandler final : public ExpressionHandler {
+public:
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *Inner, const ExplodedNode *InputNode,
+ const ExplodedNode *LVNode,
+ TrackingOptions Opts) override {
+ ProgramStateRef LVState = LVNode->getState();
+ const StackFrameContext *SFC = LVNode->getStackFrame();
+ PathSensitiveBugReport &Report = getParentTracker().getReport();
+ Tracker::Result Result;
+
+ // If the expression is not an "lvalue expression", we can still
+ // track the constraints on its contents.
+ SVal V = LVState->getSValAsScalarOrLoc(Inner, LVNode->getLocationContext());
+
+ // Is it a symbolic value?
+ if (auto L = V.getAs<loc::MemRegionVal>()) {
+ // FIXME: this is a hack for fixing a later crash when attempting to
+ // dereference a void* pointer.
+ // We should not try to dereference pointers at all when we don't care
+ // what is written inside the pointer.
+ bool CanDereference = true;
+ if (const auto *SR = L->getRegionAs<SymbolicRegion>()) {
+ if (SR->getSymbol()->getType()->getPointeeType()->isVoidType())
+ CanDereference = false;
+ } else if (L->getRegionAs<AllocaRegion>())
CanDereference = false;
- } else if (L->getRegionAs<AllocaRegion>())
- CanDereference = false;
-
- // At this point we are dealing with the region's LValue.
- // However, if the rvalue is a symbolic region, we should track it as well.
- // Try to use the correct type when looking up the value.
- SVal RVal;
- if (ExplodedGraph::isInterestingLValueExpr(Inner))
- RVal = LVState->getRawSVal(L.getValue(), Inner->getType());
- else if (CanDereference)
- RVal = LVState->getSVal(L->getRegion());
-
- if (CanDereference) {
- report.addVisitor(
- std::make_unique<UndefOrNullArgVisitor>(L->getRegion()));
-
- if (auto KV = RVal.getAs<KnownSVal>())
- report.addVisitor(std::make_unique<FindLastStoreBRVisitor>(
- *KV, L->getRegion(), EnableNullFPSuppression, TKind, SFC));
+
+ // At this point we are dealing with the region's LValue.
+ // However, if the rvalue is a symbolic region, we should track it as
+ // well. Try to use the correct type when looking up the value.
+ SVal RVal;
+ if (ExplodedGraph::isInterestingLValueExpr(Inner))
+ RVal = LVState->getRawSVal(L.getValue(), Inner->getType());
+ else if (CanDereference)
+ RVal = LVState->getSVal(L->getRegion());
+
+ if (CanDereference) {
+ Report.addVisitor<UndefOrNullArgVisitor>(L->getRegion());
+ Result.FoundSomethingToTrack = true;
+
+ if (auto KV = RVal.getAs<KnownSVal>())
+ Result.combineWith(
+ getParentTracker().track(*KV, L->getRegion(), Opts, SFC));
+ }
+
+ const MemRegion *RegionRVal = RVal.getAsRegion();
+ if (isa_and_nonnull<SymbolicRegion>(RegionRVal)) {
+ Report.markInteresting(RegionRVal, Opts.Kind);
+ Report.addVisitor<TrackConstraintBRVisitor>(
+ loc::MemRegionVal(RegionRVal),
+ /*assumption=*/false);
+ Result.FoundSomethingToTrack = true;
+ }
}
- const MemRegion *RegionRVal = RVal.getAsRegion();
- if (RegionRVal && isa<SymbolicRegion>(RegionRVal)) {
- report.markInteresting(RegionRVal, TKind);
- report.addVisitor(std::make_unique<TrackConstraintBRVisitor>(
- loc::MemRegionVal(RegionRVal), /*assumption=*/false));
+ return Result;
+ }
+};
+
+/// Attempts to add visitors to track an RValue expression back to its point of
+/// origin.
+class PRValueHandler final : public ExpressionHandler {
+public:
+ using ExpressionHandler::ExpressionHandler;
+
+ Tracker::Result handle(const Expr *E, const ExplodedNode *InputNode,
+ const ExplodedNode *ExprNode,
+ TrackingOptions Opts) override {
+ if (!E->isPRValue())
+ return {};
+
+ const ExplodedNode *RVNode = findNodeForExpression(ExprNode, E);
+ if (!RVNode)
+ return {};
+
+ ProgramStateRef RVState = RVNode->getState();
+ SVal V = RVState->getSValAsScalarOrLoc(E, RVNode->getLocationContext());
+ const auto *BO = dyn_cast<BinaryOperator>(E);
+
+ if (!BO || !BO->isMultiplicativeOp() || !V.isZeroConstant())
+ return {};
+
+ SVal RHSV = RVState->getSVal(BO->getRHS(), RVNode->getLocationContext());
+ SVal LHSV = RVState->getSVal(BO->getLHS(), RVNode->getLocationContext());
+
+ // Track both LHS and RHS of a multiplication.
+ Tracker::Result CombinedResult;
+ Tracker &Parent = getParentTracker();
+
+ const auto track = [&CombinedResult, &Parent, ExprNode, Opts](Expr *Inner) {
+ CombinedResult.combineWith(Parent.track(Inner, ExprNode, Opts));
+ };
+
+ if (BO->getOpcode() == BO_Mul) {
+ if (LHSV.isZeroConstant())
+ track(BO->getLHS());
+ if (RHSV.isZeroConstant())
+ track(BO->getRHS());
+ } else { // Track only the LHS of a division or a modulo.
+ if (LHSV.isZeroConstant())
+ track(BO->getLHS());
}
+
+ return CombinedResult;
}
- return true;
+};
+
+Tracker::Tracker(PathSensitiveBugReport &Report) : Report(Report) {
+ // Default expression handlers.
+ addLowPriorityHandler<ControlDependencyHandler>();
+ addLowPriorityHandler<NilReceiverHandler>();
+ addLowPriorityHandler<ArrayIndexHandler>();
+ addLowPriorityHandler<InterestingLValueHandler>();
+ addLowPriorityHandler<InlinedFunctionCallHandler>();
+ addLowPriorityHandler<DefaultExpressionHandler>();
+ addLowPriorityHandler<PRValueHandler>();
+ // Default store handlers.
+ addHighPriorityHandler<DefaultStoreHandler>();
+}
+
+Tracker::Result Tracker::track(const Expr *E, const ExplodedNode *N,
+ TrackingOptions Opts) {
+ if (!E || !N)
+ return {};
+
+ const Expr *Inner = peelOffOuterExpr(E, N);
+ const ExplodedNode *LVNode = findNodeForExpression(N, Inner);
+ if (!LVNode)
+ return {};
+
+ Result CombinedResult;
+ // Iterate through the handlers in the order according to their priorities.
+ for (ExpressionHandlerPtr &Handler : ExpressionHandlers) {
+ CombinedResult.combineWith(Handler->handle(Inner, N, LVNode, Opts));
+ if (CombinedResult.WasInterrupted) {
+ // There is no need to confuse our users here.
+ // We got interrupted, but our users don't need to know about it.
+ CombinedResult.WasInterrupted = false;
+ break;
+ }
+ }
+
+ return CombinedResult;
+}
+
+Tracker::Result Tracker::track(SVal V, const MemRegion *R, TrackingOptions Opts,
+ const StackFrameContext *Origin) {
+ if (auto KV = V.getAs<KnownSVal>()) {
+ Report.addVisitor<StoreSiteFinder>(this, *KV, R, Opts, Origin);
+ return {true};
+ }
+ return {};
+}
+
+PathDiagnosticPieceRef Tracker::handle(StoreInfo SI, BugReporterContext &BRC,
+ TrackingOptions Opts) {
+ // Iterate through the handlers in the order according to their priorities.
+ for (StoreHandlerPtr &Handler : StoreHandlers) {
+ if (PathDiagnosticPieceRef Result = Handler->handle(SI, BRC, Opts))
+ // If the handler produced a non-null piece, return it.
+ // There is no need in asking other handlers.
+ return Result;
+ }
+ return {};
+}
+
+bool bugreporter::trackExpressionValue(const ExplodedNode *InputNode,
+ const Expr *E,
+
+ PathSensitiveBugReport &Report,
+ TrackingOptions Opts) {
+ return Tracker::create(Report)
+ ->track(E, InputNode, Opts)
+ .FoundSomethingToTrack;
+}
+
+void bugreporter::trackStoredValue(KnownSVal V, const MemRegion *R,
+ PathSensitiveBugReport &Report,
+ TrackingOptions Opts,
+ const StackFrameContext *Origin) {
+ Tracker::create(Report)->track(V, R, Opts, Origin);
}
//===----------------------------------------------------------------------===//
@@ -2118,9 +2468,9 @@ NilReceiverBRVisitor::VisitNode(const ExplodedNode *N, BugReporterContext &BRC,
// The receiver was nil, and hence the method was skipped.
// Register a BugReporterVisitor to issue a message telling us how
// the receiver was null.
- bugreporter::trackExpressionValue(
- N, Receiver, BR, bugreporter::TrackingKind::Thorough,
- /*EnableNullFPSuppression*/ false);
+ bugreporter::trackExpressionValue(N, Receiver, BR,
+ {bugreporter::TrackingKind::Thorough,
+ /*EnableNullFPSuppression*/ false});
// Issue a message saying that the method was skipped.
PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
N->getLocationContext());
diff --git a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index a55d9302ca58..3785f498414f 100644
--- a/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -47,6 +47,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ImmutableList.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PointerIntPair.h"
@@ -86,7 +87,7 @@ QualType CallEvent::getResultType() const {
case VK_XValue:
ResultTy = Ctx.getRValueReferenceType(ResultTy);
break;
- case VK_RValue:
+ case VK_PRValue:
// No adjustment is necessary.
break;
}
@@ -466,6 +467,42 @@ bool CallEvent::isVariadic(const Decl *D) {
llvm_unreachable("unknown callable kind");
}
+static bool isTransparentUnion(QualType T) {
+ const RecordType *UT = T->getAsUnionType();
+ return UT && UT->getDecl()->hasAttr<TransparentUnionAttr>();
+}
+
+// In some cases, symbolic cases should be transformed before we associate
+// them with parameters. This function incapsulates such cases.
+static SVal processArgument(SVal Value, const Expr *ArgumentExpr,
+ const ParmVarDecl *Parameter, SValBuilder &SVB) {
+ QualType ParamType = Parameter->getType();
+ QualType ArgumentType = ArgumentExpr->getType();
+
+ // Transparent unions allow users to easily convert values of union field
+ // types into union-typed objects.
+ //
+ // Also, more importantly, they allow users to define functions with different
+ // different parameter types, substituting types matching transparent union
+ // field types with the union type itself.
+ //
+ // Here, we check specifically for latter cases and prevent binding
+ // field-typed values to union-typed regions.
+ if (isTransparentUnion(ParamType) &&
+ // Let's check that we indeed trying to bind different types.
+ !isTransparentUnion(ArgumentType)) {
+ BasicValueFactory &BVF = SVB.getBasicValueFactory();
+
+ llvm::ImmutableList<SVal> CompoundSVals = BVF.getEmptySValList();
+ CompoundSVals = BVF.prependSVal(Value, CompoundSVals);
+
+ // Wrap it with compound value.
+ return SVB.makeCompoundVal(ParamType, CompoundSVals);
+ }
+
+ return Value;
+}
+
static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
CallEvent::BindingsTy &Bindings,
SValBuilder &SVB,
@@ -490,10 +527,12 @@ static void addParameterValuesToBindings(const StackFrameContext *CalleeCtx,
// determined in compile-time but not represented as arg-expressions,
// which makes getArgSVal() fail and return UnknownVal.
SVal ArgVal = Call.getArgSVal(Idx);
+ const Expr *ArgExpr = Call.getArgExpr(Idx);
if (!ArgVal.isUnknown()) {
Loc ParamLoc = SVB.makeLoc(
MRMgr.getParamVarRegion(Call.getOriginExpr(), Idx, CalleeCtx));
- Bindings.push_back(std::make_pair(ParamLoc, ArgVal));
+ Bindings.push_back(
+ std::make_pair(ParamLoc, processArgument(ArgVal, ArgExpr, *I, SVB)));
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 3d44d2cbc069..3d64ce453479 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -19,6 +19,10 @@ using namespace clang;
using namespace ento;
const FunctionDecl *CheckerContext::getCalleeDecl(const CallExpr *CE) const {
+ const FunctionDecl *D = CE->getDirectCallee();
+ if (D)
+ return D;
+
const Expr *Callee = CE->getCallee();
SVal L = Pred->getSVal(Callee);
return L.getAsFunctionDecl();
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
index cae728815b41..626ae1ae8066 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -148,5 +148,39 @@ llvm::Optional<int> tryExpandAsInteger(StringRef Macro,
return IntValue.getSExtValue();
}
+OperatorKind operationKindFromOverloadedOperator(OverloadedOperatorKind OOK,
+ bool IsBinary) {
+ llvm::StringMap<BinaryOperatorKind> BinOps{
+#define BINARY_OPERATION(Name, Spelling) {Spelling, BO_##Name},
+#include "clang/AST/OperationKinds.def"
+ };
+ llvm::StringMap<UnaryOperatorKind> UnOps{
+#define UNARY_OPERATION(Name, Spelling) {Spelling, UO_##Name},
+#include "clang/AST/OperationKinds.def"
+ };
+
+ switch (OOK) {
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ case OO_##Name: \
+ if (IsBinary) { \
+ auto BinOpIt = BinOps.find(Spelling); \
+ if (BinOpIt != BinOps.end()) \
+ return OperatorKind(BinOpIt->second); \
+ else \
+ llvm_unreachable("operator was expected to be binary but is not"); \
+ } else { \
+ auto UnOpIt = UnOps.find(Spelling); \
+ if (UnOpIt != UnOps.end()) \
+ return OperatorKind(UnOpIt->second); \
+ else \
+ llvm_unreachable("operator was expected to be unary but is not"); \
+ } \
+ break;
+#include "clang/Basic/OperatorKinds.def"
+ default:
+ llvm_unreachable("unexpected operator kind");
+ }
+}
+
} // namespace ento
} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
index 86cecf6524f0..e09399a83589 100644
--- a/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -33,21 +33,20 @@ using namespace clang;
using namespace ento;
bool CheckerManager::hasPathSensitiveCheckers() const {
- return !StmtCheckers.empty() ||
- !PreObjCMessageCheckers.empty() ||
- !PostObjCMessageCheckers.empty() ||
- !PreCallCheckers.empty() ||
- !PostCallCheckers.empty() ||
- !LocationCheckers.empty() ||
- !BindCheckers.empty() ||
- !EndAnalysisCheckers.empty() ||
- !EndFunctionCheckers.empty() ||
- !BranchConditionCheckers.empty() ||
- !LiveSymbolsCheckers.empty() ||
- !DeadSymbolsCheckers.empty() ||
- !RegionChangesCheckers.empty() ||
- !EvalAssumeCheckers.empty() ||
- !EvalCallCheckers.empty();
+ const auto IfAnyAreNonEmpty = [](const auto &... Callbacks) -> bool {
+ bool Result = false;
+ // FIXME: Use fold expressions in C++17.
+ LLVM_ATTRIBUTE_UNUSED int Unused[]{0, (Result |= !Callbacks.empty())...};
+ return Result;
+ };
+ return IfAnyAreNonEmpty(
+ StmtCheckers, PreObjCMessageCheckers, ObjCMessageNilCheckers,
+ PostObjCMessageCheckers, PreCallCheckers, PostCallCheckers,
+ LocationCheckers, BindCheckers, EndAnalysisCheckers,
+ BeginFunctionCheckers, EndFunctionCheckers, BranchConditionCheckers,
+ NewAllocatorCheckers, LiveSymbolsCheckers, DeadSymbolsCheckers,
+ RegionChangesCheckers, PointerEscapeCheckers, EvalAssumeCheckers,
+ EvalCallCheckers, EndOfTranslationUnitCheckers);
}
void CheckerManager::finishedCheckerRegistration() {
diff --git a/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp b/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
index a601370775b4..d12c35ef156a 100644
--- a/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CommonBugCategories.cpp
@@ -20,7 +20,9 @@ const char *const MemoryRefCount =
const char *const MemoryError = "Memory error";
const char *const UnixAPI = "Unix API";
const char *const CXXObjectLifecycle = "C++ object lifecycle";
+const char *const CXXMoveSemantics = "C++ move semantics";
const char *const SecurityError = "Security error";
+const char *const UnusedCode = "Unused code";
} // namespace categories
} // namespace ento
} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index 70deb13a8e1a..bc939d252800 100644
--- a/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -219,13 +219,14 @@ void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
// and we're taking the path that skips virtual base constructors.
if (L.getSrc()->getTerminator().isVirtualBaseBranch() &&
L.getDst() == *L.getSrc()->succ_begin()) {
- ProgramPoint P = L.withTag(getNoteTags().makeNoteTag(
+ ProgramPoint P = L.withTag(getDataTags().make<NoteTag>(
[](BugReporterContext &, PathSensitiveBugReport &) -> std::string {
// TODO: Just call out the name of the most derived class
// when we know it.
return "Virtual base initialization skipped because "
"it has already been handled by the most derived class";
- }, /*IsPrunable=*/true));
+ },
+ /*IsPrunable=*/true));
// Perform the transition.
ExplodedNodeSet Dst;
NodeBuilder Bldr(Pred, Dst, BuilderCtx);
@@ -349,6 +350,7 @@ void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
return;
+ case Stmt::SEHLeaveStmtClass:
case Stmt::ContinueStmtClass:
case Stmt::BreakStmtClass:
case Stmt::GotoStmtClass:
diff --git a/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp b/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp
index 8b2172db445c..db9698b4086e 100644
--- a/clang/lib/StaticAnalyzer/Core/DynamicSize.cpp
+++ b/clang/lib/StaticAnalyzer/Core/DynamicExtent.cpp
@@ -1,4 +1,4 @@
-//===- DynamicSize.cpp - Dynamic size related APIs --------------*- C++ -*-===//
+//===- DynamicExtent.cpp - Dynamic extent related APIs ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,11 +6,11 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines APIs that track and query dynamic size information.
+// This file defines APIs that track and query dynamic extent information.
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/AST/Expr.h"
#include "clang/Basic/LLVM.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
@@ -19,32 +19,43 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+REGISTER_MAP_WITH_PROGRAMSTATE(DynamicExtentMap, const clang::ento::MemRegion *,
+ clang::ento::DefinedOrUnknownSVal)
+
namespace clang {
namespace ento {
-DefinedOrUnknownSVal getDynamicSize(ProgramStateRef State, const MemRegion *MR,
- SValBuilder &SVB) {
+DefinedOrUnknownSVal getDynamicExtent(ProgramStateRef State,
+ const MemRegion *MR, SValBuilder &SVB) {
+ MR = MR->StripCasts();
+
+ if (const DefinedOrUnknownSVal *Size = State->get<DynamicExtentMap>(MR))
+ return *Size;
+
return MR->getMemRegionManager().getStaticSize(MR, SVB);
}
+DefinedOrUnknownSVal getElementExtent(QualType Ty, SValBuilder &SVB) {
+ return SVB.makeIntVal(SVB.getContext().getTypeSizeInChars(Ty).getQuantity(),
+ SVB.getArrayIndexType());
+}
+
DefinedOrUnknownSVal getDynamicElementCount(ProgramStateRef State,
const MemRegion *MR,
SValBuilder &SVB,
QualType ElementTy) {
- MemRegionManager &MemMgr = MR->getMemRegionManager();
- ASTContext &Ctx = MemMgr.getContext();
+ MR = MR->StripCasts();
- DefinedOrUnknownSVal Size = getDynamicSize(State, MR, SVB);
- SVal ElementSizeV = SVB.makeIntVal(
- Ctx.getTypeSizeInChars(ElementTy).getQuantity(), SVB.getArrayIndexType());
+ DefinedOrUnknownSVal Size = getDynamicExtent(State, MR, SVB);
+ SVal ElementSize = getElementExtent(ElementTy, SVB);
- SVal DivisionV =
- SVB.evalBinOp(State, BO_Div, Size, ElementSizeV, SVB.getArrayIndexType());
+ SVal ElementCount =
+ SVB.evalBinOp(State, BO_Div, Size, ElementSize, SVB.getArrayIndexType());
- return DivisionV.castAs<DefinedOrUnknownSVal>();
+ return ElementCount.castAs<DefinedOrUnknownSVal>();
}
-SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV) {
+SVal getDynamicExtentWithOffset(ProgramStateRef State, SVal BufV) {
SValBuilder &SvalBuilder = State->getStateManager().getSValBuilder();
const MemRegion *MRegion = BufV.getAsRegion();
if (!MRegion)
@@ -60,12 +71,22 @@ SVal getDynamicSizeWithOffset(ProgramStateRef State, const SVal &BufV) {
Offset.getOffset() /
MRegion->getMemRegionManager().getContext().getCharWidth());
DefinedOrUnknownSVal ExtentInBytes =
- getDynamicSize(State, BaseRegion, SvalBuilder);
+ getDynamicExtent(State, BaseRegion, SvalBuilder);
return SvalBuilder.evalBinOp(State, BinaryOperator::Opcode::BO_Sub,
ExtentInBytes, OffsetInBytes,
SvalBuilder.getArrayIndexType());
}
+ProgramStateRef setDynamicExtent(ProgramStateRef State, const MemRegion *MR,
+ DefinedOrUnknownSVal Size, SValBuilder &SVB) {
+ MR = MR->StripCasts();
+
+ if (Size.isUnknown())
+ return State;
+
+ return State->set<DynamicExtentMap>(MR->StripCasts(), Size);
+}
+
} // namespace ento
} // namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index f285b652c175..66332d3b848c 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -1238,6 +1238,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::SEHExceptStmtClass:
case Stmt::SEHLeaveStmtClass:
case Stmt::SEHFinallyStmtClass:
+ case Stmt::OMPCanonicalLoopClass:
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPSimdDirectiveClass:
case Stmt::OMPForDirectiveClass:
@@ -1292,7 +1293,12 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
- case Stmt::CapturedStmtClass: {
+ case Stmt::OMPTileDirectiveClass:
+ case Stmt::OMPInteropDirectiveClass:
+ case Stmt::OMPDispatchDirectiveClass:
+ case Stmt::OMPMaskedDirectiveClass:
+ case Stmt::CapturedStmtClass:
+ case Stmt::OMPUnrollDirectiveClass: {
const ExplodedNode *node = Bldr.generateSink(S, Pred, Pred->getState());
Engine.addAbortedBlock(node, currBldrCtx->getBlock());
break;
@@ -1414,6 +1420,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPArraySectionExprClass:
case Stmt::OMPArrayShapingExprClass:
case Stmt::OMPIteratorExprClass:
+ case Stmt::SYCLUniqueStableNameExprClass:
case Stmt::TypeTraitExprClass: {
Bldr.takeNodes(Pred);
ExplodedNodeSet preVisit;
@@ -3137,8 +3144,8 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
/// \p PreCallback: callback before break.
/// \p PostCallback: callback after break.
- /// \p Stop: stop iteration if returns {@code true}
- /// \return Whether {@code Stop} ever returned {@code true}.
+ /// \p Stop: stop iteration if returns @c true
+ /// \return Whether @c Stop ever returned @c true.
static bool traverseHiddenNodes(
const ExplodedNode *N,
llvm::function_ref<void(const ExplodedNode *)> PreCallback,
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 18d1b2169eed..7ad3dca831ac 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -282,29 +282,14 @@ ProgramStateRef ExprEngine::handleLValueBitCast(
return state;
}
-ProgramStateRef ExprEngine::handleLVectorSplat(
- ProgramStateRef state, const LocationContext* LCtx, const CastExpr* CastE,
- StmtNodeBuilder &Bldr, ExplodedNode* Pred) {
- // Recover some path sensitivity by conjuring a new value.
- QualType resultType = CastE->getType();
- if (CastE->isGLValue())
- resultType = getContext().getPointerType(resultType);
- SVal result = svalBuilder.conjureSymbolVal(nullptr, CastE, LCtx,
- resultType,
- currBldrCtx->blockCount());
- state = state->BindExpr(CastE, LCtx, result);
- Bldr.generateNode(CastE, Pred, state);
-
- return state;
-}
-
void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
ExplodedNode *Pred, ExplodedNodeSet &Dst) {
ExplodedNodeSet dstPreStmt;
getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
- if (CastE->getCastKind() == CK_LValueToRValue) {
+ if (CastE->getCastKind() == CK_LValueToRValue ||
+ CastE->getCastKind() == CK_LValueToRValueBitCast) {
for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
I!=E; ++I) {
ExplodedNode *subExprNode = *I;
@@ -332,6 +317,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
switch (CastE->getCastKind()) {
case CK_LValueToRValue:
+ case CK_LValueToRValueBitCast:
llvm_unreachable("LValueToRValue casts handled earlier.");
case CK_ToVoid:
continue;
@@ -380,7 +366,6 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_Dependent:
case CK_ArrayToPointerDecay:
case CK_BitCast:
- case CK_LValueToRValueBitCast:
case CK_AddressSpaceConversion:
case CK_BooleanToSignedIntegral:
case CK_IntegralToPointer:
@@ -526,22 +511,28 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_ReinterpretMemberPointer: {
SVal V = state->getSVal(Ex, LCtx);
if (auto PTMSV = V.getAs<nonloc::PointerToMember>()) {
- SVal CastedPTMSV = svalBuilder.makePointerToMember(
- getBasicVals().accumCXXBase(
- llvm::make_range<CastExpr::path_const_iterator>(
- CastE->path_begin(), CastE->path_end()), *PTMSV));
+ SVal CastedPTMSV =
+ svalBuilder.makePointerToMember(getBasicVals().accumCXXBase(
+ CastE->path(), *PTMSV, CastE->getCastKind()));
state = state->BindExpr(CastE, LCtx, CastedPTMSV);
Bldr.generateNode(CastE, Pred, state);
continue;
}
// Explicitly proceed with default handler for this case cascade.
- state = handleLVectorSplat(state, LCtx, CastE, Bldr, Pred);
- continue;
}
+ LLVM_FALLTHROUGH;
// Various C++ casts that are not handled yet.
case CK_ToUnion:
+ case CK_MatrixCast:
case CK_VectorSplat: {
- state = handleLVectorSplat(state, LCtx, CastE, Bldr, Pred);
+ QualType resultType = CastE->getType();
+ if (CastE->isGLValue())
+ resultType = getContext().getPointerType(resultType);
+ SVal result = svalBuilder.conjureSymbolVal(
+ /*symbolTag=*/nullptr, CastE, LCtx, resultType,
+ currBldrCtx->blockCount());
+ state = state->BindExpr(CastE, LCtx, result);
+ Bldr.generateNode(CastE, Pred, state);
continue;
}
}
diff --git a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index 996d3644e018..e6918e071a4f 100644
--- a/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -18,6 +18,7 @@
#include "clang/Analysis/ConstructionContext.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
@@ -689,16 +690,30 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
// See if we need to conjure a heap pointer instead of
// a regular unknown pointer.
- bool IsHeapPointer = false;
- if (const auto *CNE = dyn_cast<CXXNewExpr>(E))
- if (CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
- // FIXME: Delegate this to evalCall in MallocChecker?
- IsHeapPointer = true;
+ const auto *CNE = dyn_cast<CXXNewExpr>(E);
+ if (CNE && CNE->getOperatorNew()->isReplaceableGlobalAllocationFunction()) {
+ R = svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count);
+ const MemRegion *MR = R.getAsRegion()->StripCasts();
+
+ // Store the extent of the allocated object(s).
+ SVal ElementCount;
+ if (const Expr *SizeExpr = CNE->getArraySize().getValueOr(nullptr)) {
+ ElementCount = State->getSVal(SizeExpr, LCtx);
+ } else {
+ ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
}
- R = IsHeapPointer ? svalBuilder.getConjuredHeapSymbolVal(E, LCtx, Count)
- : svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy,
- Count);
+ SVal ElementSize = getElementExtent(CNE->getAllocatedType(), svalBuilder);
+
+ SVal Size =
+ svalBuilder.evalBinOp(State, BO_Mul, ElementCount, ElementSize,
+ svalBuilder.getArrayIndexType());
+
+ State = setDynamicExtent(State, MR, Size.castAs<DefinedOrUnknownSVal>(),
+ svalBuilder);
+ } else {
+ R = svalBuilder.conjureSymbolVal(nullptr, E, LCtx, ResultTy, Count);
+ }
}
return State->BindExpr(E, LCtx, R);
}
diff --git a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
index 149459cf986a..64fc32ea7554 100644
--- a/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -10,11 +10,12 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/IssueHash.h"
-#include "clang/Analysis/PathDiagnostic.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
#include "clang/AST/Stmt.h"
+#include "clang/Analysis/IssueHash.h"
+#include "clang/Analysis/MacroExpansionContext.h"
+#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
@@ -135,14 +136,16 @@ private:
void ento::createHTMLDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &OutputDir, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
// FIXME: HTML is currently our default output type, but if the output
// directory isn't specified, it acts like if it was in the minimal text
// output mode. This doesn't make much sense, we should have the minimal text
// as our default. In the case of backward compatibility concerns, this could
// be preserved with -analyzer-config-compatibility-mode=true.
- createTextMinimalPathDiagnosticConsumer(DiagOpts, C, OutputDir, PP, CTU);
+ createTextMinimalPathDiagnosticConsumer(DiagOpts, C, OutputDir, PP, CTU,
+ MacroExpansions);
// TODO: Emit an error here.
if (OutputDir.empty())
@@ -154,8 +157,10 @@ void ento::createHTMLDiagnosticConsumer(
void ento::createHTMLSingleFileDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &OutputDir, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
- createTextMinimalPathDiagnosticConsumer(DiagOpts, C, OutputDir, PP, CTU);
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const clang::MacroExpansionContext &MacroExpansions) {
+ createTextMinimalPathDiagnosticConsumer(DiagOpts, C, OutputDir, PP, CTU,
+ MacroExpansions);
// TODO: Emit an error here.
if (OutputDir.empty())
@@ -167,13 +172,29 @@ void ento::createHTMLSingleFileDiagnosticConsumer(
void ento::createPlistHTMLDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &prefix, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
createHTMLDiagnosticConsumer(
- DiagOpts, C, std::string(llvm::sys::path::parent_path(prefix)), PP,
- CTU);
- createPlistMultiFileDiagnosticConsumer(DiagOpts, C, prefix, PP, CTU);
+ DiagOpts, C, std::string(llvm::sys::path::parent_path(prefix)), PP, CTU,
+ MacroExpansions);
+ createPlistMultiFileDiagnosticConsumer(DiagOpts, C, prefix, PP, CTU,
+ MacroExpansions);
createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, prefix, PP,
- CTU);
+ CTU, MacroExpansions);
+}
+
+void ento::createSarifHTMLDiagnosticConsumer(
+ PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
+ const std::string &sarif_file, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
+ createHTMLDiagnosticConsumer(
+ DiagOpts, C, std::string(llvm::sys::path::parent_path(sarif_file)), PP,
+ CTU, MacroExpansions);
+ createSarifDiagnosticConsumer(DiagOpts, C, sarif_file, PP, CTU,
+ MacroExpansions);
+ createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, sarif_file,
+ PP, CTU, MacroExpansions);
}
//===----------------------------------------------------------------------===//
@@ -254,11 +275,11 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
<< "' absolute: " << EC.message() << '\n';
return;
}
- if (std::error_code EC =
- llvm::sys::fs::createUniqueFile(Model, FD, ResultPath)) {
- llvm::errs() << "warning: could not create file in '" << Directory
- << "': " << EC.message() << '\n';
- return;
+ if (std::error_code EC = llvm::sys::fs::createUniqueFile(
+ Model, FD, ResultPath, llvm::sys::fs::OF_Text)) {
+ llvm::errs() << "warning: could not create file in '" << Directory
+ << "': " << EC.message() << '\n';
+ return;
}
} else {
int i = 1;
diff --git a/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index dc268e562237..e5f4e9ea30c9 100644
--- a/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -79,14 +79,17 @@ ProgramStateRef processLoopEnd(const Stmt *LoopStmt, ProgramStateRef State) {
return State;
}
-static internal::Matcher<Stmt> simpleCondition(StringRef BindName) {
- return binaryOperator(anyOf(hasOperatorName("<"), hasOperatorName(">"),
- hasOperatorName("<="), hasOperatorName(">="),
- hasOperatorName("!=")),
- hasEitherOperand(ignoringParenImpCasts(declRefExpr(
- to(varDecl(hasType(isInteger())).bind(BindName))))),
- hasEitherOperand(ignoringParenImpCasts(
- integerLiteral().bind("boundNum"))))
+static internal::Matcher<Stmt> simpleCondition(StringRef BindName,
+ StringRef RefName) {
+ return binaryOperator(
+ anyOf(hasOperatorName("<"), hasOperatorName(">"),
+ hasOperatorName("<="), hasOperatorName(">="),
+ hasOperatorName("!=")),
+ hasEitherOperand(ignoringParenImpCasts(
+ declRefExpr(to(varDecl(hasType(isInteger())).bind(BindName)))
+ .bind(RefName))),
+ hasEitherOperand(
+ ignoringParenImpCasts(integerLiteral().bind("boundNum"))))
.bind("conditionOperator");
}
@@ -138,7 +141,7 @@ static internal::Matcher<Stmt> hasSuspiciousStmt(StringRef NodeName) {
static internal::Matcher<Stmt> forLoopMatcher() {
return forStmt(
- hasCondition(simpleCondition("initVarName")),
+ hasCondition(simpleCondition("initVarName", "initVarRef")),
// Initialization should match the form: 'int i = 6' or 'i = 42'.
hasLoopInit(
anyOf(declStmt(hasSingleDecl(
@@ -156,17 +159,52 @@ static internal::Matcher<Stmt> forLoopMatcher() {
hasUnaryOperand(declRefExpr(
to(varDecl(allOf(equalsBoundNode("initVarName"),
hasType(isInteger())))))))),
- unless(hasBody(hasSuspiciousStmt("initVarName")))).bind("forLoop");
+ unless(hasBody(hasSuspiciousStmt("initVarName"))))
+ .bind("forLoop");
}
-static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
- // Global variables assumed as escaped variables.
+static bool isCapturedByReference(ExplodedNode *N, const DeclRefExpr *DR) {
+
+ // Get the lambda CXXRecordDecl
+ assert(DR->refersToEnclosingVariableOrCapture());
+ const LocationContext *LocCtxt = N->getLocationContext();
+ const Decl *D = LocCtxt->getDecl();
+ const auto *MD = cast<CXXMethodDecl>(D);
+ assert(MD && MD->getParent()->isLambda() &&
+ "Captured variable should only be seen while evaluating a lambda");
+ const CXXRecordDecl *LambdaCXXRec = MD->getParent();
+
+  // Look up the fields of the lambda
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+ LambdaCXXRec->getCaptureFields(LambdaCaptureFields, LambdaThisCaptureField);
+
+ // Check if the counter is captured by reference
+ const VarDecl *VD = cast<VarDecl>(DR->getDecl()->getCanonicalDecl());
+ assert(VD);
+ const FieldDecl *FD = LambdaCaptureFields[VD];
+ assert(FD && "Captured variable without a corresponding field");
+ return FD->getType()->isReferenceType();
+}
+
+// A loop counter is considered escaped if:
+// case 1: It is a global variable.
+// case 2: It is a reference parameter or a reference capture.
+// case 3: It is assigned to a non-const reference variable or parameter.
+// case 4: It has its address taken.
+static bool isPossiblyEscaped(ExplodedNode *N, const DeclRefExpr *DR) {
+ const VarDecl *VD = cast<VarDecl>(DR->getDecl()->getCanonicalDecl());
+ assert(VD);
+ // Case 1:
if (VD->hasGlobalStorage())
return true;
- const bool isParm = isa<ParmVarDecl>(VD);
- // Reference parameters are assumed as escaped variables.
- if (isParm && VD->getType()->isReferenceType())
+ const bool IsRefParamOrCapture =
+ isa<ParmVarDecl>(VD) || DR->refersToEnclosingVariableOrCapture();
+ // Case 2:
+ if ((DR->refersToEnclosingVariableOrCapture() &&
+ isCapturedByReference(N, DR)) ||
+ (IsRefParamOrCapture && VD->getType()->isReferenceType()))
return true;
while (!N->pred_empty()) {
@@ -189,6 +227,7 @@ static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
// on VD and reference initialized by VD.
ASTContext &ASTCtx =
N->getLocationContext()->getAnalysisDeclContext()->getASTContext();
+ // Case 3 and 4:
auto Match =
match(stmt(anyOf(callByRef(equalsNode(VD)), getAddrTo(equalsNode(VD)),
assignedToRef(equalsNode(VD)))),
@@ -199,8 +238,8 @@ static bool isPossiblyEscaped(const VarDecl *VD, ExplodedNode *N) {
N = N->getFirstPred();
}
- // Parameter declaration will not be found.
- if (isParm)
+ // Reference parameter and reference capture will not be found.
+ if (IsRefParamOrCapture)
return false;
llvm_unreachable("Reached root without finding the declaration of VD");
@@ -218,7 +257,7 @@ bool shouldCompletelyUnroll(const Stmt *LoopStmt, ASTContext &ASTCtx,
if (Matches.empty())
return false;
- auto CounterVar = Matches[0].getNodeAs<VarDecl>("initVarName");
+ const auto *CounterVarRef = Matches[0].getNodeAs<DeclRefExpr>("initVarRef");
llvm::APInt BoundNum =
Matches[0].getNodeAs<IntegerLiteral>("boundNum")->getValue();
llvm::APInt InitNum =
@@ -235,7 +274,7 @@ bool shouldCompletelyUnroll(const Stmt *LoopStmt, ASTContext &ASTCtx,
maxStep = (BoundNum - InitNum).abs().getZExtValue();
// Check if the counter of the loop is not escaped before.
- return !isPossiblyEscaped(CounterVar->getCanonicalDecl(), Pred);
+ return !isPossiblyEscaped(Pred, CounterVarRef);
}
bool madeNewBranch(ExplodedNode *N, const Stmt *LoopStmt) {
diff --git a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 455adf53ac99..bd725ee9eaa3 100644
--- a/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -28,6 +28,7 @@
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicExtent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
@@ -729,13 +730,6 @@ SourceRange MemRegion::sourceRange() const {
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
-static DefinedOrUnknownSVal getTypeSize(QualType Ty, ASTContext &Ctx,
- SValBuilder &SVB) {
- CharUnits Size = Ctx.getTypeSizeInChars(Ty);
- QualType SizeTy = SVB.getArrayIndexType();
- return SVB.makeIntVal(Size.getQuantity(), SizeTy);
-}
-
DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
SValBuilder &SVB) const {
const auto *SR = cast<SubRegion>(MR);
@@ -766,7 +760,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
if (Ty->isIncompleteType())
return UnknownVal();
- return getTypeSize(Ty, Ctx, SVB);
+ return getElementExtent(Ty, SVB);
}
case MemRegion::FieldRegionKind: {
// Force callers to deal with bitfields explicitly.
@@ -774,7 +768,7 @@ DefinedOrUnknownSVal MemRegionManager::getStaticSize(const MemRegion *MR,
return UnknownVal();
QualType Ty = cast<TypedValueRegion>(SR)->getDesugaredValueType(Ctx);
- DefinedOrUnknownSVal Size = getTypeSize(Ty, Ctx, SVB);
+ DefinedOrUnknownSVal Size = getElementExtent(Ty, SVB);
// A zero-length array at the end of a struct often stands for dynamically
// allocated extra memory.
diff --git a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 35e320c7755f..92104d628711 100644
--- a/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/IssueHash.h"
+#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/PlistSupport.h"
@@ -43,6 +44,7 @@ namespace {
const std::string OutputFile;
const Preprocessor &PP;
const cross_tu::CrossTranslationUnitContext &CTU;
+ const MacroExpansionContext &MacroExpansions;
const bool SupportsCrossFileDiagnostics;
void printBugPath(llvm::raw_ostream &o, const FIDMap &FM,
@@ -52,6 +54,7 @@ namespace {
PlistDiagnostics(PathDiagnosticConsumerOptions DiagOpts,
const std::string &OutputFile, const Preprocessor &PP,
const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions,
bool supportsMultipleFiles);
~PlistDiagnostics() override {}
@@ -80,14 +83,14 @@ class PlistPrinter {
const FIDMap& FM;
const Preprocessor &PP;
const cross_tu::CrossTranslationUnitContext &CTU;
+ const MacroExpansionContext &MacroExpansions;
llvm::SmallVector<const PathDiagnosticMacroPiece *, 0> MacroPieces;
public:
- PlistPrinter(const FIDMap& FM,
- const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU)
- : FM(FM), PP(PP), CTU(CTU) {
- }
+ PlistPrinter(const FIDMap &FM, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions)
+ : FM(FM), PP(PP), CTU(CTU), MacroExpansions(MacroExpansions) {}
void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P) {
ReportPiece(o, P, /*indent*/ 4, /*depth*/ 0, /*includeControlFlow*/ true);
@@ -154,28 +157,17 @@ private:
} // end of anonymous namespace
-namespace {
-
-struct ExpansionInfo {
- std::string MacroName;
- std::string Expansion;
- ExpansionInfo(std::string N, std::string E)
- : MacroName(std::move(N)), Expansion(std::move(E)) {}
-};
-
-} // end of anonymous namespace
-
-/// Print coverage information to output stream {@code o}.
-/// May modify the used list of files {@code Fids} by inserting new ones.
+/// Print coverage information to output stream @c o.
+/// May modify the used list of files @c Fids by inserting new ones.
static void printCoverage(const PathDiagnostic *D,
unsigned InputIndentLevel,
SmallVectorImpl<FileID> &Fids,
FIDMap &FM,
llvm::raw_fd_ostream &o);
-static ExpansionInfo
-getExpandedMacro(SourceLocation MacroLoc, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU);
+static Optional<StringRef> getExpandedMacro(
+ SourceLocation MacroLoc, const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions, const SourceManager &SM);
//===----------------------------------------------------------------------===//
// Methods of PlistPrinter.
@@ -388,7 +380,17 @@ void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
for (const PathDiagnosticMacroPiece *P : MacroPieces) {
const SourceManager &SM = PP.getSourceManager();
- ExpansionInfo EI = getExpandedMacro(P->getLocation().asLocation(), PP, CTU);
+
+ SourceLocation MacroExpansionLoc =
+ P->getLocation().asLocation().getExpansionLoc();
+
+ const Optional<StringRef> MacroName =
+ MacroExpansions.getOriginalText(MacroExpansionLoc);
+ const Optional<StringRef> ExpansionText =
+ getExpandedMacro(MacroExpansionLoc, CTU, MacroExpansions, SM);
+
+ if (!MacroName.hasValue() || !ExpansionText.hasValue())
+ continue;
Indent(o, indent) << "<dict>\n";
++indent;
@@ -405,11 +407,11 @@ void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
// Output the macro name.
Indent(o, indent) << "<key>name</key>";
- EmitString(o, EI.MacroName) << '\n';
+ EmitString(o, MacroName.getValue()) << '\n';
// Output what it expands into.
Indent(o, indent) << "<key>expansion</key>";
- EmitString(o, EI.Expansion) << '\n';
+ EmitString(o, ExpansionText.getValue()) << '\n';
// Finish up.
--indent;
@@ -482,8 +484,8 @@ void PlistPrinter::ReportPopUp(raw_ostream &o,
// Static function definitions.
//===----------------------------------------------------------------------===//
-/// Print coverage information to output stream {@code o}.
-/// May modify the used list of files {@code Fids} by inserting new ones.
+/// Print coverage information to output stream @c o.
+/// May modify the used list of files @c Fids by inserting new ones.
static void printCoverage(const PathDiagnostic *D,
unsigned InputIndentLevel,
SmallVectorImpl<FileID> &Fids,
@@ -522,8 +524,9 @@ static void printCoverage(const PathDiagnostic *D,
PlistDiagnostics::PlistDiagnostics(
PathDiagnosticConsumerOptions DiagOpts, const std::string &output,
const Preprocessor &PP, const cross_tu::CrossTranslationUnitContext &CTU,
- bool supportsMultipleFiles)
+ const MacroExpansionContext &MacroExpansions, bool supportsMultipleFiles)
: DiagOpts(std::move(DiagOpts)), OutputFile(output), PP(PP), CTU(CTU),
+ MacroExpansions(MacroExpansions),
SupportsCrossFileDiagnostics(supportsMultipleFiles) {
// FIXME: Will be used by a later planned change.
(void)this->CTU;
@@ -532,36 +535,40 @@ PlistDiagnostics::PlistDiagnostics(
void ento::createPlistDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &OutputFile, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
// TODO: Emit an error here.
if (OutputFile.empty())
return;
C.push_back(new PlistDiagnostics(DiagOpts, OutputFile, PP, CTU,
+ MacroExpansions,
/*supportsMultipleFiles=*/false));
createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, OutputFile,
- PP, CTU);
+ PP, CTU, MacroExpansions);
}
void ento::createPlistMultiFileDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &OutputFile, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
// TODO: Emit an error here.
if (OutputFile.empty())
return;
C.push_back(new PlistDiagnostics(DiagOpts, OutputFile, PP, CTU,
+ MacroExpansions,
/*supportsMultipleFiles=*/true));
createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, OutputFile,
- PP, CTU);
+ PP, CTU, MacroExpansions);
}
void PlistDiagnostics::printBugPath(llvm::raw_ostream &o, const FIDMap &FM,
const PathPieces &Path) {
- PlistPrinter Printer(FM, PP, CTU);
+ PlistPrinter Printer(FM, PP, CTU, MacroExpansions);
assert(std::is_partitioned(Path.begin(), Path.end(),
[](const PathDiagnosticPieceRef &E) {
return E->getKind() == PathDiagnosticPiece::Note;
@@ -653,7 +660,7 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
// Open the file.
std::error_code EC;
- llvm::raw_fd_ostream o(OutputFile, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream o(OutputFile, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
return;
@@ -815,570 +822,17 @@ void PlistDiagnostics::FlushDiagnosticsImpl(
}
//===----------------------------------------------------------------------===//
-// Declarations of helper functions and data structures for expanding macros.
-//===----------------------------------------------------------------------===//
-
-namespace {
-
-using ArgTokensTy = llvm::SmallVector<Token, 2>;
-
-} // end of anonymous namespace
-
-LLVM_DUMP_METHOD static void dumpArgTokensToStream(llvm::raw_ostream &Out,
- const Preprocessor &PP,
- const ArgTokensTy &Toks);
-
-namespace {
-/// Maps unexpanded macro parameters to expanded arguments. A macro argument may
-/// need to expanded further when it is nested inside another macro.
-class MacroParamMap : public std::map<const IdentifierInfo *, ArgTokensTy> {
-public:
- void expandFromPrevMacro(const MacroParamMap &Super);
-
- LLVM_DUMP_METHOD void dump(const Preprocessor &PP) const {
- dumpToStream(llvm::errs(), PP);
- }
-
- LLVM_DUMP_METHOD void dumpToStream(llvm::raw_ostream &Out,
- const Preprocessor &PP) const;
-};
-
-struct MacroExpansionInfo {
- std::string Name;
- const MacroInfo *MI = nullptr;
- MacroParamMap ParamMap;
-
- MacroExpansionInfo(std::string N, const MacroInfo *MI, MacroParamMap M)
- : Name(std::move(N)), MI(MI), ParamMap(std::move(M)) {}
-};
-
-class TokenPrinter {
- llvm::raw_ostream &OS;
- const Preprocessor &PP;
-
- Token PrevTok, PrevPrevTok;
- TokenConcatenation ConcatInfo;
-
-public:
- TokenPrinter(llvm::raw_ostream &OS, const Preprocessor &PP)
- : OS(OS), PP(PP), ConcatInfo(PP) {
- PrevTok.setKind(tok::unknown);
- PrevPrevTok.setKind(tok::unknown);
- }
-
- void printToken(const Token &Tok);
-};
-
-/// Wrapper around a Lexer object that can lex tokens one-by-one. Its possible
-/// to "inject" a range of tokens into the stream, in which case the next token
-/// is retrieved from the next element of the range, until the end of the range
-/// is reached.
-class TokenStream {
-public:
- TokenStream(SourceLocation ExpanLoc, const SourceManager &SM,
- const LangOptions &LangOpts)
- : ExpanLoc(ExpanLoc) {
- FileID File;
- unsigned Offset;
- std::tie(File, Offset) = SM.getDecomposedLoc(ExpanLoc);
- llvm::MemoryBufferRef MB = SM.getBufferOrFake(File);
- const char *MacroNameTokenPos = MB.getBufferStart() + Offset;
-
- RawLexer = std::make_unique<Lexer>(SM.getLocForStartOfFile(File), LangOpts,
- MB.getBufferStart(), MacroNameTokenPos,
- MB.getBufferEnd());
- }
-
- void next(Token &Result) {
- if (CurrTokenIt == TokenRange.end()) {
- RawLexer->LexFromRawLexer(Result);
- return;
- }
- Result = *CurrTokenIt;
- CurrTokenIt++;
- }
-
- void injectRange(const ArgTokensTy &Range) {
- TokenRange = Range;
- CurrTokenIt = TokenRange.begin();
- }
-
- std::unique_ptr<Lexer> RawLexer;
- ArgTokensTy TokenRange;
- ArgTokensTy::iterator CurrTokenIt = TokenRange.begin();
- SourceLocation ExpanLoc;
-};
-
-} // end of anonymous namespace
-
-/// The implementation method of getMacroExpansion: It prints the expansion of
-/// a macro to \p Printer, and returns with the name of the macro.
-///
-/// Since macros can be nested in one another, this function may call itself
-/// recursively.
-///
-/// Unfortunately, macro arguments have to expanded manually. To understand why,
-/// observe the following example:
-///
-/// #define PRINT(x) print(x)
-/// #define DO_SOMETHING(str) PRINT(str)
-///
-/// DO_SOMETHING("Cute panda cubs.");
-///
-/// As we expand the last line, we'll immediately replace PRINT(str) with
-/// print(x). The information that both 'str' and 'x' refers to the same string
-/// is an information we have to forward, hence the argument \p PrevParamMap.
-///
-/// To avoid infinite recursion we maintain the already processed tokens in
-/// a set. This is carried as a parameter through the recursive calls. The set
-/// is extended with the currently processed token and after processing it, the
-/// token is removed. If the token is already in the set, then recursion stops:
-///
-/// #define f(y) x
-/// #define x f(x)
-static std::string getMacroNameAndPrintExpansion(
- TokenPrinter &Printer, SourceLocation MacroLoc, const Preprocessor &PP,
- const MacroParamMap &PrevParamMap,
- llvm::SmallPtrSet<IdentifierInfo *, 8> &AlreadyProcessedTokens);
-
-/// Retrieves the name of the macro and what it's parameters expand into
-/// at \p ExpanLoc.
-///
-/// For example, for the following macro expansion:
-///
-/// #define SET_TO_NULL(x) x = 0
-/// #define NOT_SUSPICIOUS(a) \
-/// { \
-/// int b = 0; \
-/// } \
-/// SET_TO_NULL(a)
-///
-/// int *ptr = new int(4);
-/// NOT_SUSPICIOUS(&ptr);
-/// *ptr = 5;
-///
-/// When \p ExpanLoc references the last line, the macro name "NOT_SUSPICIOUS"
-/// and the MacroArgMap map { (a, &ptr) } will be returned.
-///
-/// When \p ExpanLoc references "SET_TO_NULL(a)" within the definition of
-/// "NOT_SUSPICOUS", the macro name "SET_TO_NULL" and the MacroArgMap map
-/// { (x, a) } will be returned.
-static MacroExpansionInfo
-getMacroExpansionInfo(const MacroParamMap &PrevParamMap,
- SourceLocation ExpanLoc, const Preprocessor &PP);
-
-/// Retrieves the ')' token that matches '(' \p It points to.
-static MacroInfo::tokens_iterator getMatchingRParen(
- MacroInfo::tokens_iterator It,
- MacroInfo::tokens_iterator End);
-
-/// Retrieves the macro info for \p II refers to at \p Loc. This is important
-/// because macros can be redefined or undefined.
-static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
- const SourceManager &SM,
- const IdentifierInfo *II,
- SourceLocation Loc);
-
-//===----------------------------------------------------------------------===//
// Definitions of helper functions and methods for expanding macros.
//===----------------------------------------------------------------------===//
-static ExpansionInfo
-getExpandedMacro(SourceLocation MacroLoc, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
-
- const Preprocessor *PPToUse = &PP;
- if (auto LocAndUnit = CTU.getImportedFromSourceLocation(MacroLoc)) {
- MacroLoc = LocAndUnit->first;
- PPToUse = &LocAndUnit->second->getPreprocessor();
- }
-
- llvm::SmallString<200> ExpansionBuf;
- llvm::raw_svector_ostream OS(ExpansionBuf);
- TokenPrinter Printer(OS, *PPToUse);
- llvm::SmallPtrSet<IdentifierInfo*, 8> AlreadyProcessedTokens;
-
- std::string MacroName = getMacroNameAndPrintExpansion(
- Printer, MacroLoc, *PPToUse, MacroParamMap{}, AlreadyProcessedTokens);
- return {MacroName, std::string(OS.str())};
-}
-
-static std::string getMacroNameAndPrintExpansion(
- TokenPrinter &Printer, SourceLocation MacroLoc, const Preprocessor &PP,
- const MacroParamMap &PrevParamMap,
- llvm::SmallPtrSet<IdentifierInfo *, 8> &AlreadyProcessedTokens) {
-
- const SourceManager &SM = PP.getSourceManager();
-
- MacroExpansionInfo MExpInfo =
- getMacroExpansionInfo(PrevParamMap, SM.getExpansionLoc(MacroLoc), PP);
- IdentifierInfo *MacroNameII = PP.getIdentifierInfo(MExpInfo.Name);
-
- // TODO: If the macro definition contains another symbol then this function is
- // called recursively. In case this symbol is the one being defined, it will
- // be an infinite recursion which is stopped by this "if" statement. However,
- // in this case we don't get the full expansion text in the Plist file. See
- // the test file where "value" is expanded to "garbage_" instead of
- // "garbage_value".
- if (!AlreadyProcessedTokens.insert(MacroNameII).second)
- return MExpInfo.Name;
-
- if (!MExpInfo.MI)
- return MExpInfo.Name;
-
- // Manually expand its arguments from the previous macro.
- MExpInfo.ParamMap.expandFromPrevMacro(PrevParamMap);
-
- // Iterate over the macro's tokens and stringify them.
- for (auto It = MExpInfo.MI->tokens_begin(), E = MExpInfo.MI->tokens_end();
- It != E; ++It) {
- Token T = *It;
-
- // If this token is not an identifier, we only need to print it.
- if (T.isNot(tok::identifier)) {
- Printer.printToken(T);
- continue;
- }
-
- const auto *II = T.getIdentifierInfo();
- assert(II &&
- "This token is an identifier but has no IdentifierInfo!");
-
- // If this token is a macro that should be expanded inside the current
- // macro.
- if (getMacroInfoForLocation(PP, SM, II, T.getLocation())) {
- getMacroNameAndPrintExpansion(Printer, T.getLocation(), PP,
- MExpInfo.ParamMap, AlreadyProcessedTokens);
-
- // If this is a function-like macro, skip its arguments, as
- // getExpandedMacro() already printed them. If this is the case, let's
- // first jump to the '(' token.
- auto N = std::next(It);
- if (N != E && N->is(tok::l_paren))
- It = getMatchingRParen(++It, E);
- continue;
- }
-
- // If this token is the current macro's argument, we should expand it.
- auto ParamToArgIt = MExpInfo.ParamMap.find(II);
- if (ParamToArgIt != MExpInfo.ParamMap.end()) {
- for (MacroInfo::tokens_iterator ArgIt = ParamToArgIt->second.begin(),
- ArgEnd = ParamToArgIt->second.end();
- ArgIt != ArgEnd; ++ArgIt) {
-
- // These tokens may still be macros, if that is the case, handle it the
- // same way we did above.
- const auto *ArgII = ArgIt->getIdentifierInfo();
- if (!ArgII) {
- Printer.printToken(*ArgIt);
- continue;
- }
-
- const auto *MI = PP.getMacroInfo(ArgII);
- if (!MI) {
- Printer.printToken(*ArgIt);
- continue;
- }
-
- getMacroNameAndPrintExpansion(Printer, ArgIt->getLocation(), PP,
- MExpInfo.ParamMap,
- AlreadyProcessedTokens);
- // Peek the next token if it is a tok::l_paren. This way we can decide
- // if this is the application or just a reference to a function maxro
- // symbol:
- //
- // #define apply(f) ...
- // #define func(x) ...
- // apply(func)
- // apply(func(42))
- auto N = std::next(ArgIt);
- if (N != ArgEnd && N->is(tok::l_paren))
- ArgIt = getMatchingRParen(++ArgIt, ArgEnd);
- }
- continue;
- }
-
- // If control reached here, then this token isn't a macro identifier, nor an
- // unexpanded macro argument that we need to handle, print it.
- Printer.printToken(T);
+static Optional<StringRef>
+getExpandedMacro(SourceLocation MacroExpansionLoc,
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions,
+ const SourceManager &SM) {
+ if (auto CTUMacroExpCtx =
+ CTU.getMacroExpansionContextForSourceLocation(MacroExpansionLoc)) {
+ return CTUMacroExpCtx->getExpandedText(MacroExpansionLoc);
}
-
- AlreadyProcessedTokens.erase(MacroNameII);
-
- return MExpInfo.Name;
-}
-
-static MacroExpansionInfo
-getMacroExpansionInfo(const MacroParamMap &PrevParamMap,
- SourceLocation ExpanLoc, const Preprocessor &PP) {
-
- const SourceManager &SM = PP.getSourceManager();
- const LangOptions &LangOpts = PP.getLangOpts();
-
- // First, we create a Lexer to lex *at the expansion location* the tokens
- // referring to the macro's name and its arguments.
- TokenStream TStream(ExpanLoc, SM, LangOpts);
-
- // Acquire the macro's name.
- Token TheTok;
- TStream.next(TheTok);
-
- std::string MacroName = PP.getSpelling(TheTok);
-
- const auto *II = PP.getIdentifierInfo(MacroName);
- assert(II && "Failed to acquire the IdentifierInfo for the macro!");
-
- const MacroInfo *MI = getMacroInfoForLocation(PP, SM, II, ExpanLoc);
- // assert(MI && "The macro must've been defined at it's expansion location!");
- //
- // We should always be able to obtain the MacroInfo in a given TU, but if
- // we're running the analyzer with CTU, the Preprocessor won't contain the
- // directive history (or anything for that matter) from another TU.
- // TODO: assert when we're not running with CTU.
- if (!MI)
- return { MacroName, MI, {} };
-
- // Acquire the macro's arguments at the expansion point.
- //
- // The rough idea here is to lex from the first left parentheses to the last
- // right parentheses, and map the macro's parameter to what they will be
- // expanded to. A macro argument may contain several token (like '3 + 4'), so
- // we'll lex until we find a tok::comma or tok::r_paren, at which point we
- // start lexing the next argument or finish.
- ArrayRef<const IdentifierInfo *> MacroParams = MI->params();
- if (MacroParams.empty())
- return { MacroName, MI, {} };
-
- TStream.next(TheTok);
- // When this is a token which expands to another macro function then its
- // parentheses are not at its expansion locaiton. For example:
- //
- // #define foo(x) int bar() { return x; }
- // #define apply_zero(f) f(0)
- // apply_zero(foo)
- // ^
- // This is not a tok::l_paren, but foo is a function.
- if (TheTok.isNot(tok::l_paren))
- return { MacroName, MI, {} };
-
- MacroParamMap ParamMap;
-
- // When the argument is a function call, like
- // CALL_FN(someFunctionName(param1, param2))
- // we will find tok::l_paren, tok::r_paren, and tok::comma that do not divide
- // actual macro arguments, or do not represent the macro argument's closing
- // parentheses, so we'll count how many parentheses aren't closed yet.
- // If ParanthesesDepth
- // * = 0, then there are no more arguments to lex.
- // * = 1, then if we find a tok::comma, we can start lexing the next arg.
- // * > 1, then tok::comma is a part of the current arg.
- int ParenthesesDepth = 1;
-
- // If we encounter the variadic arg, we will lex until the closing
- // tok::r_paren, even if we lex a tok::comma and ParanthesesDepth == 1.
- const IdentifierInfo *VariadicParamII = PP.getIdentifierInfo("__VA_ARGS__");
- if (MI->isGNUVarargs()) {
- // If macro uses GNU-style variadic args, the param name is user-supplied,
- // an not "__VA_ARGS__". E.g.:
- // #define FOO(a, b, myvargs...)
- // In this case, just use the last parameter:
- VariadicParamII = *(MacroParams.rbegin());
- }
-
- for (const IdentifierInfo *CurrParamII : MacroParams) {
- MacroParamMap::mapped_type ArgTokens;
-
- // One could also simply not supply a single argument to __VA_ARGS__ -- this
- // results in a preprocessor warning, but is not an error:
- // #define VARIADIC(ptr, ...) \
- // someVariadicTemplateFunction(__VA_ARGS__)
- //
- // int *ptr;
- // VARIADIC(ptr); // Note that there are no commas, this isn't just an
- // // empty parameter -- there are no parameters for '...'.
- // In any other case, ParenthesesDepth mustn't be 0 here.
- if (ParenthesesDepth != 0) {
-
- // Lex the first token of the next macro parameter.
- TStream.next(TheTok);
-
- while (CurrParamII == VariadicParamII || ParenthesesDepth != 1 ||
- !TheTok.is(tok::comma)) {
- assert(TheTok.isNot(tok::eof) &&
- "EOF encountered while looking for expanded macro args!");
-
- if (TheTok.is(tok::l_paren))
- ++ParenthesesDepth;
-
- if (TheTok.is(tok::r_paren))
- --ParenthesesDepth;
-
- if (ParenthesesDepth == 0)
- break;
-
- if (TheTok.is(tok::raw_identifier)) {
- PP.LookUpIdentifierInfo(TheTok);
- // This token is a variadic parameter:
- //
- // #define PARAMS_RESOLVE_TO_VA_ARGS(i, fmt) foo(i, fmt); \
- // i = 0;
- // #define DISPATCH(...) \
- // PARAMS_RESOLVE_TO_VA_ARGS(__VA_ARGS__);
- // // ^~~~~~~~~~~ Variadic parameter here
- //
- // void multipleParamsResolveToVA_ARGS(void) {
- // int x = 1;
- // DISPATCH(x, "LF1M healer"); // Multiple arguments are mapped to
- // // a single __VA_ARGS__ parameter.
- // (void)(10 / x);
- // }
- //
- // We will stumble across this while trying to expand
- // PARAMS_RESOLVE_TO_VA_ARGS. By this point, we already noted during
- // the processing of DISPATCH what __VA_ARGS__ maps to, so we'll
- // retrieve the next series of tokens from that.
- if (TheTok.getIdentifierInfo() == VariadicParamII) {
- TStream.injectRange(PrevParamMap.at(VariadicParamII));
- TStream.next(TheTok);
- continue;
- }
- }
-
- ArgTokens.push_back(TheTok);
- TStream.next(TheTok);
- }
- } else {
- assert(CurrParamII == VariadicParamII &&
- "No more macro arguments are found, but the current parameter "
- "isn't the variadic arg!");
- }
-
- ParamMap.emplace(CurrParamII, std::move(ArgTokens));
- }
-
- assert(TheTok.is(tok::r_paren) &&
- "Expanded macro argument acquisition failed! After the end of the loop"
- " this token should be ')'!");
-
- return {MacroName, MI, ParamMap};
-}
-
-static MacroInfo::tokens_iterator getMatchingRParen(
- MacroInfo::tokens_iterator It,
- MacroInfo::tokens_iterator End) {
-
- assert(It->is(tok::l_paren) && "This token should be '('!");
-
- // Skip until we find the closing ')'.
- int ParenthesesDepth = 1;
- while (ParenthesesDepth != 0) {
- ++It;
-
- assert(It->isNot(tok::eof) &&
- "Encountered EOF while attempting to skip macro arguments!");
- assert(It != End &&
- "End of the macro definition reached before finding ')'!");
-
- if (It->is(tok::l_paren))
- ++ParenthesesDepth;
-
- if (It->is(tok::r_paren))
- --ParenthesesDepth;
- }
- return It;
-}
-
-static const MacroInfo *getMacroInfoForLocation(const Preprocessor &PP,
- const SourceManager &SM,
- const IdentifierInfo *II,
- SourceLocation Loc) {
-
- const MacroDirective *MD = PP.getLocalMacroDirectiveHistory(II);
- if (!MD)
- return nullptr;
-
- return MD->findDirectiveAtLoc(Loc, SM).getMacroInfo();
-}
-
-void MacroParamMap::expandFromPrevMacro(const MacroParamMap &Super) {
-
- for (value_type &Pair : *this) {
- ArgTokensTy &CurrArgTokens = Pair.second;
-
- // For each token in the expanded macro argument.
- auto It = CurrArgTokens.begin();
- while (It != CurrArgTokens.end()) {
- if (It->isNot(tok::identifier)) {
- ++It;
- continue;
- }
-
- const auto *II = It->getIdentifierInfo();
- assert(II);
-
- // Is this an argument that "Super" expands further?
- if (!Super.count(II)) {
- ++It;
- continue;
- }
-
- const ArgTokensTy &SuperArgTokens = Super.at(II);
-
- It = CurrArgTokens.insert(It, SuperArgTokens.begin(),
- SuperArgTokens.end());
- std::advance(It, SuperArgTokens.size());
- It = CurrArgTokens.erase(It);
- }
- }
-}
-
-void MacroParamMap::dumpToStream(llvm::raw_ostream &Out,
- const Preprocessor &PP) const {
- for (const std::pair<const IdentifierInfo *, ArgTokensTy> Pair : *this) {
- Out << Pair.first->getName() << " -> ";
- dumpArgTokensToStream(Out, PP, Pair.second);
- Out << '\n';
- }
-}
-
-static void dumpArgTokensToStream(llvm::raw_ostream &Out,
- const Preprocessor &PP,
- const ArgTokensTy &Toks) {
- TokenPrinter Printer(Out, PP);
- for (Token Tok : Toks)
- Printer.printToken(Tok);
-}
-
-void TokenPrinter::printToken(const Token &Tok) {
- // TODO: Handle GNU extensions where hash and hashhash occurs right before
- // __VA_ARGS__.
- // cppreference.com: "some compilers offer an extension that allows ## to
- // appear after a comma and before __VA_ARGS__, in which case the ## does
- // nothing when the variable arguments are present, but removes the comma when
- // the variable arguments are not present: this makes it possible to define
- // macros such as fprintf (stderr, format, ##__VA_ARGS__)"
- // FIXME: Handle named variadic macro parameters (also a GNU extension).
-
- // If this is the first token to be printed, don't print space.
- if (PrevTok.isNot(tok::unknown)) {
- // If the tokens were already space separated, or if they must be to avoid
- // them being implicitly pasted, add a space between them.
- if(Tok.hasLeadingSpace() || ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok,
- Tok)) {
- // AvoidConcat doesn't check for ##, don't print a space around it.
- if (PrevTok.isNot(tok::hashhash) && Tok.isNot(tok::hashhash)) {
- OS << ' ';
- }
- }
- }
-
- if (!Tok.isOneOf(tok::hash, tok::hashhash)) {
- if (PrevTok.is(tok::hash))
- OS << '\"' << PP.getSpelling(Tok) << '\"';
- else
- OS << PP.getSpelling(Tok);
- }
-
- PrevPrevTok = PrevTok;
- PrevTok = Tok;
+ return MacroExpansions.getExpandedText(MacroExpansionLoc);
}
diff --git a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index a481bde1651b..69554576bdb2 100644
--- a/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -19,7 +19,13 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <iterator>
using namespace clang;
using namespace ento;
@@ -97,47 +103,63 @@ public:
return CmpOpTable[getIndexFromOp(CurrentOP)][CmpOpCount];
}
};
+
//===----------------------------------------------------------------------===//
// RangeSet implementation
//===----------------------------------------------------------------------===//
-void RangeSet::IntersectInRange(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Lower,
- const llvm::APSInt &Upper,
- PrimRangeSet &newRanges,
- PrimRangeSet::iterator &i,
- PrimRangeSet::iterator &e) const {
- // There are six cases for each range R in the set:
- // 1. R is entirely before the intersection range.
- // 2. R is entirely after the intersection range.
- // 3. R contains the entire intersection range.
- // 4. R starts before the intersection range and ends in the middle.
- // 5. R starts in the middle of the intersection range and ends after it.
- // 6. R is entirely contained in the intersection range.
- // These correspond to each of the conditions below.
- for (/* i = begin(), e = end() */; i != e; ++i) {
- if (i->To() < Lower) {
- continue;
- }
- if (i->From() > Upper) {
- break;
- }
+RangeSet::ContainerType RangeSet::Factory::EmptySet{};
- if (i->Includes(Lower)) {
- if (i->Includes(Upper)) {
- newRanges =
- F.add(newRanges, Range(BV.getValue(Lower), BV.getValue(Upper)));
- break;
- } else
- newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
- } else {
- if (i->Includes(Upper)) {
- newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
- break;
- } else
- newRanges = F.add(newRanges, *i);
- }
+RangeSet RangeSet::Factory::add(RangeSet Original, Range Element) {
+ ContainerType Result;
+ Result.reserve(Original.size() + 1);
+
+ const_iterator Lower = llvm::lower_bound(Original, Element);
+ Result.insert(Result.end(), Original.begin(), Lower);
+ Result.push_back(Element);
+ Result.insert(Result.end(), Lower, Original.end());
+
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::add(RangeSet Original, const llvm::APSInt &Point) {
+ return add(Original, Range(Point));
+}
+
+RangeSet RangeSet::Factory::getRangeSet(Range From) {
+ ContainerType Result;
+ Result.push_back(From);
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::makePersistent(ContainerType &&From) {
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+
+ From.Profile(ID);
+ ContainerType *Result = Cache.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Result) {
+ // It is cheaper to fully construct the resulting range on stack
+ // and move it to the freshly allocated buffer if we don't have
+ // a set like this already.
+ Result = construct(std::move(From));
+ Cache.InsertNode(Result, InsertPos);
}
+
+ return Result;
+}
+
+RangeSet::ContainerType *RangeSet::Factory::construct(ContainerType &&From) {
+ void *Buffer = Arena.Allocate();
+ return new (Buffer) ContainerType(std::move(From));
+}
+
+RangeSet RangeSet::Factory::add(RangeSet LHS, RangeSet RHS) {
+ ContainerType Result;
+ std::merge(LHS.begin(), LHS.end(), RHS.begin(), RHS.end(),
+ std::back_inserter(Result));
+ return makePersistent(std::move(Result));
}
const llvm::APSInt &RangeSet::getMinValue() const {
@@ -147,22 +169,31 @@ const llvm::APSInt &RangeSet::getMinValue() const {
const llvm::APSInt &RangeSet::getMaxValue() const {
assert(!isEmpty());
- // NOTE: It's a shame that we can't implement 'getMaxValue' without scanning
- // the whole tree to get to the last element.
- // llvm::ImmutableSet should support decrement for 'end' iterators
- // or reverse order iteration.
- auto It = begin();
- for (auto End = end(); std::next(It) != End; ++It) {
- }
- return It->To();
+ return std::prev(end())->To();
}
-bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
- if (isEmpty()) {
- // This range is already infeasible.
+bool RangeSet::containsImpl(llvm::APSInt &Point) const {
+ if (isEmpty() || !pin(Point))
+ return false;
+
+ Range Dummy(Point);
+ const_iterator It = llvm::upper_bound(*this, Dummy);
+ if (It == begin())
return false;
- }
+ return std::prev(It)->Includes(Point);
+}
+
+bool RangeSet::pin(llvm::APSInt &Point) const {
+ APSIntType Type(getMinValue());
+ if (Type.testInRange(Point, true) != APSIntType::RTR_Within)
+ return false;
+
+ Type.apply(Point);
+ return true;
+}
+
+bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
// This function has nine cases, the cartesian product of range-testing
// both the upper and lower bounds against the symbol's type.
// Each case requires a different pinning operation.
@@ -243,129 +274,216 @@ bool RangeSet::pin(llvm::APSInt &Lower, llvm::APSInt &Upper) const {
return true;
}
-// Returns a set containing the values in the receiving set, intersected with
-// the closed range [Lower, Upper]. Unlike the Range type, this range uses
-// modular arithmetic, corresponding to the common treatment of C integer
-// overflow. Thus, if the Lower bound is greater than the Upper bound, the
-// range is taken to wrap around. This is equivalent to taking the
-// intersection with the two ranges [Min, Upper] and [Lower, Max],
-// or, alternatively, /removing/ all integers between Upper and Lower.
-RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
- llvm::APSInt Lower, llvm::APSInt Upper) const {
- PrimRangeSet newRanges = F.getEmptySet();
-
- if (isEmpty() || !pin(Lower, Upper))
- return newRanges;
-
- PrimRangeSet::iterator i = begin(), e = end();
- if (Lower <= Upper)
- IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
- else {
- // The order of the next two statements is important!
- // IntersectInRange() does not reset the iteration state for i and e.
- // Therefore, the lower range most be handled first.
- IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
- IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
- }
-
- return newRanges;
-}
-
-// Returns a set containing the values in the receiving set, intersected with
-// the range set passed as parameter.
-RangeSet RangeSet::Intersect(BasicValueFactory &BV, Factory &F,
- const RangeSet &Other) const {
- PrimRangeSet newRanges = F.getEmptySet();
-
- for (iterator i = Other.begin(), e = Other.end(); i != e; ++i) {
- RangeSet newPiece = Intersect(BV, F, i->From(), i->To());
- for (iterator j = newPiece.begin(), ee = newPiece.end(); j != ee; ++j) {
- newRanges = F.add(newRanges, *j);
- }
+RangeSet RangeSet::Factory::intersect(RangeSet What, llvm::APSInt Lower,
+ llvm::APSInt Upper) {
+ if (What.isEmpty() || !What.pin(Lower, Upper))
+ return getEmptySet();
+
+ ContainerType DummyContainer;
+
+ if (Lower <= Upper) {
+ // [Lower, Upper] is a regular range.
+ //
+ // Shortcut: check that there is even a possibility of the intersection
+ // by checking the two following situations:
+ //
+ // <---[ What ]---[------]------>
+ // Lower Upper
+ // -or-
+ // <----[------]----[ What ]---->
+ // Lower Upper
+ if (What.getMaxValue() < Lower || Upper < What.getMinValue())
+ return getEmptySet();
+
+ DummyContainer.push_back(
+ Range(ValueFactory.getValue(Lower), ValueFactory.getValue(Upper)));
+ } else {
+ // [Lower, Upper] is an inverted range, i.e. [MIN, Upper] U [Lower, MAX]
+ //
+ // Shortcut: check that there is even a possibility of the intersection
+ // by checking the following situation:
+ //
+ // <------]---[ What ]---[------>
+ // Upper Lower
+ if (What.getMaxValue() < Lower && Upper < What.getMinValue())
+ return getEmptySet();
+
+ DummyContainer.push_back(
+ Range(ValueFactory.getMinValue(Upper), ValueFactory.getValue(Upper)));
+ DummyContainer.push_back(
+ Range(ValueFactory.getValue(Lower), ValueFactory.getMaxValue(Lower)));
}
- return newRanges;
+ return intersect(*What.Impl, DummyContainer);
}
-// Turn all [A, B] ranges to [-B, -A], when "-" is a C-like unary minus
-// operation under the values of the type.
-//
-// We also handle MIN because applying unary minus to MIN does not change it.
-// Example 1:
-// char x = -128; // -128 is a MIN value in a range of 'char'
-// char y = -x; // y: -128
-// Example 2:
-// unsigned char x = 0; // 0 is a MIN value in a range of 'unsigned char'
-// unsigned char y = -x; // y: 0
-//
-// And it makes us to separate the range
-// like [MIN, N] to [MIN, MIN] U [-N,MAX].
-// For instance, whole range is {-128..127} and subrange is [-128,-126],
-// thus [-128,-127,-126,.....] negates to [-128,.....,126,127].
-//
-// Negate restores disrupted ranges on bounds,
-// e.g. [MIN, B] => [MIN, MIN] U [-B, MAX] => [MIN, B].
-RangeSet RangeSet::Negate(BasicValueFactory &BV, Factory &F) const {
- PrimRangeSet newRanges = F.getEmptySet();
+RangeSet RangeSet::Factory::intersect(const RangeSet::ContainerType &LHS,
+ const RangeSet::ContainerType &RHS) {
+ ContainerType Result;
+ Result.reserve(std::max(LHS.size(), RHS.size()));
+
+ const_iterator First = LHS.begin(), Second = RHS.begin(),
+ FirstEnd = LHS.end(), SecondEnd = RHS.end();
+
+ const auto SwapIterators = [&First, &FirstEnd, &Second, &SecondEnd]() {
+ std::swap(First, Second);
+ std::swap(FirstEnd, SecondEnd);
+ };
+
+ // If we ran out of ranges in one set, but not in the other,
+ // it means that those elements are definitely not in the
+ // intersection.
+ while (First != FirstEnd && Second != SecondEnd) {
+ // We want to keep the following invariant at all times:
+ //
+ // ----[ First ---------------------->
+ // --------[ Second ----------------->
+ if (Second->From() < First->From())
+ SwapIterators();
+
+ // Loop where the invariant holds:
+ do {
+ // Check for the following situation:
+ //
+ // ----[ First ]--------------------->
+ // ---------------[ Second ]--------->
+ //
+ // which means that...
+ if (Second->From() > First->To()) {
+ // ...First is not in the intersection.
+ //
+ // We should move on to the next range after First and break out of the
+ // loop because the invariant might not be true.
+ ++First;
+ break;
+ }
- if (isEmpty())
- return newRanges;
+ // We have a guaranteed intersection at this point!
+ // And this is the current situation:
+ //
+ // ----[ First ]----------------->
+ // -------[ Second ------------------>
+ //
+ // Additionally, it definitely starts with Second->From().
+ const llvm::APSInt &IntersectionStart = Second->From();
+
+ // It is important to know which of the two ranges' ends
+ // is greater. That "longer" range might have some other
+ // intersections, while the "shorter" range might not.
+ if (Second->To() > First->To()) {
+ // Here we make a decision to keep First as the "longer"
+ // range.
+ SwapIterators();
+ }
- const llvm::APSInt sampleValue = getMinValue();
- const llvm::APSInt &MIN = BV.getMinValue(sampleValue);
- const llvm::APSInt &MAX = BV.getMaxValue(sampleValue);
+ // At this point, we have the following situation:
+ //
+ // ---- First ]-------------------->
+ // ---- Second ]--[ Second+1 ---------->
+ //
+ // We don't know the relationship between First->From and
+ // Second->From and we don't know whether Second+1 intersects
+ // with First.
+ //
+ // However, we know that [IntersectionStart, Second->To] is
+ // a part of the intersection...
+ Result.push_back(Range(IntersectionStart, Second->To()));
+ ++Second;
+ // ...and that the invariant will hold for a valid Second+1
+ // because First->From <= Second->To < (Second+1)->From.
+ } while (Second != SecondEnd);
+ }
+
+ if (Result.empty())
+ return getEmptySet();
+
+ return makePersistent(std::move(Result));
+}
+
+RangeSet RangeSet::Factory::intersect(RangeSet LHS, RangeSet RHS) {
+ // Shortcut: let's see if the intersection is even possible.
+ if (LHS.isEmpty() || RHS.isEmpty() || LHS.getMaxValue() < RHS.getMinValue() ||
+ RHS.getMaxValue() < LHS.getMinValue())
+ return getEmptySet();
+
+ return intersect(*LHS.Impl, *RHS.Impl);
+}
+
+RangeSet RangeSet::Factory::intersect(RangeSet LHS, llvm::APSInt Point) {
+ if (LHS.containsImpl(Point))
+ return getRangeSet(ValueFactory.getValue(Point));
+
+ return getEmptySet();
+}
+
+RangeSet RangeSet::Factory::negate(RangeSet What) {
+ if (What.isEmpty())
+ return getEmptySet();
+
+ const llvm::APSInt SampleValue = What.getMinValue();
+ const llvm::APSInt &MIN = ValueFactory.getMinValue(SampleValue);
+ const llvm::APSInt &MAX = ValueFactory.getMaxValue(SampleValue);
+
+ ContainerType Result;
+ Result.reserve(What.size() + (SampleValue == MIN));
// Handle a special case for MIN value.
- iterator i = begin();
- const llvm::APSInt &from = i->From();
- const llvm::APSInt &to = i->To();
- if (from == MIN) {
- // If [from, to] are [MIN, MAX], then just return the same [MIN, MAX].
- if (to == MAX) {
- newRanges = ranges;
+ const_iterator It = What.begin();
+ const_iterator End = What.end();
+
+ const llvm::APSInt &From = It->From();
+ const llvm::APSInt &To = It->To();
+
+ if (From == MIN) {
+ // If the range [From, To] is [MIN, MAX], then result is also [MIN, MAX].
+ if (To == MAX) {
+ return What;
+ }
+
+ const_iterator Last = std::prev(End);
+
+ // Try to find and unite the following ranges:
+ // [MIN, MIN] & [MIN + 1, N] => [MIN, N].
+ if (Last->To() == MAX) {
+ // It means that in the original range we have ranges
+ // [MIN, A], ... , [B, MAX]
+ // And the result should be [MIN, -B], ..., [-A, MAX]
+ Result.emplace_back(MIN, ValueFactory.getValue(-Last->From()));
+ // We already negated Last, so we can skip it.
+ End = Last;
} else {
- // Add separate range for the lowest value.
- newRanges = F.add(newRanges, Range(MIN, MIN));
- // Skip adding the second range in case when [from, to] are [MIN, MIN].
- if (to != MIN) {
- newRanges = F.add(newRanges, Range(BV.getValue(-to), MAX));
- }
+ // Add a separate range for the lowest value.
+ Result.emplace_back(MIN, MIN);
}
+
+ // Skip adding the second range in case when [From, To] are [MIN, MIN].
+ if (To != MIN) {
+ Result.emplace_back(ValueFactory.getValue(-To), MAX);
+ }
+
// Skip the first range in the loop.
- ++i;
+ ++It;
}
// Negate all other ranges.
- for (iterator e = end(); i != e; ++i) {
+ for (; It != End; ++It) {
// Negate int values.
- const llvm::APSInt &newFrom = BV.getValue(-i->To());
- const llvm::APSInt &newTo = BV.getValue(-i->From());
- // Add a negated range.
- newRanges = F.add(newRanges, Range(newFrom, newTo));
- }
+ const llvm::APSInt &NewFrom = ValueFactory.getValue(-It->To());
+ const llvm::APSInt &NewTo = ValueFactory.getValue(-It->From());
- if (newRanges.isSingleton())
- return newRanges;
-
- // Try to find and unite next ranges:
- // [MIN, MIN] & [MIN + 1, N] => [MIN, N].
- iterator iter1 = newRanges.begin();
- iterator iter2 = std::next(iter1);
-
- if (iter1->To() == MIN && (iter2->From() - 1) == MIN) {
- const llvm::APSInt &to = iter2->To();
- // remove adjacent ranges
- newRanges = F.remove(newRanges, *iter1);
- newRanges = F.remove(newRanges, *newRanges.begin());
- // add united range
- newRanges = F.add(newRanges, Range(MIN, to));
+ // Add a negated range.
+ Result.emplace_back(NewFrom, NewTo);
}
- return newRanges;
+ llvm::sort(Result);
+ return makePersistent(std::move(Result));
}
-RangeSet RangeSet::Delete(BasicValueFactory &BV, Factory &F,
- const llvm::APSInt &Point) const {
+RangeSet RangeSet::Factory::deletePoint(RangeSet From,
+ const llvm::APSInt &Point) {
+ if (!From.contains(Point))
+ return From;
+
llvm::APSInt Upper = Point;
llvm::APSInt Lower = Point;
@@ -373,22 +491,17 @@ RangeSet RangeSet::Delete(BasicValueFactory &BV, Factory &F,
--Lower;
// Notice that the lower bound is greater than the upper bound.
- return Intersect(BV, F, Upper, Lower);
+ return intersect(From, Upper, Lower);
}
-void RangeSet::print(raw_ostream &os) const {
- bool isFirst = true;
- os << "{ ";
- for (iterator i = begin(), e = end(); i != e; ++i) {
- if (isFirst)
- isFirst = false;
- else
- os << ", ";
+void Range::dump(raw_ostream &OS) const {
+ OS << '[' << toString(From(), 10) << ", " << toString(To(), 10) << ']';
+}
- os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
- << ']';
- }
- os << " }";
+void RangeSet::dump(raw_ostream &OS) const {
+ OS << "{ ";
+ llvm::interleaveComma(*this, OS, [&OS](const Range &R) { R.dump(OS); });
+ OS << " }";
}
REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(SymbolSet, SymbolRef)
@@ -436,33 +549,43 @@ public:
SymbolRef Sym);
/// Merge classes for the given symbols and return a new state.
- LLVM_NODISCARD static inline ProgramStateRef
- merge(BasicValueFactory &BV, RangeSet::Factory &F, ProgramStateRef State,
- SymbolRef First, SymbolRef Second);
+ LLVM_NODISCARD static inline ProgramStateRef merge(RangeSet::Factory &F,
+ ProgramStateRef State,
+ SymbolRef First,
+ SymbolRef Second);
// Merge this class with the given class and return a new state.
- LLVM_NODISCARD inline ProgramStateRef merge(BasicValueFactory &BV,
- RangeSet::Factory &F,
- ProgramStateRef State,
- EquivalenceClass Other);
+ LLVM_NODISCARD inline ProgramStateRef
+ merge(RangeSet::Factory &F, ProgramStateRef State, EquivalenceClass Other);
/// Return a set of class members for the given state.
- LLVM_NODISCARD inline SymbolSet getClassMembers(ProgramStateRef State);
+ LLVM_NODISCARD inline SymbolSet getClassMembers(ProgramStateRef State) const;
+
/// Return true if the current class is trivial in the given state.
- LLVM_NODISCARD inline bool isTrivial(ProgramStateRef State);
+ /// A class is trivial if and only if there are no member relations stored
+ /// for it in State/ClassMembers.
+ /// An equivalence class with one member might seem like it does not hold any
+ /// meaningful information, i.e. that it is a tautology. However, during the
+ /// removal of dead symbols we do not remove classes with one member for
+ /// resource and performance reasons. Consequently, a class with one member is
+ /// not necessarily trivial. It could happen that we have a class with two
+ /// members and then during the removal of dead symbols we remove one of its
+ /// members. In this case, the class is still non-trivial (it still has the
+ /// mappings in ClassMembers), even though it has only one member.
+ LLVM_NODISCARD inline bool isTrivial(ProgramStateRef State) const;
+
/// Return true if the current class is trivial and its only member is dead.
LLVM_NODISCARD inline bool isTriviallyDead(ProgramStateRef State,
- SymbolReaper &Reaper);
+ SymbolReaper &Reaper) const;
LLVM_NODISCARD static inline ProgramStateRef
- markDisequal(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, SymbolRef First, SymbolRef Second);
+ markDisequal(RangeSet::Factory &F, ProgramStateRef State, SymbolRef First,
+ SymbolRef Second);
LLVM_NODISCARD static inline ProgramStateRef
- markDisequal(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, EquivalenceClass First,
- EquivalenceClass Second);
+ markDisequal(RangeSet::Factory &F, ProgramStateRef State,
+ EquivalenceClass First, EquivalenceClass Second);
LLVM_NODISCARD inline ProgramStateRef
- markDisequal(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, EquivalenceClass Other) const;
+ markDisequal(RangeSet::Factory &F, ProgramStateRef State,
+ EquivalenceClass Other) const;
LLVM_NODISCARD static inline ClassSet
getDisequalClasses(ProgramStateRef State, SymbolRef Sym);
LLVM_NODISCARD inline ClassSet
@@ -470,9 +593,23 @@ public:
LLVM_NODISCARD inline ClassSet
getDisequalClasses(DisequalityMapTy Map, ClassSet::Factory &Factory) const;
+ LLVM_NODISCARD static inline Optional<bool> areEqual(ProgramStateRef State,
+ EquivalenceClass First,
+ EquivalenceClass Second);
LLVM_NODISCARD static inline Optional<bool>
areEqual(ProgramStateRef State, SymbolRef First, SymbolRef Second);
+ /// Iterate over all symbols and try to simplify them.
+ LLVM_NODISCARD static inline ProgramStateRef simplify(SValBuilder &SVB,
+ RangeSet::Factory &F,
+ ProgramStateRef State,
+ EquivalenceClass Class);
+
+ void dumpToStream(ProgramStateRef State, raw_ostream &os) const;
+ LLVM_DUMP_METHOD void dump(ProgramStateRef State) const {
+ dumpToStream(State, llvm::errs());
+ }
+
/// Check equivalence data for consistency.
LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED static bool
isClassDataConsistent(ProgramStateRef State);
@@ -515,15 +652,13 @@ private:
}
static inline SymbolSet::Factory &getMembersFactory(ProgramStateRef State);
- inline ProgramStateRef mergeImpl(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, SymbolSet Members,
- EquivalenceClass Other,
+ inline ProgramStateRef mergeImpl(RangeSet::Factory &F, ProgramStateRef State,
+ SymbolSet Members, EquivalenceClass Other,
SymbolSet OtherMembers);
- static inline void
+ static inline bool
addToDisequalityInfo(DisequalityMapTy &Info, ConstraintRangeTy &Constraints,
- BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, EquivalenceClass First,
- EquivalenceClass Second);
+ RangeSet::Factory &F, ProgramStateRef State,
+ EquivalenceClass First, EquivalenceClass Second);
/// This is a unique identifier of the class.
uintptr_t ID;
@@ -533,6 +668,15 @@ private:
// Constraint functions
//===----------------------------------------------------------------------===//
+LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED bool
+areFeasible(ConstraintRangeTy Constraints) {
+ return llvm::none_of(
+ Constraints,
+ [](const std::pair<EquivalenceClass, RangeSet> &ClassConstraint) {
+ return ClassConstraint.second.isEmpty();
+ });
+}
+
LLVM_NODISCARD inline const RangeSet *getConstraint(ProgramStateRef State,
EquivalenceClass Class) {
return State->get<ConstraintRange>(Class);
@@ -543,70 +687,52 @@ LLVM_NODISCARD inline const RangeSet *getConstraint(ProgramStateRef State,
return getConstraint(State, EquivalenceClass::find(State, Sym));
}
+LLVM_NODISCARD ProgramStateRef setConstraint(ProgramStateRef State,
+ EquivalenceClass Class,
+ RangeSet Constraint) {
+ return State->set<ConstraintRange>(Class, Constraint);
+}
+
+LLVM_NODISCARD ProgramStateRef setConstraints(ProgramStateRef State,
+ ConstraintRangeTy Constraints) {
+ return State->set<ConstraintRange>(Constraints);
+}
+
//===----------------------------------------------------------------------===//
// Equality/diseqiality abstraction
//===----------------------------------------------------------------------===//
-/// A small helper structure representing symbolic equality.
+/// A small helper function for detecting symbolic (dis)equality.
///
/// Equality check can have different forms (like a == b or a - b) and this
/// class encapsulates those away if the only thing the user wants to check -
-/// whether it's equality/diseqiality or not and have an easy access to the
-/// compared symbols.
-struct EqualityInfo {
-public:
- SymbolRef Left, Right;
- // true for equality and false for disequality.
- bool IsEquality = true;
-
- void invert() { IsEquality = !IsEquality; }
- /// Extract equality information from the given symbol and the constants.
- ///
- /// This function assumes the following expression Sym + Adjustment != Int.
- /// It is a default because the most widespread case of the equality check
- /// is (A == B) + 0 != 0.
- static Optional<EqualityInfo> extract(SymbolRef Sym, const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
- // As of now, the only equality form supported is Sym + 0 != 0.
- if (!Int.isNullValue() || !Adjustment.isNullValue())
- return llvm::None;
-
- return extract(Sym);
- }
- /// Extract equality information from the given symbol.
- static Optional<EqualityInfo> extract(SymbolRef Sym) {
- return EqualityExtractor().Visit(Sym);
+/// whether it's equality/diseqiality or not.
+///
+/// \returns true if assuming this Sym to be true means equality of operands
+/// false if it means disequality of operands
+/// None otherwise
+Optional<bool> meansEquality(const SymSymExpr *Sym) {
+ switch (Sym->getOpcode()) {
+ case BO_Sub:
+ // This case is: A - B != 0 -> disequality check.
+ return false;
+ case BO_EQ:
+ // This case is: A == B != 0 -> equality check.
+ return true;
+ case BO_NE:
+ // This case is: A != B != 0 -> diseqiality check.
+ return false;
+ default:
+ return llvm::None;
}
-
-private:
- class EqualityExtractor
- : public SymExprVisitor<EqualityExtractor, Optional<EqualityInfo>> {
- public:
- Optional<EqualityInfo> VisitSymSymExpr(const SymSymExpr *Sym) const {
- switch (Sym->getOpcode()) {
- case BO_Sub:
- // This case is: A - B != 0 -> disequality check.
- return EqualityInfo{Sym->getLHS(), Sym->getRHS(), false};
- case BO_EQ:
- // This case is: A == B != 0 -> equality check.
- return EqualityInfo{Sym->getLHS(), Sym->getRHS(), true};
- case BO_NE:
- // This case is: A != B != 0 -> diseqiality check.
- return EqualityInfo{Sym->getLHS(), Sym->getRHS(), false};
- default:
- return llvm::None;
- }
- }
- };
-};
+}
//===----------------------------------------------------------------------===//
// Intersection functions
//===----------------------------------------------------------------------===//
template <class SecondTy, class... RestTy>
-LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
- RangeSet::Factory &F, RangeSet Head,
+LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
SecondTy Second, RestTy... Tail);
template <class... RangeTy> struct IntersectionTraits;
@@ -629,15 +755,14 @@ struct IntersectionTraits<OptionalOrPointer, TailTy...> {
};
template <class EndTy>
-LLVM_NODISCARD inline EndTy intersect(BasicValueFactory &BV,
- RangeSet::Factory &F, EndTy End) {
+LLVM_NODISCARD inline EndTy intersect(RangeSet::Factory &F, EndTy End) {
// If the list contains only RangeSet or Optional<RangeSet>, simply return
// that range set.
return End;
}
LLVM_NODISCARD LLVM_ATTRIBUTE_UNUSED inline Optional<RangeSet>
-intersect(BasicValueFactory &BV, RangeSet::Factory &F, const RangeSet *End) {
+intersect(RangeSet::Factory &F, const RangeSet *End) {
// This is an extraneous conversion from a raw pointer into Optional<RangeSet>
if (End) {
return *End;
@@ -646,25 +771,23 @@ intersect(BasicValueFactory &BV, RangeSet::Factory &F, const RangeSet *End) {
}
template <class... RestTy>
-LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
- RangeSet::Factory &F, RangeSet Head,
+LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
RangeSet Second, RestTy... Tail) {
// Here we call either the <RangeSet,RangeSet,...> or <RangeSet,...> version
// of the function and can be sure that the result is RangeSet.
- return intersect(BV, F, Head.Intersect(BV, F, Second), Tail...);
+ return intersect(F, F.intersect(Head, Second), Tail...);
}
template <class SecondTy, class... RestTy>
-LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
- RangeSet::Factory &F, RangeSet Head,
+LLVM_NODISCARD inline RangeSet intersect(RangeSet::Factory &F, RangeSet Head,
SecondTy Second, RestTy... Tail) {
if (Second) {
// Here we call the <RangeSet,RangeSet,...> version of the function...
- return intersect(BV, F, Head, *Second, Tail...);
+ return intersect(F, Head, *Second, Tail...);
}
// ...and here it is either <RangeSet,RangeSet,...> or <RangeSet,...>, which
// means that the result is definitely RangeSet.
- return intersect(BV, F, Head, Tail...);
+ return intersect(F, Head, Tail...);
}
/// Main generic intersect function.
@@ -689,12 +812,12 @@ LLVM_NODISCARD inline RangeSet intersect(BasicValueFactory &BV,
template <class HeadTy, class SecondTy, class... RestTy>
LLVM_NODISCARD inline
typename IntersectionTraits<HeadTy, SecondTy, RestTy...>::Type
- intersect(BasicValueFactory &BV, RangeSet::Factory &F, HeadTy Head,
- SecondTy Second, RestTy... Tail) {
+ intersect(RangeSet::Factory &F, HeadTy Head, SecondTy Second,
+ RestTy... Tail) {
if (Head) {
- return intersect(BV, F, *Head, Second, Tail...);
+ return intersect(F, *Head, Second, Tail...);
}
- return intersect(BV, F, Second, Tail...);
+ return intersect(F, Second, Tail...);
}
//===----------------------------------------------------------------------===//
@@ -710,9 +833,9 @@ class SymbolicRangeInferrer
: public SymExprVisitor<SymbolicRangeInferrer, RangeSet> {
public:
template <class SourceType>
- static RangeSet inferRange(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef State, SourceType Origin) {
- SymbolicRangeInferrer Inferrer(BV, F, State);
+ static RangeSet inferRange(RangeSet::Factory &F, ProgramStateRef State,
+ SourceType Origin) {
+ SymbolicRangeInferrer Inferrer(F, State);
return Inferrer.infer(Origin);
}
@@ -733,13 +856,18 @@ public:
}
RangeSet VisitSymSymExpr(const SymSymExpr *Sym) {
- return VisitBinaryOperator(Sym);
+ return intersect(
+ RangeFactory,
+ // If Sym is (dis)equality, we might have some information
+ // on that in our equality classes data structure.
+ getRangeForEqualities(Sym),
+ // And we should always check what we can get from the operands.
+ VisitBinaryOperator(Sym));
}
private:
- SymbolicRangeInferrer(BasicValueFactory &BV, RangeSet::Factory &F,
- ProgramStateRef S)
- : ValueFactory(BV), RangeFactory(F), State(S) {}
+ SymbolicRangeInferrer(RangeSet::Factory &F, ProgramStateRef S)
+ : ValueFactory(F.getValueFactory()), RangeFactory(F), State(S) {}
/// Infer range information from the given integer constant.
///
@@ -763,26 +891,25 @@ private:
}
RangeSet infer(SymbolRef Sym) {
- if (Optional<RangeSet> ConstraintBasedRange = intersect(
- ValueFactory, RangeFactory, getConstraint(State, Sym),
- // If Sym is a difference of symbols A - B, then maybe we have range
- // set stored for B - A.
- //
- // If we have range set stored for both A - B and B - A then
- // calculate the effective range set by intersecting the range set
- // for A - B and the negated range set of B - A.
- getRangeForNegatedSub(Sym), getRangeForEqualities(Sym))) {
- return *ConstraintBasedRange;
- }
-
- // If Sym is a comparison expression (except <=>),
- // find any other comparisons with the same operands.
- // See function description.
- if (Optional<RangeSet> CmpRangeSet = getRangeForComparisonSymbol(Sym)) {
- return *CmpRangeSet;
- }
-
- return Visit(Sym);
+ return intersect(
+ RangeFactory,
+ // Of course, we should take the constraint directly associated with
+ // this symbol into consideration.
+ getConstraint(State, Sym),
+ // If Sym is a difference of symbols A - B, then maybe we have range
+ // set stored for B - A.
+ //
+ // If we have range set stored for both A - B and B - A then
+ // calculate the effective range set by intersecting the range set
+ // for A - B and the negated range set of B - A.
+ getRangeForNegatedSub(Sym),
+ // If Sym is a comparison expression (except <=>),
+ // find any other comparisons with the same operands.
+ // See function description.
+ getRangeForComparisonSymbol(Sym),
+ // Apart from the Sym itself, we can infer quite a lot if we look
+ // into subexpressions of Sym.
+ Visit(Sym));
}
RangeSet infer(EquivalenceClass Class) {
@@ -940,7 +1067,7 @@ private:
/// Return a range set subtracting zero from \p Domain.
RangeSet assumeNonZero(RangeSet Domain, QualType T) {
APSIntType IntType = ValueFactory.getAPSIntType(T);
- return Domain.Delete(ValueFactory, RangeFactory, IntType.getZeroValue());
+ return RangeFactory.deletePoint(Domain, IntType.getZeroValue());
}
// FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
@@ -963,7 +1090,7 @@ private:
SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), T);
if (const RangeSet *NegatedRange = getConstraint(State, NegatedSym)) {
- return NegatedRange->Negate(ValueFactory, RangeFactory);
+ return RangeFactory.negate(*NegatedRange);
}
}
}
@@ -1054,17 +1181,21 @@ private:
return llvm::None;
}
- Optional<RangeSet> getRangeForEqualities(SymbolRef Sym) {
- Optional<EqualityInfo> Equality = EqualityInfo::extract(Sym);
+ Optional<RangeSet> getRangeForEqualities(const SymSymExpr *Sym) {
+ Optional<bool> Equality = meansEquality(Sym);
if (!Equality)
return llvm::None;
- if (Optional<bool> AreEqual = EquivalenceClass::areEqual(
- State, Equality->Left, Equality->Right)) {
- if (*AreEqual == Equality->IsEquality) {
+ if (Optional<bool> AreEqual =
+ EquivalenceClass::areEqual(State, Sym->getLHS(), Sym->getRHS())) {
+ // Here we cover two cases at once:
+ // * if Sym is equality and its operands are known to be equal -> true
+ // * if Sym is disequality and its operands are disequal -> true
+ if (*AreEqual == *Equality) {
return getTrueRange(Sym->getType());
}
+ // Opposite combinations result in false.
return getFalseRange(Sym->getType());
}
@@ -1251,13 +1382,215 @@ RangeSet SymbolicRangeInferrer::VisitBinaryOperator<BO_Rem>(Range LHS,
}
//===----------------------------------------------------------------------===//
+// Constraint assignment logic
+//===----------------------------------------------------------------------===//
+
+/// ConstraintAssignorBase is a small utility class that unifies visitor
+/// for ranges with a visitor for constraints (rangeset/range/constant).
+///
+/// It is designed to have one derived class, but generally it can have more.
+/// Derived class can control which types we handle by defining methods of the
+/// following form:
+///
+/// bool handle${SYMBOL}To${CONSTRAINT}(const SYMBOL *Sym,
+/// CONSTRAINT Constraint);
+///
+/// where SYMBOL is the type of the symbol (e.g. SymSymExpr, SymbolCast, etc.)
+/// CONSTRAINT is the type of constraint (RangeSet/Range/Const)
+/// return value signifies whether we should try other handle methods
+/// (i.e. false would mean to stop right after calling this method)
+template <class Derived> class ConstraintAssignorBase {
+public:
+ using Const = const llvm::APSInt &;
+
+#define DISPATCH(CLASS) return assign##CLASS##Impl(cast<CLASS>(Sym), Constraint)
+
+#define ASSIGN(CLASS, TO, SYM, CONSTRAINT) \
+ if (!static_cast<Derived *>(this)->assign##CLASS##To##TO(SYM, CONSTRAINT)) \
+ return false
+
+ void assign(SymbolRef Sym, RangeSet Constraint) {
+ assignImpl(Sym, Constraint);
+ }
+
+ bool assignImpl(SymbolRef Sym, RangeSet Constraint) {
+ switch (Sym->getKind()) {
+#define SYMBOL(Id, Parent) \
+ case SymExpr::Id##Kind: \
+ DISPATCH(Id);
+#include "clang/StaticAnalyzer/Core/PathSensitive/Symbols.def"
+ }
+ llvm_unreachable("Unknown SymExpr kind!");
+ }
+
+#define DEFAULT_ASSIGN(Id) \
+ bool assign##Id##To##RangeSet(const Id *Sym, RangeSet Constraint) { \
+ return true; \
+ } \
+ bool assign##Id##To##Range(const Id *Sym, Range Constraint) { return true; } \
+ bool assign##Id##To##Const(const Id *Sym, Const Constraint) { return true; }
+
+ // When we dispatch for constraint types, we first try to check
+ // if the new constraint is the constant and try the corresponding
+ // assignor methods. If it didn't interrupt, we can proceed to the
+ // range, and finally to the range set.
+#define CONSTRAINT_DISPATCH(Id) \
+ if (const llvm::APSInt *Const = Constraint.getConcreteValue()) { \
+ ASSIGN(Id, Const, Sym, *Const); \
+ } \
+ if (Constraint.size() == 1) { \
+ ASSIGN(Id, Range, Sym, *Constraint.begin()); \
+ } \
+ ASSIGN(Id, RangeSet, Sym, Constraint)
+
+ // Our internal assign method first tries to call assignor methods for all
+ // constraint types that apply. And if not interrupted, continues with its
+ // parent class.
+#define SYMBOL(Id, Parent) \
+ bool assign##Id##Impl(const Id *Sym, RangeSet Constraint) { \
+ CONSTRAINT_DISPATCH(Id); \
+ DISPATCH(Parent); \
+ } \
+ DEFAULT_ASSIGN(Id)
+#define ABSTRACT_SYMBOL(Id, Parent) SYMBOL(Id, Parent)
+#include "clang/StaticAnalyzer/Core/PathSensitive/Symbols.def"
+
+ // Default implementations for the top class that doesn't have parents.
+ bool assignSymExprImpl(const SymExpr *Sym, RangeSet Constraint) {
+ CONSTRAINT_DISPATCH(SymExpr);
+ return true;
+ }
+ DEFAULT_ASSIGN(SymExpr);
+
+#undef DISPATCH
+#undef CONSTRAINT_DISPATCH
+#undef DEFAULT_ASSIGN
+#undef ASSIGN
+};
+
+/// A little component aggregating all of the reasoning we have about
+/// assigning new constraints to symbols.
+///
+/// The main purpose of this class is to associate constraints to symbols,
+/// and impose additional constraints on other symbols, when we can imply
+/// them.
+///
+/// It has a nice symmetry with SymbolicRangeInferrer. When the latter
+/// can provide more precise ranges by looking into the operands of the
+/// expression in question, ConstraintAssignor looks into the operands
+/// to see if we can imply more from the new constraint.
+class ConstraintAssignor : public ConstraintAssignorBase<ConstraintAssignor> {
+public:
+ template <class ClassOrSymbol>
+ LLVM_NODISCARD static ProgramStateRef
+ assign(ProgramStateRef State, SValBuilder &Builder, RangeSet::Factory &F,
+ ClassOrSymbol CoS, RangeSet NewConstraint) {
+ if (!State || NewConstraint.isEmpty())
+ return nullptr;
+
+ ConstraintAssignor Assignor{State, Builder, F};
+ return Assignor.assign(CoS, NewConstraint);
+ }
+
+ inline bool assignSymExprToConst(const SymExpr *Sym, Const Constraint);
+ inline bool assignSymSymExprToRangeSet(const SymSymExpr *Sym,
+ RangeSet Constraint);
+
+private:
+ ConstraintAssignor(ProgramStateRef State, SValBuilder &Builder,
+ RangeSet::Factory &F)
+ : State(State), Builder(Builder), RangeFactory(F) {}
+ using Base = ConstraintAssignorBase<ConstraintAssignor>;
+
+ /// Base method for handling new constraints for symbols.
+ LLVM_NODISCARD ProgramStateRef assign(SymbolRef Sym, RangeSet NewConstraint) {
+ // All constraints are actually associated with equivalence classes, and
+ // that's what we are going to do first.
+ State = assign(EquivalenceClass::find(State, Sym), NewConstraint);
+ if (!State)
+ return nullptr;
+
+ // And after that we can check what other things we can get from this
+ // constraint.
+ Base::assign(Sym, NewConstraint);
+ return State;
+ }
+
+ /// Base method for handling new constraints for classes.
+ LLVM_NODISCARD ProgramStateRef assign(EquivalenceClass Class,
+ RangeSet NewConstraint) {
+ // There is a chance that we might need to update constraints for the
+ // classes that are known to be disequal to Class.
+ //
+ // In order for this to be even possible, the new constraint should
+ // be simply a constant because we can't reason about range disequalities.
+ if (const llvm::APSInt *Point = NewConstraint.getConcreteValue()) {
+
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+ ConstraintRangeTy::Factory &CF = State->get_context<ConstraintRange>();
+
+ // Add new constraint.
+ Constraints = CF.add(Constraints, Class, NewConstraint);
+
+ for (EquivalenceClass DisequalClass : Class.getDisequalClasses(State)) {
+ RangeSet UpdatedConstraint = SymbolicRangeInferrer::inferRange(
+ RangeFactory, State, DisequalClass);
+
+ UpdatedConstraint = RangeFactory.deletePoint(UpdatedConstraint, *Point);
+
+ // If we end up with at least one of the disequal classes to be
+ // constrained with an empty range-set, the state is infeasible.
+ if (UpdatedConstraint.isEmpty())
+ return nullptr;
+
+ Constraints = CF.add(Constraints, DisequalClass, UpdatedConstraint);
+ }
+ assert(areFeasible(Constraints) && "Constraint manager shouldn't produce "
+ "a state with infeasible constraints");
+
+ return setConstraints(State, Constraints);
+ }
+
+ return setConstraint(State, Class, NewConstraint);
+ }
+
+ ProgramStateRef trackDisequality(ProgramStateRef State, SymbolRef LHS,
+ SymbolRef RHS) {
+ return EquivalenceClass::markDisequal(RangeFactory, State, LHS, RHS);
+ }
+
+ ProgramStateRef trackEquality(ProgramStateRef State, SymbolRef LHS,
+ SymbolRef RHS) {
+ return EquivalenceClass::merge(RangeFactory, State, LHS, RHS);
+ }
+
+ LLVM_NODISCARD Optional<bool> interpreteAsBool(RangeSet Constraint) {
+ assert(!Constraint.isEmpty() && "Empty ranges shouldn't get here");
+
+ if (Constraint.getConcreteValue())
+ return !Constraint.getConcreteValue()->isNullValue();
+
+ APSIntType T{Constraint.getMinValue()};
+ Const Zero = T.getZeroValue();
+ if (!Constraint.contains(Zero))
+ return true;
+
+ return llvm::None;
+ }
+
+ ProgramStateRef State;
+ SValBuilder &Builder;
+ RangeSet::Factory &RangeFactory;
+};
+
+//===----------------------------------------------------------------------===//
// Constraint manager implementation details
//===----------------------------------------------------------------------===//
class RangeConstraintManager : public RangedConstraintManager {
public:
RangeConstraintManager(ExprEngine *EE, SValBuilder &SVB)
- : RangedConstraintManager(EE, SVB) {}
+ : RangedConstraintManager(EE, SVB), F(getBasicVals()) {}
//===------------------------------------------------------------------===//
// Implementation for interface from ConstraintManager.
@@ -1284,6 +1617,15 @@ public:
void printJson(raw_ostream &Out, ProgramStateRef State, const char *NL = "\n",
unsigned int Space = 0, bool IsDot = false) const override;
+ void printConstraints(raw_ostream &Out, ProgramStateRef State,
+ const char *NL = "\n", unsigned int Space = 0,
+ bool IsDot = false) const;
+ void printEquivalenceClasses(raw_ostream &Out, ProgramStateRef State,
+ const char *NL = "\n", unsigned int Space = 0,
+ bool IsDot = false) const;
+ void printDisequalities(raw_ostream &Out, ProgramStateRef State,
+ const char *NL = "\n", unsigned int Space = 0,
+ bool IsDot = false) const;
//===------------------------------------------------------------------===//
// Implementation for interface from RangedConstraintManager.
@@ -1326,6 +1668,10 @@ private:
RangeSet getRange(ProgramStateRef State, SymbolRef Sym);
RangeSet getRange(ProgramStateRef State, EquivalenceClass Class);
+ ProgramStateRef setRange(ProgramStateRef State, SymbolRef Sym,
+ RangeSet Range);
+ ProgramStateRef setRange(ProgramStateRef State, EquivalenceClass Class,
+ RangeSet Range);
RangeSet getSymLTRange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
@@ -1342,88 +1688,63 @@ private:
RangeSet getSymGERange(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment);
+};
- //===------------------------------------------------------------------===//
- // Equality tracking implementation
- //===------------------------------------------------------------------===//
-
- ProgramStateRef trackEQ(RangeSet NewConstraint, ProgramStateRef State,
- SymbolRef Sym, const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
- return track<true>(NewConstraint, State, Sym, Int, Adjustment);
+bool ConstraintAssignor::assignSymExprToConst(const SymExpr *Sym,
+ const llvm::APSInt &Constraint) {
+ llvm::SmallSet<EquivalenceClass, 4> SimplifiedClasses;
+ // Iterate over all equivalence classes and try to simplify them.
+ ClassMembersTy Members = State->get<ClassMembers>();
+ for (std::pair<EquivalenceClass, SymbolSet> ClassToSymbolSet : Members) {
+ EquivalenceClass Class = ClassToSymbolSet.first;
+ State = EquivalenceClass::simplify(Builder, RangeFactory, State, Class);
+ if (!State)
+ return false;
+ SimplifiedClasses.insert(Class);
}
- ProgramStateRef trackNE(RangeSet NewConstraint, ProgramStateRef State,
- SymbolRef Sym, const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
- return track<false>(NewConstraint, State, Sym, Int, Adjustment);
+ // Trivial equivalence classes (those that have only one symbol member) are
+ // not stored in the State. Thus, we must skim through the constraints as
+ // well. And we try to simplify symbols in the constraints.
+ ConstraintRangeTy Constraints = State->get<ConstraintRange>();
+ for (std::pair<EquivalenceClass, RangeSet> ClassConstraint : Constraints) {
+ EquivalenceClass Class = ClassConstraint.first;
+ if (SimplifiedClasses.count(Class)) // Already simplified.
+ continue;
+ State = EquivalenceClass::simplify(Builder, RangeFactory, State, Class);
+ if (!State)
+ return false;
}
- template <bool EQ>
- ProgramStateRef track(RangeSet NewConstraint, ProgramStateRef State,
- SymbolRef Sym, const llvm::APSInt &Int,
- const llvm::APSInt &Adjustment) {
- if (NewConstraint.isEmpty())
- // This is an infeasible assumption.
- return nullptr;
+ return true;
+}
- ProgramStateRef NewState = setConstraint(State, Sym, NewConstraint);
- if (auto Equality = EqualityInfo::extract(Sym, Int, Adjustment)) {
- // If the original assumption is not Sym + Adjustment !=/</> Int,
- // we should invert IsEquality flag.
- Equality->IsEquality = Equality->IsEquality != EQ;
- return track(NewState, *Equality);
- }
+bool ConstraintAssignor::assignSymSymExprToRangeSet(const SymSymExpr *Sym,
+ RangeSet Constraint) {
+ Optional<bool> ConstraintAsBool = interpreteAsBool(Constraint);
- return NewState;
- }
+ if (!ConstraintAsBool)
+ return true;
- ProgramStateRef track(ProgramStateRef State, EqualityInfo ToTrack) {
- if (ToTrack.IsEquality) {
- return trackEquality(State, ToTrack.Left, ToTrack.Right);
+ if (Optional<bool> Equality = meansEquality(Sym)) {
+ // Here we cover two cases:
+ // * if Sym is equality and the new constraint is true -> Sym's operands
+ // should be marked as equal
+ // * if Sym is disequality and the new constraint is false -> Sym's
+ // operands should be also marked as equal
+ if (*Equality == *ConstraintAsBool) {
+ State = trackEquality(State, Sym->getLHS(), Sym->getRHS());
+ } else {
+ // Other combinations leave as with disequal operands.
+ State = trackDisequality(State, Sym->getLHS(), Sym->getRHS());
}
- return trackDisequality(State, ToTrack.Left, ToTrack.Right);
- }
- ProgramStateRef trackDisequality(ProgramStateRef State, SymbolRef LHS,
- SymbolRef RHS) {
- return EquivalenceClass::markDisequal(getBasicVals(), F, State, LHS, RHS);
- }
-
- ProgramStateRef trackEquality(ProgramStateRef State, SymbolRef LHS,
- SymbolRef RHS) {
- return EquivalenceClass::merge(getBasicVals(), F, State, LHS, RHS);
- }
-
- LLVM_NODISCARD inline ProgramStateRef setConstraint(ProgramStateRef State,
- EquivalenceClass Class,
- RangeSet Constraint) {
- ConstraintRangeTy Constraints = State->get<ConstraintRange>();
- ConstraintRangeTy::Factory &CF = State->get_context<ConstraintRange>();
-
- // Add new constraint.
- Constraints = CF.add(Constraints, Class, Constraint);
-
- // There is a chance that we might need to update constraints for the
- // classes that are known to be disequal to Class.
- //
- // In order for this to be even possible, the new constraint should
- // be simply a constant because we can't reason about range disequalities.
- if (const llvm::APSInt *Point = Constraint.getConcreteValue())
- for (EquivalenceClass DisequalClass : Class.getDisequalClasses(State)) {
- RangeSet UpdatedConstraint =
- getRange(State, DisequalClass).Delete(getBasicVals(), F, *Point);
- Constraints = CF.add(Constraints, DisequalClass, UpdatedConstraint);
- }
-
- return State->set<ConstraintRange>(Constraints);
+ if (!State)
+ return false;
}
- LLVM_NODISCARD inline ProgramStateRef
- setConstraint(ProgramStateRef State, SymbolRef Sym, RangeSet Constraint) {
- return setConstraint(State, EquivalenceClass::find(State, Sym), Constraint);
- }
-};
+ return true;
+}
} // end anonymous namespace
@@ -1455,8 +1776,19 @@ ConstraintMap ento::getConstraintMap(ProgramStateRef State) {
// EqualityClass implementation details
//===----------------------------------------------------------------------===//
+LLVM_DUMP_METHOD void EquivalenceClass::dumpToStream(ProgramStateRef State,
+ raw_ostream &os) const {
+ SymbolSet ClassMembers = getClassMembers(State);
+ for (const SymbolRef &MemberSym : ClassMembers) {
+ MemberSym->dump();
+ os << "\n";
+ }
+}
+
inline EquivalenceClass EquivalenceClass::find(ProgramStateRef State,
SymbolRef Sym) {
+ assert(State && "State should not be null");
+ assert(Sym && "Symbol should not be null");
// We store far from all Symbol -> Class mappings
if (const EquivalenceClass *NontrivialClass = State->get<ClassMap>(Sym))
return *NontrivialClass;
@@ -1465,19 +1797,17 @@ inline EquivalenceClass EquivalenceClass::find(ProgramStateRef State,
return Sym;
}
-inline ProgramStateRef EquivalenceClass::merge(BasicValueFactory &BV,
- RangeSet::Factory &F,
+inline ProgramStateRef EquivalenceClass::merge(RangeSet::Factory &F,
ProgramStateRef State,
SymbolRef First,
SymbolRef Second) {
EquivalenceClass FirstClass = find(State, First);
EquivalenceClass SecondClass = find(State, Second);
- return FirstClass.merge(BV, F, State, SecondClass);
+ return FirstClass.merge(F, State, SecondClass);
}
-inline ProgramStateRef EquivalenceClass::merge(BasicValueFactory &BV,
- RangeSet::Factory &F,
+inline ProgramStateRef EquivalenceClass::merge(RangeSet::Factory &F,
ProgramStateRef State,
EquivalenceClass Other) {
// It is already the same class.
@@ -1505,15 +1835,14 @@ inline ProgramStateRef EquivalenceClass::merge(BasicValueFactory &BV,
// its members. Merging is not a trivial operation, so it's easier to
// merge the smaller class into the bigger one.
if (Members.getHeight() >= OtherMembers.getHeight()) {
- return mergeImpl(BV, F, State, Members, Other, OtherMembers);
+ return mergeImpl(F, State, Members, Other, OtherMembers);
} else {
- return Other.mergeImpl(BV, F, State, OtherMembers, *this, Members);
+ return Other.mergeImpl(F, State, OtherMembers, *this, Members);
}
}
inline ProgramStateRef
-EquivalenceClass::mergeImpl(BasicValueFactory &ValueFactory,
- RangeSet::Factory &RangeFactory,
+EquivalenceClass::mergeImpl(RangeSet::Factory &RangeFactory,
ProgramStateRef State, SymbolSet MyMembers,
EquivalenceClass Other, SymbolSet OtherMembers) {
// Essentially what we try to recreate here is some kind of union-find
@@ -1536,7 +1865,7 @@ EquivalenceClass::mergeImpl(BasicValueFactory &ValueFactory,
// Intersection here makes perfect sense because both of these constraints
// must hold for the whole new class.
if (Optional<RangeSet> NewClassConstraint =
- intersect(ValueFactory, RangeFactory, getConstraint(State, *this),
+ intersect(RangeFactory, getConstraint(State, *this),
getConstraint(State, Other))) {
// NOTE: Essentially, NewClassConstraint should NEVER be infeasible because
// range inferrer shouldn't generate ranges incompatible with
@@ -1552,6 +1881,9 @@ EquivalenceClass::mergeImpl(BasicValueFactory &ValueFactory,
// Assign new constraints for this class.
Constraints = CRF.add(Constraints, *this, *NewClassConstraint);
+ assert(areFeasible(Constraints) && "Constraint manager shouldn't produce "
+ "a state with infeasible constraints");
+
State = State->set<ConstraintRange>(Constraints);
}
@@ -1585,6 +1917,11 @@ EquivalenceClass::mergeImpl(BasicValueFactory &ValueFactory,
// 4. Update disequality relations
ClassSet DisequalToOther = Other.getDisequalClasses(DisequalityInfo, CF);
+ // We are about to merge two classes but they are already known to be
+ // non-equal. This is a contradiction.
+ if (DisequalToOther.contains(*this))
+ return nullptr;
+
if (!DisequalToOther.isEmpty()) {
ClassSet DisequalToThis = getDisequalClasses(DisequalityInfo, CF);
DisequalityInfo = DF.remove(DisequalityInfo, Other);
@@ -1622,7 +1959,7 @@ EquivalenceClass::getMembersFactory(ProgramStateRef State) {
return State->get_context<SymbolSet>();
}
-SymbolSet EquivalenceClass::getClassMembers(ProgramStateRef State) {
+SymbolSet EquivalenceClass::getClassMembers(ProgramStateRef State) const {
if (const SymbolSet *Members = State->get<ClassMembers>(*this))
return *Members;
@@ -1632,34 +1969,31 @@ SymbolSet EquivalenceClass::getClassMembers(ProgramStateRef State) {
return F.add(F.getEmptySet(), getRepresentativeSymbol());
}
-bool EquivalenceClass::isTrivial(ProgramStateRef State) {
+bool EquivalenceClass::isTrivial(ProgramStateRef State) const {
return State->get<ClassMembers>(*this) == nullptr;
}
bool EquivalenceClass::isTriviallyDead(ProgramStateRef State,
- SymbolReaper &Reaper) {
+ SymbolReaper &Reaper) const {
return isTrivial(State) && Reaper.isDead(getRepresentativeSymbol());
}
-inline ProgramStateRef EquivalenceClass::markDisequal(BasicValueFactory &VF,
- RangeSet::Factory &RF,
+inline ProgramStateRef EquivalenceClass::markDisequal(RangeSet::Factory &RF,
ProgramStateRef State,
SymbolRef First,
SymbolRef Second) {
- return markDisequal(VF, RF, State, find(State, First), find(State, Second));
+ return markDisequal(RF, State, find(State, First), find(State, Second));
}
-inline ProgramStateRef EquivalenceClass::markDisequal(BasicValueFactory &VF,
- RangeSet::Factory &RF,
+inline ProgramStateRef EquivalenceClass::markDisequal(RangeSet::Factory &RF,
ProgramStateRef State,
EquivalenceClass First,
EquivalenceClass Second) {
- return First.markDisequal(VF, RF, State, Second);
+ return First.markDisequal(RF, State, Second);
}
inline ProgramStateRef
-EquivalenceClass::markDisequal(BasicValueFactory &VF, RangeSet::Factory &RF,
- ProgramStateRef State,
+EquivalenceClass::markDisequal(RangeSet::Factory &RF, ProgramStateRef State,
EquivalenceClass Other) const {
// If we know that two classes are equal, we can only produce an infeasible
// state.
@@ -1672,10 +2006,14 @@ EquivalenceClass::markDisequal(BasicValueFactory &VF, RangeSet::Factory &RF,
// Disequality is a symmetric relation, so if we mark A as disequal to B,
// we should also mark B as disequalt to A.
- addToDisequalityInfo(DisequalityInfo, Constraints, VF, RF, State, *this,
- Other);
- addToDisequalityInfo(DisequalityInfo, Constraints, VF, RF, State, Other,
- *this);
+ if (!addToDisequalityInfo(DisequalityInfo, Constraints, RF, State, *this,
+ Other) ||
+ !addToDisequalityInfo(DisequalityInfo, Constraints, RF, State, Other,
+ *this))
+ return nullptr;
+
+ assert(areFeasible(Constraints) && "Constraint manager shouldn't produce "
+ "a state with infeasible constraints");
State = State->set<DisequalityMap>(DisequalityInfo);
State = State->set<ConstraintRange>(Constraints);
@@ -1683,10 +2021,10 @@ EquivalenceClass::markDisequal(BasicValueFactory &VF, RangeSet::Factory &RF,
return State;
}
-inline void EquivalenceClass::addToDisequalityInfo(
+inline bool EquivalenceClass::addToDisequalityInfo(
DisequalityMapTy &Info, ConstraintRangeTy &Constraints,
- BasicValueFactory &VF, RangeSet::Factory &RF, ProgramStateRef State,
- EquivalenceClass First, EquivalenceClass Second) {
+ RangeSet::Factory &RF, ProgramStateRef State, EquivalenceClass First,
+ EquivalenceClass Second) {
// 1. Get all of the required factories.
DisequalityMapTy::Factory &F = State->get_context<DisequalityMap>();
@@ -1709,19 +2047,31 @@ inline void EquivalenceClass::addToDisequalityInfo(
if (const llvm::APSInt *Point = SecondConstraint->getConcreteValue()) {
RangeSet FirstConstraint = SymbolicRangeInferrer::inferRange(
- VF, RF, State, First.getRepresentativeSymbol());
+ RF, State, First.getRepresentativeSymbol());
+
+ FirstConstraint = RF.deletePoint(FirstConstraint, *Point);
+
+ // If the First class is about to be constrained with an empty
+ // range-set, the state is infeasible.
+ if (FirstConstraint.isEmpty())
+ return false;
- FirstConstraint = FirstConstraint.Delete(VF, RF, *Point);
Constraints = CRF.add(Constraints, First, FirstConstraint);
}
+
+ return true;
}
inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
SymbolRef FirstSym,
SymbolRef SecondSym) {
- EquivalenceClass First = find(State, FirstSym);
- EquivalenceClass Second = find(State, SecondSym);
+ return EquivalenceClass::areEqual(State, find(State, FirstSym),
+ find(State, SecondSym));
+}
+inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
+ EquivalenceClass First,
+ EquivalenceClass Second) {
// The same equivalence class => symbols are equal.
if (First == Second)
return true;
@@ -1736,6 +2086,29 @@ inline Optional<bool> EquivalenceClass::areEqual(ProgramStateRef State,
return llvm::None;
}
+// Iterate over all symbols and try to simplify them. Once a symbol is
+// simplified then we check if we can merge the simplified symbol's equivalence
+// class to this class. This way, we simplify not just the symbols but the
+// classes as well: we strive to keep the number of the classes to be the
+// absolute minimum.
+LLVM_NODISCARD ProgramStateRef
+EquivalenceClass::simplify(SValBuilder &SVB, RangeSet::Factory &F,
+ ProgramStateRef State, EquivalenceClass Class) {
+ SymbolSet ClassMembers = Class.getClassMembers(State);
+ for (const SymbolRef &MemberSym : ClassMembers) {
+ SymbolRef SimplifiedMemberSym = ento::simplify(State, MemberSym);
+ if (SimplifiedMemberSym && MemberSym != SimplifiedMemberSym) {
+ // The simplified symbol should be the member of the original Class,
+ // however, it might be in another existing class at the moment. We
+ // have to merge these classes.
+ State = merge(F, State, MemberSym, SimplifiedMemberSym);
+ if (!State)
+ return nullptr;
+ }
+ }
+ return State;
+}
+
inline ClassSet EquivalenceClass::getDisequalClasses(ProgramStateRef State,
SymbolRef Sym) {
return find(State, Sym).getDisequalClasses(State);
@@ -1862,7 +2235,7 @@ ConditionTruthVal RangeConstraintManager::checkNull(ProgramStateRef State,
llvm::APSInt Zero = IntType.getZeroValue();
// Check if zero is in the set of possible values.
- if (Ranges->Intersect(BV, F, Zero, Zero).isEmpty())
+ if (!Ranges->contains(Zero))
return false;
// Zero is a possible value, but it is not the /only/ possible value.
@@ -2017,12 +2390,13 @@ RangeConstraintManager::removeDeadBindings(ProgramStateRef State,
RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
SymbolRef Sym) {
- return SymbolicRangeInferrer::inferRange(getBasicVals(), F, State, Sym);
+ return SymbolicRangeInferrer::inferRange(F, State, Sym);
}
-RangeSet RangeConstraintManager::getRange(ProgramStateRef State,
- EquivalenceClass Class) {
- return SymbolicRangeInferrer::inferRange(getBasicVals(), F, State, Class);
+ProgramStateRef RangeConstraintManager::setRange(ProgramStateRef State,
+ SymbolRef Sym,
+ RangeSet Range) {
+ return ConstraintAssignor::assign(State, getSValBuilder(), F, Sym, Range);
}
//===------------------------------------------------------------------------===
@@ -2047,10 +2421,10 @@ RangeConstraintManager::assumeSymNE(ProgramStateRef St, SymbolRef Sym,
return St;
llvm::APSInt Point = AdjustmentType.convert(Int) - Adjustment;
+ RangeSet New = getRange(St, Sym);
+ New = F.deletePoint(New, Point);
- RangeSet New = getRange(St, Sym).Delete(getBasicVals(), F, Point);
-
- return trackNE(New, St, Sym, Int, Adjustment);
+ return setRange(St, Sym, New);
}
ProgramStateRef
@@ -2064,9 +2438,10 @@ RangeConstraintManager::assumeSymEQ(ProgramStateRef St, SymbolRef Sym,
// [Int-Adjustment, Int-Adjustment]
llvm::APSInt AdjInt = AdjustmentType.convert(Int) - Adjustment;
- RangeSet New = getRange(St, Sym).Intersect(getBasicVals(), F, AdjInt, AdjInt);
+ RangeSet New = getRange(St, Sym);
+ New = F.intersect(New, AdjInt);
- return trackEQ(New, St, Sym, Int, Adjustment);
+ return setRange(St, Sym, New);
}
RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
@@ -2094,7 +2469,8 @@ RangeSet RangeConstraintManager::getSymLTRange(ProgramStateRef St,
llvm::APSInt Upper = ComparisonVal - Adjustment;
--Upper;
- return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ RangeSet Result = getRange(St, Sym);
+ return F.intersect(Result, Lower, Upper);
}
ProgramStateRef
@@ -2102,7 +2478,7 @@ RangeConstraintManager::assumeSymLT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymLTRange(St, Sym, Int, Adjustment);
- return trackNE(New, St, Sym, Int, Adjustment);
+ return setRange(St, Sym, New);
}
RangeSet RangeConstraintManager::getSymGTRange(ProgramStateRef St,
@@ -2130,7 +2506,8 @@ RangeSet RangeConstraintManager::getSymGTRange(ProgramStateRef St,
llvm::APSInt Upper = Max - Adjustment;
++Lower;
- return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ RangeSet SymRange = getRange(St, Sym);
+ return F.intersect(SymRange, Lower, Upper);
}
ProgramStateRef
@@ -2138,7 +2515,7 @@ RangeConstraintManager::assumeSymGT(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymGTRange(St, Sym, Int, Adjustment);
- return trackNE(New, St, Sym, Int, Adjustment);
+ return setRange(St, Sym, New);
}
RangeSet RangeConstraintManager::getSymGERange(ProgramStateRef St,
@@ -2166,7 +2543,8 @@ RangeSet RangeConstraintManager::getSymGERange(ProgramStateRef St,
llvm::APSInt Lower = ComparisonVal - Adjustment;
llvm::APSInt Upper = Max - Adjustment;
- return getRange(St, Sym).Intersect(getBasicVals(), F, Lower, Upper);
+ RangeSet SymRange = getRange(St, Sym);
+ return F.intersect(SymRange, Lower, Upper);
}
ProgramStateRef
@@ -2174,7 +2552,7 @@ RangeConstraintManager::assumeSymGE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymGERange(St, Sym, Int, Adjustment);
- return New.isEmpty() ? nullptr : setConstraint(St, Sym, New);
+ return setRange(St, Sym, New);
}
RangeSet
@@ -2202,7 +2580,8 @@ RangeConstraintManager::getSymLERange(llvm::function_ref<RangeSet()> RS,
llvm::APSInt Lower = Min - Adjustment;
llvm::APSInt Upper = ComparisonVal - Adjustment;
- return RS().Intersect(getBasicVals(), F, Lower, Upper);
+ RangeSet Default = RS();
+ return F.intersect(Default, Lower, Upper);
}
RangeSet RangeConstraintManager::getSymLERange(ProgramStateRef St,
@@ -2217,7 +2596,7 @@ RangeConstraintManager::assumeSymLE(ProgramStateRef St, SymbolRef Sym,
const llvm::APSInt &Int,
const llvm::APSInt &Adjustment) {
RangeSet New = getSymLERange(St, Sym, Int, Adjustment);
- return New.isEmpty() ? nullptr : setConstraint(St, Sym, New);
+ return setRange(St, Sym, New);
}
ProgramStateRef RangeConstraintManager::assumeSymWithinInclusiveRange(
@@ -2227,7 +2606,7 @@ ProgramStateRef RangeConstraintManager::assumeSymWithinInclusiveRange(
if (New.isEmpty())
return nullptr;
RangeSet Out = getSymLERange([&] { return New; }, To, Adjustment);
- return Out.isEmpty() ? nullptr : setConstraint(State, Sym, Out);
+ return setRange(State, Sym, Out);
}
ProgramStateRef RangeConstraintManager::assumeSymOutsideInclusiveRange(
@@ -2235,8 +2614,8 @@ ProgramStateRef RangeConstraintManager::assumeSymOutsideInclusiveRange(
const llvm::APSInt &To, const llvm::APSInt &Adjustment) {
RangeSet RangeLT = getSymLTRange(State, Sym, From, Adjustment);
RangeSet RangeGT = getSymGTRange(State, Sym, To, Adjustment);
- RangeSet New(RangeLT.addRange(F, RangeGT));
- return New.isEmpty() ? nullptr : setConstraint(State, Sym, New);
+ RangeSet New(F.add(RangeLT, RangeGT));
+ return setRange(State, Sym, New);
}
//===----------------------------------------------------------------------===//
@@ -2246,6 +2625,23 @@ ProgramStateRef RangeConstraintManager::assumeSymOutsideInclusiveRange(
void RangeConstraintManager::printJson(raw_ostream &Out, ProgramStateRef State,
const char *NL, unsigned int Space,
bool IsDot) const {
+ printConstraints(Out, State, NL, Space, IsDot);
+ printEquivalenceClasses(Out, State, NL, Space, IsDot);
+ printDisequalities(Out, State, NL, Space, IsDot);
+}
+
+static std::string toString(const SymbolRef &Sym) {
+ std::string S;
+ llvm::raw_string_ostream O(S);
+ Sym->dumpToStream(O);
+ return O.str();
+}
+
+void RangeConstraintManager::printConstraints(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL,
+ unsigned int Space,
+ bool IsDot) const {
ConstraintRangeTy Constraints = State->get<ConstraintRange>();
Indent(Out, Space, IsDot) << "\"constraints\": ";
@@ -2254,25 +2650,162 @@ void RangeConstraintManager::printJson(raw_ostream &Out, ProgramStateRef State,
return;
}
+ std::map<std::string, RangeSet> OrderedConstraints;
+ for (std::pair<EquivalenceClass, RangeSet> P : Constraints) {
+ SymbolSet ClassMembers = P.first.getClassMembers(State);
+ for (const SymbolRef &ClassMember : ClassMembers) {
+ bool insertion_took_place;
+ std::tie(std::ignore, insertion_took_place) =
+ OrderedConstraints.insert({toString(ClassMember), P.second});
+ assert(insertion_took_place &&
+ "two symbols should not have the same dump");
+ }
+ }
+
++Space;
Out << '[' << NL;
bool First = true;
- for (std::pair<EquivalenceClass, RangeSet> P : Constraints) {
- SymbolSet ClassMembers = P.first.getClassMembers(State);
+ for (std::pair<std::string, RangeSet> P : OrderedConstraints) {
+ if (First) {
+ First = false;
+ } else {
+ Out << ',';
+ Out << NL;
+ }
+ Indent(Out, Space, IsDot)
+ << "{ \"symbol\": \"" << P.first << "\", \"range\": \"";
+ P.second.dump(Out);
+ Out << "\" }";
+ }
+ Out << NL;
- // We can print the same constraint for every class member.
- for (SymbolRef ClassMember : ClassMembers) {
- if (First) {
- First = false;
- } else {
- Out << ',';
- Out << NL;
+ --Space;
+ Indent(Out, Space, IsDot) << "]," << NL;
+}
+
+static std::string toString(ProgramStateRef State, EquivalenceClass Class) {
+ SymbolSet ClassMembers = Class.getClassMembers(State);
+ llvm::SmallVector<SymbolRef, 8> ClassMembersSorted(ClassMembers.begin(),
+ ClassMembers.end());
+ llvm::sort(ClassMembersSorted,
+ [](const SymbolRef &LHS, const SymbolRef &RHS) {
+ return toString(LHS) < toString(RHS);
+ });
+
+ bool FirstMember = true;
+
+ std::string Str;
+ llvm::raw_string_ostream Out(Str);
+ Out << "[ ";
+ for (SymbolRef ClassMember : ClassMembersSorted) {
+ if (FirstMember)
+ FirstMember = false;
+ else
+ Out << ", ";
+ Out << "\"" << ClassMember << "\"";
+ }
+ Out << " ]";
+ return Out.str();
+}
+
+void RangeConstraintManager::printEquivalenceClasses(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL,
+ unsigned int Space,
+ bool IsDot) const {
+ ClassMembersTy Members = State->get<ClassMembers>();
+
+ Indent(Out, Space, IsDot) << "\"equivalence_classes\": ";
+ if (Members.isEmpty()) {
+ Out << "null," << NL;
+ return;
+ }
+
+ std::set<std::string> MembersStr;
+ for (std::pair<EquivalenceClass, SymbolSet> ClassToSymbolSet : Members)
+ MembersStr.insert(toString(State, ClassToSymbolSet.first));
+
+ ++Space;
+ Out << '[' << NL;
+ bool FirstClass = true;
+ for (const std::string &Str : MembersStr) {
+ if (FirstClass) {
+ FirstClass = false;
+ } else {
+ Out << ',';
+ Out << NL;
+ }
+ Indent(Out, Space, IsDot);
+ Out << Str;
+ }
+ Out << NL;
+
+ --Space;
+ Indent(Out, Space, IsDot) << "]," << NL;
+}
+
+void RangeConstraintManager::printDisequalities(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL,
+ unsigned int Space,
+ bool IsDot) const {
+ DisequalityMapTy Disequalities = State->get<DisequalityMap>();
+
+ Indent(Out, Space, IsDot) << "\"disequality_info\": ";
+ if (Disequalities.isEmpty()) {
+ Out << "null," << NL;
+ return;
+ }
+
+ // Transform the disequality info to an ordered map of
+ // [string -> (ordered set of strings)]
+ using EqClassesStrTy = std::set<std::string>;
+ using DisequalityInfoStrTy = std::map<std::string, EqClassesStrTy>;
+ DisequalityInfoStrTy DisequalityInfoStr;
+ for (std::pair<EquivalenceClass, ClassSet> ClassToDisEqSet : Disequalities) {
+ EquivalenceClass Class = ClassToDisEqSet.first;
+ ClassSet DisequalClasses = ClassToDisEqSet.second;
+ EqClassesStrTy MembersStr;
+ for (EquivalenceClass DisEqClass : DisequalClasses)
+ MembersStr.insert(toString(State, DisEqClass));
+ DisequalityInfoStr.insert({toString(State, Class), MembersStr});
+ }
+
+ ++Space;
+ Out << '[' << NL;
+ bool FirstClass = true;
+ for (std::pair<std::string, EqClassesStrTy> ClassToDisEqSet :
+ DisequalityInfoStr) {
+ const std::string &Class = ClassToDisEqSet.first;
+ if (FirstClass) {
+ FirstClass = false;
+ } else {
+ Out << ',';
+ Out << NL;
+ }
+ Indent(Out, Space, IsDot) << "{" << NL;
+ unsigned int DisEqSpace = Space + 1;
+ Indent(Out, DisEqSpace, IsDot) << "\"class\": ";
+ Out << Class;
+ const EqClassesStrTy &DisequalClasses = ClassToDisEqSet.second;
+ if (!DisequalClasses.empty()) {
+ Out << "," << NL;
+ Indent(Out, DisEqSpace, IsDot) << "\"disequal_to\": [" << NL;
+ unsigned int DisEqClassSpace = DisEqSpace + 1;
+ Indent(Out, DisEqClassSpace, IsDot);
+ bool FirstDisEqClass = true;
+ for (const std::string &DisEqClass : DisequalClasses) {
+ if (FirstDisEqClass) {
+ FirstDisEqClass = false;
+ } else {
+ Out << ',' << NL;
+ Indent(Out, DisEqClassSpace, IsDot);
+ }
+ Out << DisEqClass;
}
- Indent(Out, Space, IsDot)
- << "{ \"symbol\": \"" << ClassMember << "\", \"range\": \"";
- P.second.print(Out);
- Out << "\" }";
+ Out << "]" << NL;
}
+ Indent(Out, Space, IsDot) << "}";
}
Out << NL;
diff --git a/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index e7a03e6ed582..d227c025fb20 100644
--- a/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -23,12 +23,14 @@ RangedConstraintManager::~RangedConstraintManager() {}
ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
SymbolRef Sym,
bool Assumption) {
+ Sym = simplify(State, Sym);
+
// Handle SymbolData.
- if (isa<SymbolData>(Sym)) {
+ if (isa<SymbolData>(Sym))
return assumeSymUnsupported(State, Sym, Assumption);
- // Handle symbolic expression.
- } else if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(Sym)) {
+ // Handle symbolic expression.
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(Sym)) {
// We can only simplify expressions whose RHS is an integer.
BinaryOperator::Opcode op = SIE->getOpcode();
@@ -93,6 +95,9 @@ ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
ProgramStateRef RangedConstraintManager::assumeSymInclusiveRange(
ProgramStateRef State, SymbolRef Sym, const llvm::APSInt &From,
const llvm::APSInt &To, bool InRange) {
+
+ Sym = simplify(State, Sym);
+
// Get the type used for calculating wraparound.
BasicValueFactory &BVF = getBasicVals();
APSIntType WraparoundType = BVF.getAPSIntType(Sym->getType());
@@ -121,6 +126,8 @@ ProgramStateRef RangedConstraintManager::assumeSymInclusiveRange(
ProgramStateRef
RangedConstraintManager::assumeSymUnsupported(ProgramStateRef State,
SymbolRef Sym, bool Assumption) {
+ Sym = simplify(State, Sym);
+
BasicValueFactory &BVF = getBasicVals();
QualType T = Sym->getType();
@@ -219,6 +226,13 @@ void RangedConstraintManager::computeAdjustment(SymbolRef &Sym,
}
}
-} // end of namespace ento
+SymbolRef simplify(ProgramStateRef State, SymbolRef Sym) {
+ SValBuilder &SVB = State->getStateManager().getSValBuilder();
+ SVal SimplifiedVal = SVB.simplifySVal(State, SVB.makeSymbolVal(Sym));
+ if (SymbolRef SimplifiedSym = SimplifiedVal.getAsSymbol())
+ return SimplifiedSym;
+ return Sym;
+}
+} // end of namespace ento
} // end of namespace clang
diff --git a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 57fde32bc01d..4ffa1aacb41f 100644
--- a/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -23,7 +23,6 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/DynamicSize.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
@@ -1479,7 +1478,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
return UnknownVal();
if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
- return CastRetrievedVal(getBindingForField(B, FR), FR, T);
+ return svalBuilder.evalCast(getBindingForField(B, FR), T, QualType{});
if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
// FIXME: Here we actually perform an implicit conversion from the loaded
@@ -1487,7 +1486,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
// more intelligently. For example, an 'element' can encompass multiple
// bound regions (e.g., several bound bytes), or could be a subset of
// a larger value.
- return CastRetrievedVal(getBindingForElement(B, ER), ER, T);
+ return svalBuilder.evalCast(getBindingForElement(B, ER), T, QualType{});
}
if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
@@ -1497,7 +1496,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
// reinterpretted, it is possible we stored a different value that could
// fit within the ivar. Either we need to cast these when storing them
// or reinterpret them lazily (as we do here).
- return CastRetrievedVal(getBindingForObjCIvar(B, IVR), IVR, T);
+ return svalBuilder.evalCast(getBindingForObjCIvar(B, IVR), T, QualType{});
}
if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
@@ -1507,7 +1506,7 @@ SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T)
// variable is reinterpretted, it is possible we stored a different value
// that could fit within the variable. Either we need to cast these when
// storing them or reinterpret them lazily (as we do here).
- return CastRetrievedVal(getBindingForVar(B, VR), VR, T);
+ return svalBuilder.evalCast(getBindingForVar(B, VR), T, QualType{});
}
const SVal *V = B.lookup(R, BindingKey::Direct);
diff --git a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index 72b8ada1dfab..b459b5adb511 100644
--- a/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -107,7 +107,7 @@ SVal SValBuilder::convertToArrayIndex(SVal val) {
return val;
}
- return evalCastFromNonLoc(val.castAs<NonLoc>(), ArrayIndexTy);
+ return evalCast(val, ArrayIndexTy, QualType{});
}
nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
@@ -192,12 +192,19 @@ SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
const LocationContext *LCtx,
unsigned VisitCount) {
QualType T = E->getType();
- assert(Loc::isLocType(T));
- assert(SymbolManager::canSymbolicate(T));
- if (T->isNullPtrType())
- return makeZeroVal(T);
+ return getConjuredHeapSymbolVal(E, LCtx, T, VisitCount);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredHeapSymbolVal(const Expr *E,
+ const LocationContext *LCtx,
+ QualType type, unsigned VisitCount) {
+ assert(Loc::isLocType(type));
+ assert(SymbolManager::canSymbolicate(type));
+ if (type->isNullPtrType())
+ return makeZeroVal(type);
- SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, T, VisitCount);
+ SymbolRef sym = SymMgr.conjureSymbol(E, LCtx, type, VisitCount);
return loc::MemRegionVal(MemMgr.getSymbolicHeapRegion(sym));
}
@@ -268,6 +275,13 @@ DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
return loc::MemRegionVal(BD);
}
+Optional<loc::MemRegionVal>
+SValBuilder::getCastedMemRegionVal(const MemRegion *R, QualType Ty) {
+ if (auto OptR = StateMgr.getStoreManager().castRegion(R, Ty))
+ return loc::MemRegionVal(*OptR);
+ return None;
+}
+
/// Return a memory region for the 'this' object reference.
loc::MemRegionVal SValBuilder::getCXXThis(const CXXMethodDecl *D,
const StackFrameContext *SFC) {
@@ -423,6 +437,14 @@ SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
return UnknownVal();
}
+ if (op == BinaryOperatorKind::BO_Cmp) {
+ // We can't reason about C++20 spaceship operator yet.
+ //
+ // FIXME: Support C++20 spaceship operator.
+ // The main problem here is that the result is not integer.
+ return UnknownVal();
+ }
+
if (Optional<Loc> LV = lhs.getAs<Loc>()) {
if (Optional<Loc> RV = rhs.getAs<Loc>())
return evalBinOpLL(state, op, *LV, *RV, type);
@@ -530,108 +552,272 @@ SVal SValBuilder::evalIntegralCast(ProgramStateRef state, SVal val,
return evalCast(val, castTy, originalTy);
}
-// FIXME: should rewrite according to the cast kind.
-SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
- castTy = Context.getCanonicalType(castTy);
- originalTy = Context.getCanonicalType(originalTy);
- if (val.isUnknownOrUndef() || castTy == originalTy)
- return val;
+//===----------------------------------------------------------------------===//
+// Cast methods.
+// `evalCast` is the main method
+// `evalCastKind` and `evalCastSubKind` are helpers
+//===----------------------------------------------------------------------===//
- if (castTy->isBooleanType()) {
- if (val.isUnknownOrUndef())
- return val;
- if (val.isConstant())
- return makeTruthVal(!val.isZeroConstant(), castTy);
- if (!Loc::isLocType(originalTy) &&
- !originalTy->isIntegralOrEnumerationType() &&
- !originalTy->isMemberPointerType())
- return UnknownVal();
- if (SymbolRef Sym = val.getAsSymbol(true)) {
- BasicValueFactory &BVF = getBasicValueFactory();
- // FIXME: If we had a state here, we could see if the symbol is known to
- // be zero, but we don't.
- return makeNonLoc(Sym, BO_NE, BVF.getValue(0, Sym->getType()), castTy);
- }
- // Loc values are not always true, they could be weakly linked functions.
- if (Optional<Loc> L = val.getAs<Loc>())
- return evalCastFromLoc(*L, castTy);
+/// Cast a given SVal to another SVal using given QualType's.
+/// \param V -- SVal that should be casted.
+/// \param CastTy -- QualType that V should be casted according to.
+/// \param OriginalTy -- QualType which is associated to V. It provides
+/// additional information about what type the cast performs from.
+/// \returns the most appropriate casted SVal.
+/// Note: Many cases don't use an exact OriginalTy. It can be extracted
+/// from SVal or the cast can performs unconditionaly. Always pass OriginalTy!
+/// It can be crucial in certain cases and generates different results.
+/// FIXME: If `OriginalTy.isNull()` is true, then cast performs based on CastTy
+/// only. This behavior is uncertain and should be improved.
+SVal SValBuilder::evalCast(SVal V, QualType CastTy, QualType OriginalTy) {
+ if (CastTy.isNull())
+ return V;
+
+ CastTy = Context.getCanonicalType(CastTy);
+
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ if (!IsUnknownOriginalType) {
+ OriginalTy = Context.getCanonicalType(OriginalTy);
+
+ if (CastTy == OriginalTy)
+ return V;
+
+ // FIXME: Move this check to the most appropriate
+ // evalCastKind/evalCastSubKind function. For const casts, casts to void,
+ // just propagate the value.
+ if (!CastTy->isVariableArrayType() && !OriginalTy->isVariableArrayType())
+ if (shouldBeModeledWithNoOp(Context, Context.getPointerType(CastTy),
+ Context.getPointerType(OriginalTy)))
+ return V;
+ }
- Loc L = val.castAs<nonloc::LocAsInteger>().getLoc();
- return evalCastFromLoc(L, castTy);
+ // Cast SVal according to kinds.
+ switch (V.getBaseKind()) {
+ case SVal::UndefinedValKind:
+ return evalCastKind(V.castAs<UndefinedVal>(), CastTy, OriginalTy);
+ case SVal::UnknownValKind:
+ return evalCastKind(V.castAs<UnknownVal>(), CastTy, OriginalTy);
+ case SVal::LocKind:
+ return evalCastKind(V.castAs<Loc>(), CastTy, OriginalTy);
+ case SVal::NonLocKind:
+ return evalCastKind(V.castAs<NonLoc>(), CastTy, OriginalTy);
}
- // For const casts, casts to void, just propagate the value.
- if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
- if (shouldBeModeledWithNoOp(Context, Context.getPointerType(castTy),
- Context.getPointerType(originalTy)))
- return val;
+ llvm_unreachable("Unknown SVal kind");
+}
- // Check for casts from pointers to integers.
- if (castTy->isIntegralOrEnumerationType() && Loc::isLocType(originalTy))
- return evalCastFromLoc(val.castAs<Loc>(), castTy);
-
- // Check for casts from integers to pointers.
- if (Loc::isLocType(castTy) && originalTy->isIntegralOrEnumerationType()) {
- if (Optional<nonloc::LocAsInteger> LV = val.getAs<nonloc::LocAsInteger>()) {
- if (const MemRegion *R = LV->getLoc().getAsRegion()) {
- StoreManager &storeMgr = StateMgr.getStoreManager();
- R = storeMgr.castRegion(R, castTy);
- return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
- }
- return LV->getLoc();
- }
- return dispatchCast(val, castTy);
+SVal SValBuilder::evalCastKind(UndefinedVal V, QualType CastTy,
+ QualType OriginalTy) {
+ return V;
+}
+
+SVal SValBuilder::evalCastKind(UnknownVal V, QualType CastTy,
+ QualType OriginalTy) {
+ return V;
+}
+
+SVal SValBuilder::evalCastKind(Loc V, QualType CastTy, QualType OriginalTy) {
+ switch (V.getSubKind()) {
+ case loc::ConcreteIntKind:
+ return evalCastSubKind(V.castAs<loc::ConcreteInt>(), CastTy, OriginalTy);
+ case loc::GotoLabelKind:
+ return evalCastSubKind(V.castAs<loc::GotoLabel>(), CastTy, OriginalTy);
+ case loc::MemRegionValKind:
+ return evalCastSubKind(V.castAs<loc::MemRegionVal>(), CastTy, OriginalTy);
}
- // Just pass through function and block pointers.
- if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
- assert(Loc::isLocType(castTy));
- return val;
+ llvm_unreachable("Unknown SVal kind");
+}
+
+SVal SValBuilder::evalCastKind(NonLoc V, QualType CastTy, QualType OriginalTy) {
+ switch (V.getSubKind()) {
+ case nonloc::CompoundValKind:
+ return evalCastSubKind(V.castAs<nonloc::CompoundVal>(), CastTy, OriginalTy);
+ case nonloc::ConcreteIntKind:
+ return evalCastSubKind(V.castAs<nonloc::ConcreteInt>(), CastTy, OriginalTy);
+ case nonloc::LazyCompoundValKind:
+ return evalCastSubKind(V.castAs<nonloc::LazyCompoundVal>(), CastTy,
+ OriginalTy);
+ case nonloc::LocAsIntegerKind:
+ return evalCastSubKind(V.castAs<nonloc::LocAsInteger>(), CastTy,
+ OriginalTy);
+ case nonloc::SymbolValKind:
+ return evalCastSubKind(V.castAs<nonloc::SymbolVal>(), CastTy, OriginalTy);
+ case nonloc::PointerToMemberKind:
+ return evalCastSubKind(V.castAs<nonloc::PointerToMember>(), CastTy,
+ OriginalTy);
}
- // Check for casts from array type to another type.
- if (const auto *arrayT =
- dyn_cast<ArrayType>(originalTy.getCanonicalType())) {
- // We will always decay to a pointer.
- QualType elemTy = arrayT->getElementType();
- val = StateMgr.ArrayToPointer(val.castAs<Loc>(), elemTy);
+ llvm_unreachable("Unknown SVal kind");
+}
- // Are we casting from an array to a pointer? If so just pass on
- // the decayed value.
- if (castTy->isPointerType() || castTy->isReferenceType())
- return val;
+SVal SValBuilder::evalCastSubKind(loc::ConcreteInt V, QualType CastTy,
+ QualType OriginalTy) {
+ // Pointer to bool.
+ if (CastTy->isBooleanType())
+ return makeTruthVal(V.getValue().getBoolValue(), CastTy);
+
+ // Pointer to integer.
+ if (CastTy->isIntegralOrEnumerationType()) {
+ llvm::APSInt Value = V.getValue();
+ BasicVals.getAPSIntType(CastTy).apply(Value);
+ return makeIntVal(Value);
+ }
+
+ // Pointer to any pointer.
+ if (Loc::isLocType(CastTy))
+ return V;
+
+ // Pointer to whatever else.
+ return UnknownVal();
+}
+
+SVal SValBuilder::evalCastSubKind(loc::GotoLabel V, QualType CastTy,
+ QualType OriginalTy) {
+ // Pointer to bool.
+ if (CastTy->isBooleanType())
+ // Labels are always true.
+ return makeTruthVal(true, CastTy);
+
+ // Pointer to integer.
+ if (CastTy->isIntegralOrEnumerationType()) {
+ const unsigned BitWidth = Context.getIntWidth(CastTy);
+ return makeLocAsInteger(V, BitWidth);
+ }
+
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ if (!IsUnknownOriginalType) {
+ // Array to pointer.
+ if (isa<ArrayType>(OriginalTy))
+ if (CastTy->isPointerType() || CastTy->isReferenceType())
+ return UnknownVal();
+ }
+
+ // Pointer to any pointer.
+ if (Loc::isLocType(CastTy))
+ return V;
+
+ // Pointer to whatever else.
+ return UnknownVal();
+}
- // Are we casting from an array to an integer? If so, cast the decayed
- // pointer value to an integer.
- assert(castTy->isIntegralOrEnumerationType());
+static bool hasSameUnqualifiedPointeeType(QualType ty1, QualType ty2) {
+ return ty1->getPointeeType().getCanonicalType().getTypePtr() ==
+ ty2->getPointeeType().getCanonicalType().getTypePtr();
+}
+
+SVal SValBuilder::evalCastSubKind(loc::MemRegionVal V, QualType CastTy,
+ QualType OriginalTy) {
+ // Pointer to bool.
+ if (CastTy->isBooleanType()) {
+ const MemRegion *R = V.getRegion();
+ if (const FunctionCodeRegion *FTR = dyn_cast<FunctionCodeRegion>(R))
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FTR->getDecl()))
+ if (FD->isWeak())
+ // FIXME: Currently we are using an extent symbol here,
+ // because there are no generic region address metadata
+ // symbols to use, only content metadata.
+ return nonloc::SymbolVal(SymMgr.getExtentSymbol(FTR));
+
+ if (const SymbolicRegion *SymR = R->getSymbolicBase()) {
+ SymbolRef Sym = SymR->getSymbol();
+ QualType Ty = Sym->getType();
+ // This change is needed for architectures with varying
+ // pointer widths. See the amdgcn opencl reproducer with
+ // this change as an example: solver-sym-simplification-ptr-bool.cl
+ // FIXME: We could encounter a reference here,
+ // try returning a concrete 'true' since it might
+ // be easier on the solver.
+ // FIXME: Cleanup remainder of `getZeroWithPtrWidth ()`
+ // and `getIntWithPtrWidth()` functions to prevent future
+ // confusion
+ const llvm::APSInt &Zero = Ty->isReferenceType()
+ ? BasicVals.getZeroWithPtrWidth()
+ : BasicVals.getZeroWithTypeSize(Ty);
+ return makeNonLoc(Sym, BO_NE, Zero, CastTy);
+ }
+ // Non-symbolic memory regions are always true.
+ return makeTruthVal(true, CastTy);
+ }
- // FIXME: Keep these here for now in case we decide soon that we
- // need the original decayed type.
- // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
- // QualType pointerTy = C.getPointerType(elemTy);
- return evalCastFromLoc(val.castAs<Loc>(), castTy);
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ // Try to cast to array
+ const auto *ArrayTy =
+ IsUnknownOriginalType
+ ? nullptr
+ : dyn_cast<ArrayType>(OriginalTy.getCanonicalType());
+
+ // Pointer to integer.
+ if (CastTy->isIntegralOrEnumerationType()) {
+ SVal Val = V;
+ // Array to integer.
+ if (ArrayTy) {
+ // We will always decay to a pointer.
+ QualType ElemTy = ArrayTy->getElementType();
+ Val = StateMgr.ArrayToPointer(V, ElemTy);
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ }
+ const unsigned BitWidth = Context.getIntWidth(CastTy);
+ return makeLocAsInteger(Val.castAs<Loc>(), BitWidth);
}
- // Check for casts from a region to a specific type.
- if (const MemRegion *R = val.getAsRegion()) {
- // Handle other casts of locations to integers.
- if (castTy->isIntegralOrEnumerationType())
- return evalCastFromLoc(loc::MemRegionVal(R), castTy);
-
- // FIXME: We should handle the case where we strip off view layers to get
- // to a desugared type.
- if (!Loc::isLocType(castTy)) {
- // FIXME: There can be gross cases where one casts the result of a function
- // (that returns a pointer) to some other value that happens to fit
- // within that pointer value. We currently have no good way to
- // model such operations. When this happens, the underlying operation
- // is that the caller is reasoning about bits. Conceptually we are
- // layering a "view" of a location on top of those bits. Perhaps
- // we need to be more lazy about mutual possible views, even on an
- // SVal? This may be necessary for bit-level reasoning as well.
- return UnknownVal();
+ // Pointer to pointer.
+ if (Loc::isLocType(CastTy)) {
+
+ if (IsUnknownOriginalType) {
+ // When retrieving symbolic pointer and expecting a non-void pointer,
+ // wrap them into element regions of the expected type if necessary.
+ // It is necessary to make sure that the retrieved value makes sense,
+ // because there's no other cast in the AST that would tell us to cast
+ // it to the correct pointer type. We might need to do that for non-void
+ // pointers as well.
+ // FIXME: We really need a single good function to perform casts for us
+ // correctly every time we need it.
+ const MemRegion *R = V.getRegion();
+ if (CastTy->isPointerType() && !CastTy->isVoidPointerType()) {
+ if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
+ QualType SRTy = SR->getSymbol()->getType();
+ if (!hasSameUnqualifiedPointeeType(SRTy, CastTy)) {
+ if (auto OptMemRegV = getCastedMemRegionVal(SR, CastTy))
+ return *OptMemRegV;
+ }
+ }
+ }
+ // Next fixes pointer dereference using type different from its initial
+ // one. See PR37503 and PR49007 for details.
+ if (const auto *ER = dyn_cast<ElementRegion>(R)) {
+ if (auto OptMemRegV = getCastedMemRegionVal(ER, CastTy))
+ return *OptMemRegV;
+ }
+
+ return V;
+ }
+
+ if (OriginalTy->isIntegralOrEnumerationType() ||
+ OriginalTy->isBlockPointerType() || OriginalTy->isFunctionPointerType())
+ return V;
+
+ // Array to pointer.
+ if (ArrayTy) {
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (CastTy->isPointerType() || CastTy->isReferenceType()) {
+ // We will always decay to a pointer.
+ QualType ElemTy = ArrayTy->getElementType();
+ return StateMgr.ArrayToPointer(V, ElemTy);
+ }
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(CastTy->isIntegralOrEnumerationType());
}
+ // Other pointer to pointer.
+ assert(Loc::isLocType(OriginalTy) || OriginalTy->isFunctionType() ||
+ CastTy->isReferenceType());
+
// We get a symbolic function pointer for a dereference of a function
// pointer, but it is of function type. Example:
@@ -647,17 +833,161 @@ SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
// return bar(x)+1; // no-warning
// }
- assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
- originalTy->isBlockPointerType() || castTy->isReferenceType());
+ // Get the result of casting a region to a different type.
+ const MemRegion *R = V.getRegion();
+ if (auto OptMemRegV = getCastedMemRegionVal(R, CastTy))
+ return *OptMemRegV;
+ }
+
+ // Pointer to whatever else.
+ // FIXME: There can be gross cases where one casts the result of a
+ // function (that returns a pointer) to some other value that happens to
+ // fit within that pointer value. We currently have no good way to model
+ // such operations. When this happens, the underlying operation is that
+ // the caller is reasoning about bits. Conceptually we are layering a
+ // "view" of a location on top of those bits. Perhaps we need to be more
+ // lazy about mutual possible views, even on an SVal? This may be
+ // necessary for bit-level reasoning as well.
+ return UnknownVal();
+}
+
+SVal SValBuilder::evalCastSubKind(nonloc::CompoundVal V, QualType CastTy,
+ QualType OriginalTy) {
+ // Compound to whatever.
+ return UnknownVal();
+}
+
+SVal SValBuilder::evalCastSubKind(nonloc::ConcreteInt V, QualType CastTy,
+ QualType OriginalTy) {
+ auto CastedValue = [V, CastTy, this]() {
+ llvm::APSInt Value = V.getValue();
+ BasicVals.getAPSIntType(CastTy).apply(Value);
+ return Value;
+ };
+
+ // Integer to bool.
+ if (CastTy->isBooleanType())
+ return makeTruthVal(V.getValue().getBoolValue(), CastTy);
+
+ // Integer to pointer.
+ if (CastTy->isIntegralOrEnumerationType())
+ return makeIntVal(CastedValue());
+
+ // Integer to pointer.
+ if (Loc::isLocType(CastTy))
+ return makeIntLocVal(CastedValue());
+
+ // Pointer to whatever else.
+ return UnknownVal();
+}
+
+SVal SValBuilder::evalCastSubKind(nonloc::LazyCompoundVal V, QualType CastTy,
+ QualType OriginalTy) {
+ // Compound to whatever.
+ return UnknownVal();
+}
+
+SVal SValBuilder::evalCastSubKind(nonloc::LocAsInteger V, QualType CastTy,
+ QualType OriginalTy) {
+ Loc L = V.getLoc();
+
+ // Pointer as integer to bool.
+ if (CastTy->isBooleanType())
+ // Pass to Loc function.
+ return evalCastKind(L, CastTy, OriginalTy);
+
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ // Pointer as integer to pointer.
+ if (!IsUnknownOriginalType && Loc::isLocType(CastTy) &&
+ OriginalTy->isIntegralOrEnumerationType()) {
+ if (const MemRegion *R = L.getAsRegion())
+ if (auto OptMemRegV = getCastedMemRegionVal(R, CastTy))
+ return *OptMemRegV;
+ return L;
+ }
+
+ // Pointer as integer with region to integer/pointer.
+ const MemRegion *R = L.getAsRegion();
+ if (!IsUnknownOriginalType && R) {
+ if (CastTy->isIntegralOrEnumerationType())
+ return evalCastSubKind(loc::MemRegionVal(R), CastTy, OriginalTy);
+
+ if (Loc::isLocType(CastTy)) {
+ assert(Loc::isLocType(OriginalTy) || OriginalTy->isFunctionType() ||
+ CastTy->isReferenceType());
+ // Delegate to store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+ // Evaluates to UnknownVal.
+ if (auto OptMemRegV = getCastedMemRegionVal(R, CastTy))
+ return *OptMemRegV;
+ }
+ } else {
+ if (Loc::isLocType(CastTy)) {
+ if (IsUnknownOriginalType)
+ return evalCastSubKind(loc::MemRegionVal(R), CastTy, OriginalTy);
+ return L;
+ }
+
+ SymbolRef SE = nullptr;
+ if (R) {
+ if (const SymbolicRegion *SR =
+ dyn_cast<SymbolicRegion>(R->StripCasts())) {
+ SE = SR->getSymbol();
+ }
+ }
+
+ if (!CastTy->isFloatingType() || !SE || SE->getType()->isFloatingType()) {
+ // FIXME: Correctly support promotions/truncations.
+ const unsigned CastSize = Context.getIntWidth(CastTy);
+ if (CastSize == V.getNumBits())
+ return V;
+
+ return makeLocAsInteger(L, CastSize);
+ }
+ }
- StoreManager &storeMgr = StateMgr.getStoreManager();
+ // Pointer as integer to whatever else.
+ return UnknownVal();
+}
- // Delegate to store manager to get the result of casting a region to a
- // different type. If the MemRegion* returned is NULL, this expression
- // Evaluates to UnknownVal.
- R = storeMgr.castRegion(R, castTy);
- return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+SVal SValBuilder::evalCastSubKind(nonloc::SymbolVal V, QualType CastTy,
+ QualType OriginalTy) {
+ SymbolRef SE = V.getSymbol();
+
+ const bool IsUnknownOriginalType = OriginalTy.isNull();
+ // Symbol to bool.
+ if (!IsUnknownOriginalType && CastTy->isBooleanType()) {
+ // Non-float to bool.
+ if (Loc::isLocType(OriginalTy) ||
+ OriginalTy->isIntegralOrEnumerationType() ||
+ OriginalTy->isMemberPointerType()) {
+ BasicValueFactory &BVF = getBasicValueFactory();
+ return makeNonLoc(SE, BO_NE, BVF.getValue(0, SE->getType()), CastTy);
+ }
+ } else {
+ // Symbol to integer, float.
+ QualType T = Context.getCanonicalType(SE->getType());
+ // If types are the same or both are integers, ignore the cast.
+ // FIXME: Remove this hack when we support symbolic truncation/extension.
+ // HACK: If both castTy and T are integers, ignore the cast. This is
+ // not a permanent solution. Eventually we want to precisely handle
+ // extension/truncation of symbolic integers. This prevents us from losing
+ // precision when we assign 'x = y' and 'y' is symbolic and x and y are
+ // different integer types.
+ if (haveSameType(T, CastTy))
+ return V;
+ if (!Loc::isLocType(CastTy))
+ if (!IsUnknownOriginalType || !CastTy->isFloatingType() ||
+ T->isFloatingType())
+ return makeNonLoc(SE, T, CastTy);
}
- return dispatchCast(val, castTy);
+ // Symbol to pointer and whatever else.
+ return UnknownVal();
+}
+
+SVal SValBuilder::evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
+ QualType OriginalTy) {
+ // Member pointer to whatever.
+ return V;
}
diff --git a/clang/lib/StaticAnalyzer/Core/SVals.cpp b/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 252596887e4f..117546e43b1a 100644
--- a/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
@@ -21,6 +22,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValVisitor.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymExpr.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
#include "llvm/ADT/Optional.h"
@@ -136,6 +138,63 @@ const MemRegion *SVal::getAsRegion() const {
return nullptr;
}
+namespace {
+class TypeRetrievingVisitor
+ : public FullSValVisitor<TypeRetrievingVisitor, QualType> {
+private:
+ const ASTContext &Context;
+
+public:
+ TypeRetrievingVisitor(const ASTContext &Context) : Context(Context) {}
+
+ QualType VisitLocMemRegionVal(loc::MemRegionVal MRV) {
+ return Visit(MRV.getRegion());
+ }
+ QualType VisitLocGotoLabel(loc::GotoLabel GL) {
+ return QualType{Context.VoidPtrTy};
+ }
+ template <class ConcreteInt> QualType VisitConcreteInt(ConcreteInt CI) {
+ const llvm::APSInt &Value = CI.getValue();
+ return Context.getIntTypeForBitwidth(Value.getBitWidth(), Value.isSigned());
+ }
+ QualType VisitLocConcreteInt(loc::ConcreteInt CI) {
+ return VisitConcreteInt(CI);
+ }
+ QualType VisitNonLocConcreteInt(nonloc::ConcreteInt CI) {
+ return VisitConcreteInt(CI);
+ }
+ QualType VisitNonLocLocAsInteger(nonloc::LocAsInteger LI) {
+ QualType NestedType = Visit(LI.getLoc());
+ if (NestedType.isNull())
+ return NestedType;
+
+ return Context.getIntTypeForBitwidth(LI.getNumBits(),
+ NestedType->isSignedIntegerType());
+ }
+ QualType VisitNonLocCompoundVal(nonloc::CompoundVal CV) {
+ return CV.getValue()->getType();
+ }
+ QualType VisitNonLocLazyCompoundVal(nonloc::LazyCompoundVal LCV) {
+ return LCV.getRegion()->getValueType();
+ }
+ QualType VisitNonLocSymbolVal(nonloc::SymbolVal SV) {
+ return Visit(SV.getSymbol());
+ }
+ QualType VisitSymbolicRegion(const SymbolicRegion *SR) {
+ return Visit(SR->getSymbol());
+ }
+ QualType VisitTypedRegion(const TypedRegion *TR) {
+ return TR->getLocationType();
+ }
+ QualType VisitSymExpr(const SymExpr *SE) { return SE->getType(); }
+};
+} // end anonymous namespace
+
+QualType SVal::getType(const ASTContext &Context) const {
+ TypeRetrievingVisitor TRV{Context};
+ return TRV.Visit(*this);
+}
+
const MemRegion *loc::MemRegionVal::stripCasts(bool StripBaseCasts) const {
const MemRegion *R = getRegion();
return R ? R->StripCasts(StripBaseCasts) : nullptr;
diff --git a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index f93d04ccd61a..e1319a4c2e41 100644
--- a/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/Version.h"
@@ -48,7 +49,8 @@ public:
void ento::createSarifDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
const std::string &Output, const Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
// TODO: Emit an error here.
if (Output.empty())
@@ -56,7 +58,7 @@ void ento::createSarifDiagnosticConsumer(
C.push_back(new SarifDiagnostics(Output, PP.getLangOpts()));
createTextMinimalPathDiagnosticConsumer(std::move(DiagOpts), C, Output, PP,
- CTU);
+ CTU, MacroExpansions);
}
static StringRef getFileName(const FileEntry &FE) {
@@ -385,7 +387,7 @@ void SarifDiagnostics::FlushDiagnosticsImpl(
// file can become large very quickly, so decoding into JSON to append a run
// may be an expensive operation.
std::error_code EC;
- llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::OF_Text);
+ llvm::raw_fd_ostream OS(OutputFile, EC, llvm::sys::fs::OF_TextWithCRLF);
if (EC) {
llvm::errs() << "warning: could not create file: " << EC.message() << '\n';
return;
diff --git a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index facadaf1225f..e57d92fbcebb 100644
--- a/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -22,11 +22,6 @@ using namespace ento;
namespace {
class SimpleSValBuilder : public SValBuilder {
-protected:
- SVal dispatchCast(SVal val, QualType castTy) override;
- SVal evalCastFromNonLoc(NonLoc val, QualType castTy) override;
- SVal evalCastFromLoc(Loc val, QualType castTy) override;
-
public:
SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
ProgramStateManager &stateMgr)
@@ -62,133 +57,6 @@ SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
}
//===----------------------------------------------------------------------===//
-// Transfer function for Casts.
-//===----------------------------------------------------------------------===//
-
-SVal SimpleSValBuilder::dispatchCast(SVal Val, QualType CastTy) {
- assert(Val.getAs<Loc>() || Val.getAs<NonLoc>());
- return Val.getAs<Loc>() ? evalCastFromLoc(Val.castAs<Loc>(), CastTy)
- : evalCastFromNonLoc(Val.castAs<NonLoc>(), CastTy);
-}
-
-SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
- bool isLocType = Loc::isLocType(castTy);
- if (val.getAs<nonloc::PointerToMember>())
- return val;
-
- if (Optional<nonloc::LocAsInteger> LI = val.getAs<nonloc::LocAsInteger>()) {
- if (isLocType)
- return LI->getLoc();
- // FIXME: Correctly support promotions/truncations.
- unsigned castSize = Context.getIntWidth(castTy);
- if (castSize == LI->getNumBits())
- return val;
- return makeLocAsInteger(LI->getLoc(), castSize);
- }
-
- if (SymbolRef se = val.getAsSymbol()) {
- QualType T = Context.getCanonicalType(se->getType());
- // If types are the same or both are integers, ignore the cast.
- // FIXME: Remove this hack when we support symbolic truncation/extension.
- // HACK: If both castTy and T are integers, ignore the cast. This is
- // not a permanent solution. Eventually we want to precisely handle
- // extension/truncation of symbolic integers. This prevents us from losing
- // precision when we assign 'x = y' and 'y' is symbolic and x and y are
- // different integer types.
- if (haveSameType(T, castTy))
- return val;
-
- if (!isLocType)
- return makeNonLoc(se, T, castTy);
- return UnknownVal();
- }
-
- // If value is a non-integer constant, produce unknown.
- if (!val.getAs<nonloc::ConcreteInt>())
- return UnknownVal();
-
- // Handle casts to a boolean type.
- if (castTy->isBooleanType()) {
- bool b = val.castAs<nonloc::ConcreteInt>().getValue().getBoolValue();
- return makeTruthVal(b, castTy);
- }
-
- // Only handle casts from integers to integers - if val is an integer constant
- // being cast to a non-integer type, produce unknown.
- if (!isLocType && !castTy->isIntegralOrEnumerationType())
- return UnknownVal();
-
- llvm::APSInt i = val.castAs<nonloc::ConcreteInt>().getValue();
- BasicVals.getAPSIntType(castTy).apply(i);
-
- if (isLocType)
- return makeIntLocVal(i);
- else
- return makeIntVal(i);
-}
-
-SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
-
- // Casts from pointers -> pointers, just return the lval.
- //
- // Casts from pointers -> references, just return the lval. These
- // can be introduced by the frontend for corner cases, e.g
- // casting from va_list* to __builtin_va_list&.
- //
- if (Loc::isLocType(castTy) || castTy->isReferenceType())
- return val;
-
- // FIXME: Handle transparent unions where a value can be "transparently"
- // lifted into a union type.
- if (castTy->isUnionType())
- return UnknownVal();
-
- // Casting a Loc to a bool will almost always be true,
- // unless this is a weak function or a symbolic region.
- if (castTy->isBooleanType()) {
- switch (val.getSubKind()) {
- case loc::MemRegionValKind: {
- const MemRegion *R = val.castAs<loc::MemRegionVal>().getRegion();
- if (const FunctionCodeRegion *FTR = dyn_cast<FunctionCodeRegion>(R))
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(FTR->getDecl()))
- if (FD->isWeak())
- // FIXME: Currently we are using an extent symbol here,
- // because there are no generic region address metadata
- // symbols to use, only content metadata.
- return nonloc::SymbolVal(SymMgr.getExtentSymbol(FTR));
-
- if (const SymbolicRegion *SymR = R->getSymbolicBase())
- return makeNonLoc(SymR->getSymbol(), BO_NE,
- BasicVals.getZeroWithPtrWidth(), castTy);
-
- // FALL-THROUGH
- LLVM_FALLTHROUGH;
- }
-
- case loc::GotoLabelKind:
- // Labels and non-symbolic memory regions are always true.
- return makeTruthVal(true, castTy);
- }
- }
-
- if (castTy->isIntegralOrEnumerationType()) {
- unsigned BitWidth = Context.getIntWidth(castTy);
-
- if (!val.getAs<loc::ConcreteInt>())
- return makeLocAsInteger(val, BitWidth);
-
- llvm::APSInt i = val.castAs<loc::ConcreteInt>().getValue();
- BasicVals.getAPSIntType(castTy).apply(i);
- return makeIntVal(i);
- }
-
- // All other cases: return 'UnknownVal'. This includes casting pointers
- // to floats, which is probably badness it itself, but this is a good
- // intermediate solution until we do something better.
- return UnknownVal();
-}
-
-//===----------------------------------------------------------------------===//
// Transfer function for unary operators.
//===----------------------------------------------------------------------===//
@@ -276,10 +144,10 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
}
// Idempotent ops (like a*1) can still change the type of an expression.
- // Wrap the LHS up in a NonLoc again and let evalCastFromNonLoc do the
+ // Wrap the LHS up in a NonLoc again and let evalCast do the
// dirty work.
if (isIdempotent)
- return evalCastFromNonLoc(nonloc::SymbolVal(LHS), resultTy);
+ return evalCast(nonloc::SymbolVal(LHS), resultTy, QualType{});
// If we reach this point, the expression cannot be simplified.
// Make a SymbolVal for the entire expression, after converting the RHS.
@@ -525,10 +393,11 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_Sub:
if (resultTy->isIntegralOrEnumerationType())
return makeIntVal(0, resultTy);
- return evalCastFromNonLoc(makeIntVal(0, /*isUnsigned=*/false), resultTy);
+ return evalCast(makeIntVal(0, /*isUnsigned=*/false), resultTy,
+ QualType{});
case BO_Or:
case BO_And:
- return evalCastFromNonLoc(lhs, resultTy);
+ return evalCast(lhs, resultTy, QualType{});
}
while (1) {
@@ -645,13 +514,15 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
case BO_Shr:
// (~0)>>a
if (LHSValue.isAllOnesValue() && LHSValue.isSigned())
- return evalCastFromNonLoc(lhs, resultTy);
+ return evalCast(lhs, resultTy, QualType{});
LLVM_FALLTHROUGH;
case BO_Shl:
// 0<<a and 0>>a
if (LHSValue == 0)
- return evalCastFromNonLoc(lhs, resultTy);
+ return evalCast(lhs, resultTy, QualType{});
return makeSymExprValNN(op, InputLHS, InputRHS, resultTy);
+ case BO_Div:
+ // 0 / x == 0
case BO_Rem:
// 0 % x == 0
if (LHSValue == 0)
@@ -865,7 +736,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
default:
break;
case BO_Sub:
- return evalCastFromLoc(lhs, resultTy);
+ return evalCast(lhs, resultTy, QualType{});
case BO_EQ:
case BO_LE:
case BO_LT:
@@ -902,7 +773,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
SVal ResultVal =
lhs.castAs<loc::ConcreteInt>().evalBinOp(BasicVals, op, *rInt);
if (Optional<NonLoc> Result = ResultVal.getAs<NonLoc>())
- return evalCastFromNonLoc(*Result, resultTy);
+ return evalCast(*Result, resultTy, QualType{});
assert(!ResultVal.getAs<Loc>() && "Loc-Loc ops should not produce Locs");
return UnknownVal();
@@ -947,11 +818,11 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// to be non-NULL.
if (rInt->isZeroConstant()) {
if (op == BO_Sub)
- return evalCastFromLoc(lhs, resultTy);
+ return evalCast(lhs, resultTy, QualType{});
if (BinaryOperator::isComparisonOp(op)) {
QualType boolType = getContext().BoolTy;
- NonLoc l = evalCastFromLoc(lhs, boolType).castAs<NonLoc>();
+ NonLoc l = evalCast(lhs, boolType, QualType{}).castAs<NonLoc>();
NonLoc r = makeTruthVal(false, boolType).castAs<NonLoc>();
return evalBinOpNN(state, op, l, r, resultTy);
}
@@ -1033,7 +904,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
Optional<NonLoc> LeftIndex = LeftIndexVal.getAs<NonLoc>();
if (!LeftIndex)
return UnknownVal();
- LeftIndexVal = evalCastFromNonLoc(*LeftIndex, ArrayIndexTy);
+ LeftIndexVal = evalCast(*LeftIndex, ArrayIndexTy, QualType{});
LeftIndex = LeftIndexVal.getAs<NonLoc>();
if (!LeftIndex)
return UnknownVal();
@@ -1043,7 +914,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
Optional<NonLoc> RightIndex = RightIndexVal.getAs<NonLoc>();
if (!RightIndex)
return UnknownVal();
- RightIndexVal = evalCastFromNonLoc(*RightIndex, ArrayIndexTy);
+ RightIndexVal = evalCast(*RightIndex, ArrayIndexTy, QualType{});
RightIndex = RightIndexVal.getAs<NonLoc>();
if (!RightIndex)
return UnknownVal();
diff --git a/clang/lib/StaticAnalyzer/Core/Store.cpp b/clang/lib/StaticAnalyzer/Core/Store.cpp
index ea617bbeeba1..b867b0746f90 100644
--- a/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -71,7 +71,8 @@ const ElementRegion *StoreManager::GetElementZeroRegion(const SubRegion *R,
return MRMgr.getElementRegion(T, idx, R, Ctx);
}
-const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
+Optional<const MemRegion *> StoreManager::castRegion(const MemRegion *R,
+ QualType CastToTy) {
ASTContext &Ctx = StateMgr.getContext();
// Handle casts to Objective-C objects.
@@ -88,7 +89,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
// We don't know what to make of it. Return a NULL region, which
// will be interpreted as UnknownVal.
- return nullptr;
+ return None;
}
// Now assume we are casting from pointer to pointer. Other cases should
@@ -168,7 +169,7 @@ const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy)
// If we cannot compute a raw offset, throw up our hands and return
// a NULL MemRegion*.
if (!baseR)
- return nullptr;
+ return None;
CharUnits off = rawOff.getOffset();
@@ -394,48 +395,6 @@ SVal StoreManager::attemptDownCast(SVal Base, QualType TargetType,
return UnknownVal();
}
-static bool hasSameUnqualifiedPointeeType(QualType ty1, QualType ty2) {
- return ty1->getPointeeType().getCanonicalType().getTypePtr() ==
- ty2->getPointeeType().getCanonicalType().getTypePtr();
-}
-
-/// CastRetrievedVal - Used by subclasses of StoreManager to implement
-/// implicit casts that arise from loads from regions that are reinterpreted
-/// as another region.
-SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
- QualType castTy) {
- if (castTy.isNull() || V.isUnknownOrUndef())
- return V;
-
- // The dispatchCast() call below would convert the int into a float.
- // What we want, however, is a bit-by-bit reinterpretation of the int
- // as a float, which usually yields nothing garbage. For now skip casts
- // from ints to floats.
- // TODO: What other combinations of types are affected?
- if (castTy->isFloatingType()) {
- SymbolRef Sym = V.getAsSymbol();
- if (Sym && !Sym->getType()->isFloatingType())
- return UnknownVal();
- }
-
- // When retrieving symbolic pointer and expecting a non-void pointer,
- // wrap them into element regions of the expected type if necessary.
- // SValBuilder::dispatchCast() doesn't do that, but it is necessary to
- // make sure that the retrieved value makes sense, because there's no other
- // cast in the AST that would tell us to cast it to the correct pointer type.
- // We might need to do that for non-void pointers as well.
- // FIXME: We really need a single good function to perform casts for us
- // correctly every time we need it.
- if (castTy->isPointerType() && !castTy->isVoidPointerType())
- if (const auto *SR = dyn_cast_or_null<SymbolicRegion>(V.getAsRegion())) {
- QualType sr = SR->getSymbol()->getType();
- if (!hasSameUnqualifiedPointeeType(sr, castTy))
- return loc::MemRegionVal(castRegion(SR, castTy));
- }
-
- return svalBuilder.dispatchCast(V, castTy);
-}
-
SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
if (Base.isUnknownOrUndef())
return Base;
diff --git a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
index ae2bad7ee77c..4f3be7cae331 100644
--- a/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
+++ b/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -10,6 +10,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/Version.h"
@@ -138,8 +139,9 @@ public:
void ento::createTextPathDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
- const std::string &Prefix, const clang::Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const std::string &Prefix, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
C.emplace_back(new TextDiagnostics(std::move(DiagOpts), PP.getDiagnostics(),
PP.getLangOpts(),
/*ShouldDisplayPathNotes=*/true));
@@ -147,8 +149,9 @@ void ento::createTextPathDiagnosticConsumer(
void ento::createTextMinimalPathDiagnosticConsumer(
PathDiagnosticConsumerOptions DiagOpts, PathDiagnosticConsumers &C,
- const std::string &Prefix, const clang::Preprocessor &PP,
- const cross_tu::CrossTranslationUnitContext &CTU) {
+ const std::string &Prefix, const Preprocessor &PP,
+ const cross_tu::CrossTranslationUnitContext &CTU,
+ const MacroExpansionContext &MacroExpansions) {
C.emplace_back(new TextDiagnostics(std::move(DiagOpts), PP.getDiagnostics(),
PP.getLangOpts(),
/*ShouldDisplayPathNotes=*/false));
diff --git a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index f2a19b2ccc90..31de49033ac2 100644
--- a/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -20,6 +20,7 @@
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CallGraph.h"
#include "clang/Analysis/CodeInjector.h"
+#include "clang/Analysis/MacroExpansionContext.h"
#include "clang/Analysis/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/CrossTU/CrossTranslationUnit.h"
@@ -98,6 +99,8 @@ public:
/// working with a PCH file.
SetOfDecls LocalTUDecls;
+ MacroExpansionContext MacroExpansions;
+
// Set of PathDiagnosticConsumers. Owned by AnalysisManager.
PathDiagnosticConsumers PathConsumers;
@@ -122,9 +125,11 @@ public:
CodeInjector *injector)
: RecVisitorMode(0), RecVisitorBR(nullptr), Ctx(nullptr),
PP(CI.getPreprocessor()), OutDir(outdir), Opts(std::move(opts)),
- Plugins(plugins), Injector(injector), CTU(CI) {
+ Plugins(plugins), Injector(injector), CTU(CI),
+ MacroExpansions(CI.getLangOpts()) {
DigestAnalyzerOptions();
- if (Opts->PrintStats || Opts->ShouldSerializeStats) {
+ if (Opts->AnalyzerDisplayProgress || Opts->PrintStats ||
+ Opts->ShouldSerializeStats) {
AnalyzerTimers = std::make_unique<llvm::TimerGroup>(
"analyzer", "Analyzer timers");
SyntaxCheckTimer = std::make_unique<llvm::Timer>(
@@ -134,8 +139,14 @@ public:
BugReporterTimer = std::make_unique<llvm::Timer>(
"bugreporter", "Path-sensitive report post-processing time",
*AnalyzerTimers);
+ }
+
+ if (Opts->PrintStats || Opts->ShouldSerializeStats) {
llvm::EnableStatistics(/* PrintOnExit= */ false);
}
+
+ if (Opts->ShouldDisplayMacroExpansions)
+ MacroExpansions.registerForPreprocessor(PP);
}
~AnalysisConsumer() override {
@@ -150,7 +161,8 @@ public:
break;
#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN) \
case PD_##NAME: \
- CREATEFN(Opts->getDiagOpts(), PathConsumers, OutDir, PP, CTU); \
+ CREATEFN(Opts->getDiagOpts(), PathConsumers, OutDir, PP, CTU, \
+ MacroExpansions); \
break;
#include "clang/StaticAnalyzer/Core/Analyses.def"
default:
@@ -175,6 +187,14 @@ public:
}
}
+ void DisplayTime(llvm::TimeRecord &Time) {
+ if (!Opts->AnalyzerDisplayProgress) {
+ return;
+ }
+ llvm::errs() << " : " << llvm::format("%1.1f", Time.getWallTime() * 1000)
+ << " ms\n";
+ }
+
void DisplayFunction(const Decl *D, AnalysisMode Mode,
ExprEngine::InliningModes IMode) {
if (!Opts->AnalyzerDisplayProgress)
@@ -201,8 +221,8 @@ public:
} else
assert(Mode == (AM_Syntax | AM_Path) && "Unexpected mode!");
- llvm::errs() << ": " << Loc.getFilename() << ' ' << getFunctionName(D)
- << '\n';
+ llvm::errs() << ": " << Loc.getFilename() << ' '
+ << AnalysisDeclContext::getFunctionName(D);
}
}
@@ -560,63 +580,10 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
Mgr.reset();
}
-std::string AnalysisConsumer::getFunctionName(const Decl *D) {
- std::string Str;
- llvm::raw_string_ostream OS(Str);
-
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- OS << FD->getQualifiedNameAsString();
-
- // In C++, there are overloads.
- if (Ctx->getLangOpts().CPlusPlus) {
- OS << '(';
- for (const auto &P : FD->parameters()) {
- if (P != *FD->param_begin())
- OS << ", ";
- OS << P->getType().getAsString();
- }
- OS << ')';
- }
-
- } else if (isa<BlockDecl>(D)) {
- PresumedLoc Loc = Ctx->getSourceManager().getPresumedLoc(D->getLocation());
-
- if (Loc.isValid()) {
- OS << "block (line: " << Loc.getLine() << ", col: " << Loc.getColumn()
- << ')';
- }
-
- } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
-
- // FIXME: copy-pasted from CGDebugInfo.cpp.
- OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
- const DeclContext *DC = OMD->getDeclContext();
- if (const auto *OID = dyn_cast<ObjCImplementationDecl>(DC)) {
- OS << OID->getName();
- } else if (const auto *OID = dyn_cast<ObjCInterfaceDecl>(DC)) {
- OS << OID->getName();
- } else if (const auto *OC = dyn_cast<ObjCCategoryDecl>(DC)) {
- if (OC->IsClassExtension()) {
- OS << OC->getClassInterface()->getName();
- } else {
- OS << OC->getIdentifier()->getNameStart() << '('
- << OC->getIdentifier()->getNameStart() << ')';
- }
- } else if (const auto *OCD = dyn_cast<ObjCCategoryImplDecl>(DC)) {
- OS << OCD->getClassInterface()->getName() << '('
- << OCD->getName() << ')';
- }
- OS << ' ' << OMD->getSelector().getAsString() << ']';
-
- }
-
- return OS.str();
-}
-
AnalysisConsumer::AnalysisMode
AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
if (!Opts->AnalyzeSpecificFunction.empty() &&
- getFunctionName(D) != Opts->AnalyzeSpecificFunction)
+ AnalysisDeclContext::getFunctionName(D) != Opts->AnalyzeSpecificFunction)
return AM_None;
// Unless -analyze-all is specified, treat decls differently depending on
@@ -653,19 +620,26 @@ void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
if (Mgr->getAnalysisDeclContext(D)->isBodyAutosynthesized())
return;
- DisplayFunction(D, Mode, IMode);
CFG *DeclCFG = Mgr->getCFG(D);
if (DeclCFG)
MaxCFGSize.updateMax(DeclCFG->size());
+ DisplayFunction(D, Mode, IMode);
BugReporter BR(*Mgr);
if (Mode & AM_Syntax) {
- if (SyntaxCheckTimer)
+ llvm::TimeRecord CheckerStartTime;
+ if (SyntaxCheckTimer) {
+ CheckerStartTime = SyntaxCheckTimer->getTotalTime();
SyntaxCheckTimer->startTimer();
+ }
checkerMgr->runCheckersOnASTBody(D, *Mgr, BR);
- if (SyntaxCheckTimer)
+ if (SyntaxCheckTimer) {
SyntaxCheckTimer->stopTimer();
+ llvm::TimeRecord CheckerEndTime = SyntaxCheckTimer->getTotalTime();
+ CheckerEndTime -= CheckerStartTime;
+ DisplayTime(CheckerEndTime);
+ }
}
BR.FlushReports();
@@ -696,12 +670,19 @@ void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
ExprEngine Eng(CTU, *Mgr, VisitedCallees, &FunctionSummaries, IMode);
// Execute the worklist algorithm.
- if (ExprEngineTimer)
+ llvm::TimeRecord ExprEngineStartTime;
+ if (ExprEngineTimer) {
+ ExprEngineStartTime = ExprEngineTimer->getTotalTime();
ExprEngineTimer->startTimer();
+ }
Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D),
Mgr->options.MaxNodesPerTopLevelFunction);
- if (ExprEngineTimer)
+ if (ExprEngineTimer) {
ExprEngineTimer->stopTimer();
+ llvm::TimeRecord ExprEngineEndTime = ExprEngineTimer->getTotalTime();
+ ExprEngineEndTime -= ExprEngineStartTime;
+ DisplayTime(ExprEngineEndTime);
+ }
if (!Mgr->options.DumpExplodedGraphTo.empty())
Eng.DumpGraph(Mgr->options.TrimGraph, Mgr->options.DumpExplodedGraphTo);
diff --git a/clang/lib/Tooling/ArgumentsAdjusters.cpp b/clang/lib/Tooling/ArgumentsAdjusters.cpp
index bcfb5b39a077..7f5dc4d62f11 100644
--- a/clang/lib/Tooling/ArgumentsAdjusters.cpp
+++ b/clang/lib/Tooling/ArgumentsAdjusters.cpp
@@ -62,7 +62,8 @@ ArgumentsAdjuster getClangSyntaxOnlyAdjuster() {
HasSyntaxOnly = true;
}
if (!HasSyntaxOnly)
- AdjustedArgs.push_back("-fsyntax-only");
+ AdjustedArgs =
+ getInsertArgumentAdjuster("-fsyntax-only")(AdjustedArgs, "");
return AdjustedArgs;
};
}
@@ -85,22 +86,6 @@ ArgumentsAdjuster getClangStripOutputAdjuster() {
};
}
-ArgumentsAdjuster getClangStripSerializeDiagnosticAdjuster() {
- return [](const CommandLineArguments &Args, StringRef /*unused*/) {
- CommandLineArguments AdjustedArgs;
- for (size_t i = 0, e = Args.size(); i < e; ++i) {
- StringRef Arg = Args[i];
- if (Arg == "--serialize-diagnostics") {
- // Skip the diagnostic output argument.
- ++i;
- continue;
- }
- AdjustedArgs.push_back(Args[i]);
- }
- return AdjustedArgs;
- };
-}
-
ArgumentsAdjuster getClangStripDependencyFileAdjuster() {
return [](const CommandLineArguments &Args, StringRef /*unused*/) {
auto UsingClDriver = (getDriverMode(Args) == "cl");
@@ -137,7 +122,7 @@ ArgumentsAdjuster getInsertArgumentAdjuster(const CommandLineArguments &Extra,
CommandLineArguments::iterator I;
if (Pos == ArgumentInsertPosition::END) {
- I = Return.end();
+ I = std::find(Return.begin(), Return.end(), "--");
} else {
I = Return.begin();
++I; // To leave the program name in place
diff --git a/clang/lib/Tooling/CommonOptionsParser.cpp b/clang/lib/Tooling/CommonOptionsParser.cpp
index 5d881aab1e0d..6301544dbb28 100644
--- a/clang/lib/Tooling/CommonOptionsParser.cpp
+++ b/clang/lib/Tooling/CommonOptionsParser.cpp
@@ -115,8 +115,7 @@ llvm::Error CommonOptionsParser::init(
// Stop initializing if command-line option parsing failed.
if (!cl::ParseCommandLineOptions(argc, argv, Overview, &OS)) {
OS.flush();
- return llvm::make_error<llvm::StringError>("[CommonOptionsParser]: " +
- ErrorMessage,
+ return llvm::make_error<llvm::StringError>(ErrorMessage,
llvm::inconvertibleErrorCode());
}
diff --git a/clang/lib/Tooling/Core/Diagnostic.cpp b/clang/lib/Tooling/Core/Diagnostic.cpp
index b0c4ea8c5608..fb3358024692 100644
--- a/clang/lib/Tooling/Core/Diagnostic.cpp
+++ b/clang/lib/Tooling/Core/Diagnostic.cpp
@@ -53,10 +53,9 @@ Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
Diagnostic::Diagnostic(llvm::StringRef DiagnosticName,
const DiagnosticMessage &Message,
const SmallVector<DiagnosticMessage, 1> &Notes,
- Level DiagLevel, llvm::StringRef BuildDirectory,
- const SmallVector<FileByteRange, 1> &Ranges)
+ Level DiagLevel, llvm::StringRef BuildDirectory)
: DiagnosticName(DiagnosticName), Message(Message), Notes(Notes),
- DiagLevel(DiagLevel), BuildDirectory(BuildDirectory), Ranges(Ranges) {}
+ DiagLevel(DiagLevel), BuildDirectory(BuildDirectory) {}
const llvm::StringMap<Replacements> *selectFirstFix(const Diagnostic& D) {
if (!D.Message.Fix.empty())
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 1c10b7d727a5..40e8bd2b8776 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -99,8 +99,7 @@ CachedFileSystemEntry::createDirectoryEntry(llvm::vfs::Status &&Stat) {
return Result;
}
-DependencyScanningFilesystemSharedCache::
- DependencyScanningFilesystemSharedCache() {
+DependencyScanningFilesystemSharedCache::SingleCache::SingleCache() {
// This heuristic was chosen using a empirical testing on a
// reasonably high core machine (iMacPro 18 cores / 36 threads). The cache
// sharding gives a performance edge by reducing the lock contention.
@@ -111,18 +110,20 @@ DependencyScanningFilesystemSharedCache::
CacheShards = std::make_unique<CacheShard[]>(NumShards);
}
-/// Returns a cache entry for the corresponding key.
-///
-/// A new cache entry is created if the key is not in the cache. This is a
-/// thread safe call.
DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
-DependencyScanningFilesystemSharedCache::get(StringRef Key) {
+DependencyScanningFilesystemSharedCache::SingleCache::get(StringRef Key) {
CacheShard &Shard = CacheShards[llvm::hash_value(Key) % NumShards];
std::unique_lock<std::mutex> LockGuard(Shard.CacheLock);
auto It = Shard.Cache.try_emplace(Key);
return It.first->getValue();
}
+DependencyScanningFilesystemSharedCache::SharedFileSystemEntry &
+DependencyScanningFilesystemSharedCache::get(StringRef Key, bool Minimized) {
+ SingleCache &Cache = Minimized ? CacheMinimized : CacheOriginal;
+ return Cache.get(Key);
+}
+
/// Whitelist file extensions that should be minimized, treating no extension as
/// a source file that should be minimized.
///
@@ -149,20 +150,32 @@ static bool shouldCacheStatFailures(StringRef Filename) {
return shouldMinimize(Filename); // Only cache stat failures on source files.
}
+void DependencyScanningWorkerFilesystem::ignoreFile(StringRef RawFilename) {
+ llvm::SmallString<256> Filename;
+ llvm::sys::path::native(RawFilename, Filename);
+ IgnoredFiles.insert(Filename);
+}
+
+bool DependencyScanningWorkerFilesystem::shouldIgnoreFile(
+ StringRef RawFilename) {
+ llvm::SmallString<256> Filename;
+ llvm::sys::path::native(RawFilename, Filename);
+ return IgnoredFiles.contains(Filename);
+}
+
llvm::ErrorOr<const CachedFileSystemEntry *>
DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
const StringRef Filename) {
- if (const CachedFileSystemEntry *Entry = getCachedEntry(Filename)) {
+ bool ShouldMinimize = !shouldIgnoreFile(Filename) && shouldMinimize(Filename);
+
+ if (const auto *Entry = Cache.getCachedEntry(Filename, ShouldMinimize))
return Entry;
- }
// FIXME: Handle PCM/PCH files.
// FIXME: Handle module map files.
- bool KeepOriginalSource = IgnoredFiles.count(Filename) ||
- !shouldMinimize(Filename);
DependencyScanningFilesystemSharedCache::SharedFileSystemEntry
- &SharedCacheEntry = SharedCache.get(Filename);
+ &SharedCacheEntry = SharedCache.get(Filename, ShouldMinimize);
const CachedFileSystemEntry *Result;
{
std::unique_lock<std::mutex> LockGuard(SharedCacheEntry.ValueLock);
@@ -184,15 +197,15 @@ DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
CacheEntry = CachedFileSystemEntry::createDirectoryEntry(
std::move(*MaybeStatus));
else
- CacheEntry = CachedFileSystemEntry::createFileEntry(
- Filename, FS, !KeepOriginalSource);
+ CacheEntry = CachedFileSystemEntry::createFileEntry(Filename, FS,
+ ShouldMinimize);
}
Result = &CacheEntry;
}
// Store the result in the local cache.
- setCachedEntry(Filename, Result);
+ Cache.setCachedEntry(Filename, ShouldMinimize, Result);
return Result;
}
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
index 93bb0cde439d..4f3e574719d2 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/DependencyScanningService.h"
+#include "llvm/Support/TargetSelect.h"
using namespace clang;
using namespace tooling;
@@ -16,4 +17,10 @@ DependencyScanningService::DependencyScanningService(
ScanningMode Mode, ScanningOutputFormat Format, bool ReuseFileManager,
bool SkipExcludedPPRanges)
: Mode(Mode), Format(Format), ReuseFileManager(ReuseFileManager),
- SkipExcludedPPRanges(SkipExcludedPPRanges) {}
+ SkipExcludedPPRanges(SkipExcludedPPRanges) {
+ // Initialize targets for object file support.
+ llvm::InitializeAllTargets();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmPrinters();
+ llvm::InitializeAllAsmParsers();
+}
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index 16040c2f4626..2fd12f7e12b1 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -13,17 +13,38 @@ namespace clang{
namespace tooling{
namespace dependencies{
-std::vector<std::string> FullDependencies::getAdditionalCommandLine(
- std::function<StringRef(ClangModuleDep)> LookupPCMPath,
- std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const {
- std::vector<std::string> Ret = AdditionalNonPathCommandLine;
-
- dependencies::detail::appendCommonModuleArguments(
- ClangModuleDeps, LookupPCMPath, LookupModuleDeps, Ret);
+std::vector<std::string> FullDependencies::getAdditionalArgs(
+ std::function<StringRef(ModuleID)> LookupPCMPath,
+ std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const {
+ std::vector<std::string> Ret = getAdditionalArgsWithoutModulePaths();
+
+ std::vector<std::string> PCMPaths;
+ std::vector<std::string> ModMapPaths;
+ dependencies::detail::collectPCMAndModuleMapPaths(
+ ClangModuleDeps, LookupPCMPath, LookupModuleDeps, PCMPaths, ModMapPaths);
+ for (const std::string &PCMPath : PCMPaths)
+ Ret.push_back("-fmodule-file=" + PCMPath);
+ for (const std::string &ModMapPath : ModMapPaths)
+ Ret.push_back("-fmodule-map-file=" + ModMapPath);
return Ret;
}
+std::vector<std::string>
+FullDependencies::getAdditionalArgsWithoutModulePaths() const {
+ std::vector<std::string> Args{
+ "-fno-implicit-modules",
+ "-fno-implicit-module-maps",
+ };
+
+ for (const PrebuiltModuleDep &PMD : PrebuiltModuleDeps) {
+ Args.push_back("-fmodule-file=" + PMD.ModuleName + "=" + PMD.PCMFile);
+ Args.push_back("-fmodule-map-file=" + PMD.ModuleMapFile);
+ }
+
+ return Args;
+}
+
DependencyScanningTool::DependencyScanningTool(
DependencyScanningService &Service)
: Worker(Service) {}
@@ -33,13 +54,19 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
/// Prints out all of the gathered dependencies into a string.
class MakeDependencyPrinterConsumer : public DependencyConsumer {
public:
- void handleFileDependency(const DependencyOutputOptions &Opts,
- StringRef File) override {
- if (!this->Opts)
- this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
+ void
+ handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {
+ this->Opts = std::make_unique<DependencyOutputOptions>(Opts);
+ }
+
+ void handleFileDependency(StringRef File) override {
Dependencies.push_back(std::string(File));
}
+ void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
+ // Same as `handleModuleDependency`.
+ }
+
void handleModuleDependency(ModuleDeps MD) override {
// These are ignored for the make format as it can't support the full
// set of deps, and handleFileDependency handles enough for implicitly
@@ -49,8 +76,7 @@ llvm::Expected<std::string> DependencyScanningTool::getDependencyFile(
void handleContextHash(std::string Hash) override {}
void printDependencies(std::string &S) {
- if (!Opts)
- return;
+ assert(Opts && "Handled dependency output options.");
class DependencyPrinter : public DependencyFileGenerator {
public:
@@ -103,13 +129,19 @@ DependencyScanningTool::getFullDependencies(
FullDependencyPrinterConsumer(const llvm::StringSet<> &AlreadySeen)
: AlreadySeen(AlreadySeen) {}
- void handleFileDependency(const DependencyOutputOptions &Opts,
- StringRef File) override {
+ void
+ handleDependencyOutputOpts(const DependencyOutputOptions &Opts) override {}
+
+ void handleFileDependency(StringRef File) override {
Dependencies.push_back(std::string(File));
}
+ void handlePrebuiltModuleDependency(PrebuiltModuleDep PMD) override {
+ PrebuiltModuleDeps.emplace_back(std::move(PMD));
+ }
+
void handleModuleDependency(ModuleDeps MD) override {
- ClangModuleDeps[MD.ContextHash + MD.ModuleName] = std::move(MD);
+ ClangModuleDeps[MD.ID.ContextHash + MD.ID.ModuleName] = std::move(MD);
}
void handleContextHash(std::string Hash) override {
@@ -119,16 +151,18 @@ DependencyScanningTool::getFullDependencies(
FullDependenciesResult getFullDependencies() const {
FullDependencies FD;
- FD.ContextHash = std::move(ContextHash);
+ FD.ID.ContextHash = std::move(ContextHash);
FD.FileDeps.assign(Dependencies.begin(), Dependencies.end());
for (auto &&M : ClangModuleDeps) {
auto &MD = M.second;
if (MD.ImportedByMainFile)
- FD.ClangModuleDeps.push_back({MD.ModuleName, ContextHash});
+ FD.ClangModuleDeps.push_back(MD.ID);
}
+ FD.PrebuiltModuleDeps = std::move(PrebuiltModuleDeps);
+
FullDependenciesResult FDR;
for (auto &&M : ClangModuleDeps) {
@@ -145,6 +179,7 @@ DependencyScanningTool::getFullDependencies(
private:
std::vector<std::string> Dependencies;
+ std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
std::unordered_map<std::string, ModuleDeps> ClangModuleDeps;
std::string ContextHash;
std::vector<std::string> OutputPaths;
diff --git a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 63264b0dda2d..d651ff23b387 100644
--- a/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -7,6 +7,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "clang/CodeGen/ObjectFilePCHContainerOperations.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -31,11 +32,12 @@ public:
: DependencyFileGenerator(*Opts), Opts(std::move(Opts)), C(C) {}
void finishedMainFile(DiagnosticsEngine &Diags) override {
+ C.handleDependencyOutputOpts(*Opts);
llvm::SmallString<256> CanonPath;
for (const auto &File : getDependencies()) {
CanonPath = File;
llvm::sys::path::remove_dots(CanonPath, /*remove_dot_dot=*/true);
- C.handleFileDependency(*Opts, CanonPath);
+ C.handleFileDependency(CanonPath);
}
}
@@ -44,6 +46,93 @@ private:
DependencyConsumer &C;
};
+/// A listener that collects the imported modules and optionally the input
+/// files.
+class PrebuiltModuleListener : public ASTReaderListener {
+public:
+ PrebuiltModuleListener(llvm::StringMap<std::string> &PrebuiltModuleFiles,
+ llvm::StringSet<> &InputFiles, bool VisitInputFiles)
+ : PrebuiltModuleFiles(PrebuiltModuleFiles), InputFiles(InputFiles),
+ VisitInputFiles(VisitInputFiles) {}
+
+ bool needsImportVisitation() const override { return true; }
+ bool needsInputFileVisitation() override { return VisitInputFiles; }
+ bool needsSystemInputFileVisitation() override { return VisitInputFiles; }
+
+ void visitImport(StringRef ModuleName, StringRef Filename) override {
+ PrebuiltModuleFiles.insert({ModuleName, Filename.str()});
+ }
+
+ bool visitInputFile(StringRef Filename, bool isSystem, bool isOverridden,
+ bool isExplicitModule) override {
+ InputFiles.insert(Filename);
+ return true;
+ }
+
+private:
+ llvm::StringMap<std::string> &PrebuiltModuleFiles;
+ llvm::StringSet<> &InputFiles;
+ bool VisitInputFiles;
+};
+
+using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
+
+/// Visit the given prebuilt module and collect all of the modules it
+/// transitively imports and contributing input files.
+static void visitPrebuiltModule(StringRef PrebuiltModuleFilename,
+ CompilerInstance &CI,
+ PrebuiltModuleFilesT &ModuleFiles,
+ llvm::StringSet<> &InputFiles,
+ bool VisitInputFiles) {
+ // Maps the names of modules that weren't yet visited to their PCM path.
+ llvm::StringMap<std::string> ModuleFilesWorklist;
+ // Contains PCM paths of all visited modules.
+ llvm::StringSet<> VisitedModuleFiles;
+
+ PrebuiltModuleListener Listener(ModuleFilesWorklist, InputFiles,
+ VisitInputFiles);
+
+ auto GatherModuleFileInfo = [&](StringRef ASTFile) {
+ ASTReader::readASTFileControlBlock(
+ ASTFile, CI.getFileManager(), CI.getPCHContainerReader(),
+ /*FindModuleFileExtensions=*/false, Listener,
+ /*ValidateDiagnosticOptions=*/false);
+ };
+
+ GatherModuleFileInfo(PrebuiltModuleFilename);
+ while (!ModuleFilesWorklist.empty()) {
+ auto WorklistItemIt = ModuleFilesWorklist.begin();
+
+ if (!VisitedModuleFiles.contains(WorklistItemIt->getValue())) {
+ VisitedModuleFiles.insert(WorklistItemIt->getValue());
+ GatherModuleFileInfo(WorklistItemIt->getValue());
+ ModuleFiles[WorklistItemIt->getKey().str()] = WorklistItemIt->getValue();
+ }
+
+ ModuleFilesWorklist.erase(WorklistItemIt);
+ }
+}
+
+/// Transform arbitrary file name into an object-like file name.
+static std::string makeObjFileName(StringRef FileName) {
+ SmallString<128> ObjFileName(FileName);
+ llvm::sys::path::replace_extension(ObjFileName, "o");
+ return std::string(ObjFileName.str());
+}
+
+/// Deduce the dependency target based on the output file and input files.
+static std::string
+deduceDepTarget(const std::string &OutputFile,
+ const SmallVectorImpl<FrontendInputFile> &InputFiles) {
+ if (OutputFile != "-")
+ return OutputFile;
+
+ if (InputFiles.empty() || !InputFiles.front().isFile())
+ return "clang-scan-deps\\ dependency";
+
+ return makeObjFileName(InputFiles.front().getFile());
+}
+
/// A clang tool that runs the preprocessor in a mode that's optimized for
/// dependency scanning for the given compiler invocation.
class DependencyScanningAction : public tooling::ToolAction {
@@ -61,28 +150,57 @@ public:
FileManager *FileMgr,
std::shared_ptr<PCHContainerOperations> PCHContainerOps,
DiagnosticConsumer *DiagConsumer) override {
+ // Make a deep copy of the original Clang invocation.
+ CompilerInvocation OriginalInvocation(*Invocation);
+
// Create a compiler instance to handle the actual work.
CompilerInstance Compiler(std::move(PCHContainerOps));
Compiler.setInvocation(std::move(Invocation));
// Don't print 'X warnings and Y errors generated'.
Compiler.getDiagnosticOpts().ShowCarets = false;
+ // Don't write out diagnostic file.
+ Compiler.getDiagnosticOpts().DiagnosticSerializationFile.clear();
+ // Don't treat warnings as errors.
+ Compiler.getDiagnosticOpts().Warnings.push_back("no-error");
// Create the compiler's actual diagnostics engine.
Compiler.createDiagnostics(DiagConsumer, /*ShouldOwnClient=*/false);
if (!Compiler.hasDiagnostics())
return false;
- // Use the dependency scanning optimized file system if we can.
+ Compiler.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath = true;
+
+ FileMgr->getFileSystemOpts().WorkingDir = std::string(WorkingDirectory);
+ Compiler.setFileManager(FileMgr);
+ Compiler.createSourceManager(*FileMgr);
+
+ llvm::StringSet<> PrebuiltModulesInputFiles;
+ // Store the list of prebuilt module files into header search options. This
+ // will prevent the implicit build to create duplicate modules and will
+ // force reuse of the existing prebuilt module files instead.
+ if (!Compiler.getPreprocessorOpts().ImplicitPCHInclude.empty())
+ visitPrebuiltModule(
+ Compiler.getPreprocessorOpts().ImplicitPCHInclude, Compiler,
+ Compiler.getHeaderSearchOpts().PrebuiltModuleFiles,
+ PrebuiltModulesInputFiles, /*VisitInputFiles=*/DepFS != nullptr);
+
+ // Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
const CompilerInvocation &CI = Compiler.getInvocation();
+ DepFS->clearIgnoredFiles();
+ // Ignore any files that contributed to prebuilt modules. The implicit
+ // build validates the modules by comparing the reported sizes of their
+ // inputs to the current state of the filesystem. Minimization would throw
+ // this mechanism off.
+ for (const auto &File : PrebuiltModulesInputFiles)
+ DepFS->ignoreFile(File.getKey());
// Add any filenames that were explicity passed in the build settings and
// that might be opened, as we want to ensure we don't run source
// minimization on them.
- DepFS->IgnoredFiles.clear();
for (const auto &Entry : CI.getHeaderSearchOpts().UserEntries)
- DepFS->IgnoredFiles.insert(Entry.Path);
+ DepFS->ignoreFile(Entry.Path);
for (const auto &Entry : CI.getHeaderSearchOpts().VFSOverlayFiles)
- DepFS->IgnoredFiles.insert(Entry);
+ DepFS->ignoreFile(Entry);
// Support for virtual file system overlays on top of the caching
// filesystem.
@@ -96,10 +214,6 @@ public:
.ExcludedConditionalDirectiveSkipMappings = PPSkipMappings;
}
- FileMgr->getFileSystemOpts().WorkingDir = std::string(WorkingDirectory);
- Compiler.setFileManager(FileMgr);
- Compiler.createSourceManager(*FileMgr);
-
// Create the dependency collector that will collect the produced
// dependencies.
//
@@ -107,11 +221,14 @@ public:
// invocation to the collector. The options in the invocation are reset,
// which ensures that the compiler won't create new dependency collectors,
// and thus won't write out the extra '.d' files to disk.
- auto Opts = std::make_unique<DependencyOutputOptions>(
- std::move(Compiler.getInvocation().getDependencyOutputOpts()));
- // We need at least one -MT equivalent for the generator to work.
+ auto Opts = std::make_unique<DependencyOutputOptions>();
+ std::swap(*Opts, Compiler.getInvocation().getDependencyOutputOpts());
+ // We need at least one -MT equivalent for the generator of make dependency
+ // files to work.
if (Opts->Targets.empty())
- Opts->Targets = {"clang-scan-deps dependency"};
+ Opts->Targets = {deduceDepTarget(Compiler.getFrontendOpts().OutputFile,
+ Compiler.getFrontendOpts().Inputs)};
+ Opts->IncludeSystemHeaders = true;
switch (Format) {
case ScanningOutputFormat::Make:
@@ -121,7 +238,7 @@ public:
break;
case ScanningOutputFormat::Full:
Compiler.addDependencyCollector(std::make_shared<ModuleDepCollector>(
- std::move(Opts), Compiler, Consumer));
+ std::move(Opts), Compiler, Consumer, std::move(OriginalInvocation)));
break;
}
@@ -132,7 +249,7 @@ public:
// the impact of strict context hashing.
Compiler.getHeaderSearchOpts().ModulesStrictContextHash = true;
- auto Action = std::make_unique<PreprocessOnlyAction>();
+ auto Action = std::make_unique<ReadPCHAndPreprocessAction>();
const bool Result = Compiler.ExecuteAction(*Action);
if (!DepFS)
FileMgr->clearStatCache();
@@ -153,7 +270,15 @@ DependencyScanningWorker::DependencyScanningWorker(
DependencyScanningService &Service)
: Format(Service.getFormat()) {
DiagOpts = new DiagnosticOptions();
+
PCHContainerOps = std::make_shared<PCHContainerOperations>();
+ PCHContainerOps->registerReader(
+ std::make_unique<ObjectFilePCHContainerReader>());
+ // We don't need to write object files, but the current PCH implementation
+ // requires the writer to be registered as well.
+ PCHContainerOps->registerWriter(
+ std::make_unique<ObjectFilePCHContainerWriter>());
+
RealFS = llvm::vfs::createPhysicalFileSystem();
if (Service.canSkipExcludedPPRanges())
PPSkipMappings =
diff --git a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index f74ce7304df5..88cee63c98aa 100644
--- a/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -12,49 +12,97 @@
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Tooling/DependencyScanning/DependencyScanningWorker.h"
+#include "llvm/Support/StringSaver.h"
using namespace clang;
using namespace tooling;
using namespace dependencies;
-std::vector<std::string> ModuleDeps::getFullCommandLine(
- std::function<StringRef(ClangModuleDep)> LookupPCMPath,
- std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps) const {
- std::vector<std::string> Ret = NonPathCommandLine;
+CompilerInvocation ModuleDepCollector::makeInvocationForModuleBuildWithoutPaths(
+ const ModuleDeps &Deps) const {
+ // Make a deep copy of the original Clang invocation.
+ CompilerInvocation CI(OriginalInvocation);
- // TODO: Build full command line. That also means capturing the original
- // command line into NonPathCommandLine.
+ // Remove options incompatible with explicit module build.
+ CI.getFrontendOpts().Inputs.clear();
+ CI.getFrontendOpts().OutputFile.clear();
- dependencies::detail::appendCommonModuleArguments(
- ClangModuleDeps, LookupPCMPath, LookupModuleDeps, Ret);
+ CI.getFrontendOpts().ProgramAction = frontend::GenerateModule;
+ CI.getLangOpts()->ModuleName = Deps.ID.ModuleName;
+ CI.getFrontendOpts().IsSystemModule = Deps.IsSystem;
- return Ret;
+ CI.getLangOpts()->ImplicitModules = false;
+
+ // Report the prebuilt modules this module uses.
+ for (const auto &PrebuiltModule : Deps.PrebuiltModuleDeps) {
+ CI.getFrontendOpts().ModuleFiles.push_back(PrebuiltModule.PCMFile);
+ CI.getFrontendOpts().ModuleMapFiles.push_back(PrebuiltModule.ModuleMapFile);
+ }
+
+ CI.getPreprocessorOpts().ImplicitPCHInclude.clear();
+
+ return CI;
}
-void dependencies::detail::appendCommonModuleArguments(
- llvm::ArrayRef<ClangModuleDep> Modules,
- std::function<StringRef(ClangModuleDep)> LookupPCMPath,
- std::function<const ModuleDeps &(ClangModuleDep)> LookupModuleDeps,
- std::vector<std::string> &Result) {
+static std::vector<std::string>
+serializeCompilerInvocation(const CompilerInvocation &CI) {
+ // Set up string allocator.
+ llvm::BumpPtrAllocator Alloc;
+ llvm::StringSaver Strings(Alloc);
+ auto SA = [&Strings](const Twine &Arg) { return Strings.save(Arg).data(); };
+
+ // Synthesize full command line from the CompilerInvocation, including "-cc1".
+ SmallVector<const char *, 32> Args{"-cc1"};
+ CI.generateCC1CommandLine(Args, SA);
+
+ // Convert arguments to the return type.
+ return std::vector<std::string>{Args.begin(), Args.end()};
+}
+
+std::vector<std::string> ModuleDeps::getCanonicalCommandLine(
+ std::function<StringRef(ModuleID)> LookupPCMPath,
+ std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const {
+ CompilerInvocation CI(Invocation);
+ FrontendOptions &FrontendOpts = CI.getFrontendOpts();
+
+ InputKind ModuleMapInputKind(FrontendOpts.DashX.getLanguage(),
+ InputKind::Format::ModuleMap);
+ FrontendOpts.Inputs.emplace_back(ClangModuleMapFile, ModuleMapInputKind);
+ FrontendOpts.OutputFile = std::string(LookupPCMPath(ID));
+
+ dependencies::detail::collectPCMAndModuleMapPaths(
+ ClangModuleDeps, LookupPCMPath, LookupModuleDeps,
+ FrontendOpts.ModuleFiles, FrontendOpts.ModuleMapFiles);
+
+ return serializeCompilerInvocation(CI);
+}
+
+std::vector<std::string>
+ModuleDeps::getCanonicalCommandLineWithoutModulePaths() const {
+ return serializeCompilerInvocation(Invocation);
+}
+
+void dependencies::detail::collectPCMAndModuleMapPaths(
+ llvm::ArrayRef<ModuleID> Modules,
+ std::function<StringRef(ModuleID)> LookupPCMPath,
+ std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps,
+ std::vector<std::string> &PCMPaths, std::vector<std::string> &ModMapPaths) {
llvm::StringSet<> AlreadyAdded;
- std::function<void(llvm::ArrayRef<ClangModuleDep>)> AddArgs =
- [&](llvm::ArrayRef<ClangModuleDep> Modules) {
- for (const ClangModuleDep &CMD : Modules) {
- if (!AlreadyAdded.insert(CMD.ModuleName + CMD.ContextHash).second)
+ std::function<void(llvm::ArrayRef<ModuleID>)> AddArgs =
+ [&](llvm::ArrayRef<ModuleID> Modules) {
+ for (const ModuleID &MID : Modules) {
+ if (!AlreadyAdded.insert(MID.ModuleName + MID.ContextHash).second)
continue;
- const ModuleDeps &M = LookupModuleDeps(CMD);
+ const ModuleDeps &M = LookupModuleDeps(MID);
// Depth first traversal.
AddArgs(M.ClangModuleDeps);
- Result.push_back(("-fmodule-file=" + LookupPCMPath(CMD)).str());
- if (!M.ClangModuleMapFile.empty()) {
- Result.push_back("-fmodule-map-file=" + M.ClangModuleMapFile);
- }
+ PCMPaths.push_back(LookupPCMPath(MID).str());
+ if (!M.ClangModuleMapFile.empty())
+ ModMapPaths.push_back(M.ClangModuleMapFile);
}
};
- Result.push_back("-fno-implicit-modules");
- Result.push_back("-fno-implicit-module-maps");
AddArgs(Modules);
}
@@ -79,7 +127,7 @@ void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
// We do not want #line markers to affect dependency generation!
if (Optional<StringRef> Filename =
SM.getNonBuiltinFilenameForID(SM.getFileID(SM.getExpansionLoc(Loc))))
- MDC.MainDeps.push_back(
+ MDC.FileDeps.push_back(
std::string(llvm::sys::path::remove_leading_dotslash(*Filename)));
}
@@ -91,7 +139,7 @@ void ModuleDepCollectorPP::InclusionDirective(
if (!File && !Imported) {
// This is a non-modular include that HeaderSearch failed to find. Add it
// here as `FileChanged` will never see it.
- MDC.MainDeps.push_back(std::string(FileName));
+ MDC.FileDeps.push_back(std::string(FileName));
}
handleImport(Imported);
}
@@ -106,9 +154,12 @@ void ModuleDepCollectorPP::handleImport(const Module *Imported) {
if (!Imported)
return;
- MDC.Deps[MDC.ContextHash + Imported->getTopLevelModule()->getFullModuleName()]
- .ImportedByMainFile = true;
- DirectDeps.insert(Imported->getTopLevelModule());
+ const Module *TopLevelModule = Imported->getTopLevelModule();
+
+ if (MDC.isPrebuiltModule(TopLevelModule))
+ DirectPrebuiltModularDeps.insert(TopLevelModule);
+ else
+ DirectModularDeps.insert(TopLevelModule);
}
void ModuleDepCollectorPP::EndOfMainFile() {
@@ -116,46 +167,88 @@ void ModuleDepCollectorPP::EndOfMainFile() {
MDC.MainFile = std::string(
Instance.getSourceManager().getFileEntryForID(MainFileID)->getName());
- for (const Module *M : DirectDeps) {
+ if (!Instance.getPreprocessorOpts().ImplicitPCHInclude.empty())
+ MDC.FileDeps.push_back(Instance.getPreprocessorOpts().ImplicitPCHInclude);
+
+ for (const Module *M : DirectModularDeps) {
+ // A top-level module might not be actually imported as a module when
+ // -fmodule-name is used to compile a translation unit that imports this
+ // module. In that case it can be skipped. The appropriate header
+ // dependencies will still be reported as expected.
+ if (!M->getASTFile())
+ continue;
handleTopLevelModule(M);
}
- for (auto &&I : MDC.Deps)
+ MDC.Consumer.handleDependencyOutputOpts(*MDC.Opts);
+
+ for (auto &&I : MDC.ModularDeps)
MDC.Consumer.handleModuleDependency(I.second);
- for (auto &&I : MDC.MainDeps)
- MDC.Consumer.handleFileDependency(*MDC.Opts, I);
+ for (auto &&I : MDC.FileDeps)
+ MDC.Consumer.handleFileDependency(I);
+
+ for (auto &&I : DirectPrebuiltModularDeps)
+ MDC.Consumer.handlePrebuiltModuleDependency(PrebuiltModuleDep{I});
}
-void ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
+ModuleID ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
assert(M == M->getTopLevelModule() && "Expected top level module!");
- auto ModI = MDC.Deps.insert(
- std::make_pair(MDC.ContextHash + M->getFullModuleName(), ModuleDeps{}));
-
- if (!ModI.first->second.ModuleName.empty())
- return;
+ // If this module has been handled already, just return its ID.
+ auto ModI = MDC.ModularDeps.insert({M, ModuleDeps{}});
+ if (!ModI.second)
+ return ModI.first->second.ID;
ModuleDeps &MD = ModI.first->second;
+ MD.ID.ModuleName = M->getFullModuleName();
+ MD.ImportedByMainFile = DirectModularDeps.contains(M);
+ MD.ImplicitModulePCMPath = std::string(M->getASTFile()->getName());
+ MD.IsSystem = M->IsSystem;
+
const FileEntry *ModuleMap = Instance.getPreprocessor()
.getHeaderSearchInfo()
.getModuleMap()
- .getContainingModuleMapFile(M);
-
+ .getModuleMapFileForUniquing(M);
MD.ClangModuleMapFile = std::string(ModuleMap ? ModuleMap->getName() : "");
- MD.ModuleName = M->getFullModuleName();
- MD.ImplicitModulePCMPath = std::string(M->getASTFile()->getName());
- MD.ContextHash = MDC.ContextHash;
+
serialization::ModuleFile *MF =
MDC.Instance.getASTReader()->getModuleManager().lookup(M->getASTFile());
MDC.Instance.getASTReader()->visitInputFiles(
*MF, true, true, [&](const serialization::InputFile &IF, bool isSystem) {
+ // __inferred_module.map is the result of the way in which an implicit
+ // module build handles inferred modules. It adds an overlay VFS with
+ // this file in the proper directory and relies on the rest of Clang to
+ // handle it like normal. With explicitly built modules we don't need
+ // to play VFS tricks, so replace it with the correct module map.
+ if (IF.getFile()->getName().endswith("__inferred_module.map")) {
+ MD.FileDeps.insert(ModuleMap->getName());
+ return;
+ }
MD.FileDeps.insert(IF.getFile()->getName());
});
+ // Add direct prebuilt module dependencies now, so that we can use them when
+ // creating a CompilerInvocation and computing context hash for this
+ // ModuleDeps instance.
+ addDirectPrebuiltModuleDeps(M, MD);
+
+ MD.Invocation = MDC.makeInvocationForModuleBuildWithoutPaths(MD);
+ MD.ID.ContextHash = MD.Invocation.getModuleHash();
+
llvm::DenseSet<const Module *> AddedModules;
addAllSubmoduleDeps(M, MD, AddedModules);
+
+ return MD.ID;
+}
+
+void ModuleDepCollectorPP::addDirectPrebuiltModuleDeps(const Module *M,
+ ModuleDeps &MD) {
+ for (const Module *Import : M->Imports)
+ if (Import->getTopLevelModule() != M->getTopLevelModule())
+ if (MDC.isPrebuiltModule(Import))
+ MD.PrebuiltModuleDeps.emplace_back(Import);
}
void ModuleDepCollectorPP::addAllSubmoduleDeps(
@@ -171,23 +264,35 @@ void ModuleDepCollectorPP::addModuleDep(
const Module *M, ModuleDeps &MD,
llvm::DenseSet<const Module *> &AddedModules) {
for (const Module *Import : M->Imports) {
- if (Import->getTopLevelModule() != M->getTopLevelModule()) {
+ if (Import->getTopLevelModule() != M->getTopLevelModule() &&
+ !MDC.isPrebuiltModule(Import)) {
+ ModuleID ImportID = handleTopLevelModule(Import->getTopLevelModule());
if (AddedModules.insert(Import->getTopLevelModule()).second)
- MD.ClangModuleDeps.push_back(
- {std::string(Import->getTopLevelModuleName()),
- Instance.getInvocation().getModuleHash()});
- handleTopLevelModule(Import->getTopLevelModule());
+ MD.ClangModuleDeps.push_back(ImportID);
}
}
}
ModuleDepCollector::ModuleDepCollector(
std::unique_ptr<DependencyOutputOptions> Opts, CompilerInstance &I,
- DependencyConsumer &C)
- : Instance(I), Consumer(C), Opts(std::move(Opts)) {}
+ DependencyConsumer &C, CompilerInvocation &&OriginalCI)
+ : Instance(I), Consumer(C), Opts(std::move(Opts)),
+ OriginalInvocation(std::move(OriginalCI)) {}
void ModuleDepCollector::attachToPreprocessor(Preprocessor &PP) {
PP.addPPCallbacks(std::make_unique<ModuleDepCollectorPP>(Instance, *this));
}
void ModuleDepCollector::attachToASTReader(ASTReader &R) {}
+
+bool ModuleDepCollector::isPrebuiltModule(const Module *M) {
+ std::string Name(M->getTopLevelModuleName());
+ const auto &PrebuiltModuleFiles =
+ Instance.getHeaderSearchOpts().PrebuiltModuleFiles;
+ auto PrebuiltModuleFileIt = PrebuiltModuleFiles.find(Name);
+ if (PrebuiltModuleFileIt == PrebuiltModuleFiles.end())
+ return false;
+ assert("Prebuilt module came from the expected AST file" &&
+ PrebuiltModuleFileIt->second == M->getASTFile()->getName());
+ return true;
+}
diff --git a/clang/lib/Tooling/DumpTool/APIData.h b/clang/lib/Tooling/DumpTool/APIData.h
new file mode 100644
index 000000000000..03e247a8bd95
--- /dev/null
+++ b/clang/lib/Tooling/DumpTool/APIData.h
@@ -0,0 +1,31 @@
+//===- APIData.h ---------------------------------------------*- C++ -*----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_TOOLING_DUMPTOOL_APIDATA_H
+#define LLVM_CLANG_LIB_TOOLING_DUMPTOOL_APIDATA_H
+
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace tooling {
+
+struct ClassData {
+ std::vector<std::string> ASTClassLocations;
+ std::vector<std::string> ASTClassRanges;
+ std::vector<std::string> TemplateParms;
+ std::vector<std::string> TypeSourceInfos;
+ std::vector<std::string> TypeLocs;
+ std::vector<std::string> NestedNameLocs;
+ std::vector<std::string> DeclNameInfos;
+};
+
+} // namespace tooling
+} // namespace clang
+
+#endif
diff --git a/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp b/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
new file mode 100644
index 000000000000..2f97067f6171
--- /dev/null
+++ b/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.cpp
@@ -0,0 +1,271 @@
+//===- ASTSrcLocProcessor.cpp --------------------------------*- C++ -*----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTSrcLocProcessor.h"
+
+#include "clang/Frontend/CompilerInstance.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang::tooling;
+using namespace llvm;
+using namespace clang::ast_matchers;
+
+ASTSrcLocProcessor::ASTSrcLocProcessor(StringRef JsonPath)
+ : JsonPath(JsonPath) {
+
+ MatchFinder::MatchFinderOptions FinderOptions;
+
+ Finder = std::make_unique<MatchFinder>(std::move(FinderOptions));
+ Finder->addMatcher(
+ cxxRecordDecl(
+ isDefinition(),
+ isSameOrDerivedFrom(
+ namedDecl(
+ hasAnyName(
+ "clang::Stmt", "clang::Decl", "clang::CXXCtorInitializer",
+ "clang::NestedNameSpecifierLoc",
+ "clang::TemplateArgumentLoc", "clang::CXXBaseSpecifier",
+ "clang::DeclarationNameInfo", "clang::TypeLoc"))
+ .bind("nodeClade")),
+ optionally(isDerivedFrom(cxxRecordDecl().bind("derivedFrom"))))
+ .bind("className"),
+ this);
+ Finder->addMatcher(
+ cxxRecordDecl(isDefinition(), hasAnyName("clang::PointerLikeTypeLoc",
+ "clang::TypeofLikeTypeLoc"))
+ .bind("templateName"),
+ this);
+}
+
+std::unique_ptr<clang::ASTConsumer>
+ASTSrcLocProcessor::createASTConsumer(clang::CompilerInstance &Compiler,
+ StringRef File) {
+ return Finder->newASTConsumer();
+}
+
+llvm::json::Object toJSON(llvm::StringMap<std::vector<StringRef>> const &Obj) {
+ using llvm::json::toJSON;
+
+ llvm::json::Object JsonObj;
+ for (const auto &Item : Obj) {
+ JsonObj[Item.first()] = Item.second;
+ }
+ return JsonObj;
+}
+
+llvm::json::Object toJSON(llvm::StringMap<std::string> const &Obj) {
+ using llvm::json::toJSON;
+
+ llvm::json::Object JsonObj;
+ for (const auto &Item : Obj) {
+ JsonObj[Item.first()] = Item.second;
+ }
+ return JsonObj;
+}
+
+llvm::json::Object toJSON(ClassData const &Obj) {
+ llvm::json::Object JsonObj;
+
+ if (!Obj.ASTClassLocations.empty())
+ JsonObj["sourceLocations"] = Obj.ASTClassLocations;
+ if (!Obj.ASTClassRanges.empty())
+ JsonObj["sourceRanges"] = Obj.ASTClassRanges;
+ if (!Obj.TemplateParms.empty())
+ JsonObj["templateParms"] = Obj.TemplateParms;
+ if (!Obj.TypeSourceInfos.empty())
+ JsonObj["typeSourceInfos"] = Obj.TypeSourceInfos;
+ if (!Obj.TypeLocs.empty())
+ JsonObj["typeLocs"] = Obj.TypeLocs;
+ if (!Obj.NestedNameLocs.empty())
+ JsonObj["nestedNameLocs"] = Obj.NestedNameLocs;
+ if (!Obj.DeclNameInfos.empty())
+ JsonObj["declNameInfos"] = Obj.DeclNameInfos;
+ return JsonObj;
+}
+
+llvm::json::Object toJSON(llvm::StringMap<ClassData> const &Obj) {
+ using llvm::json::toJSON;
+
+ llvm::json::Object JsonObj;
+ for (const auto &Item : Obj)
+ JsonObj[Item.first()] = ::toJSON(Item.second);
+ return JsonObj;
+}
+
+void WriteJSON(StringRef JsonPath, llvm::json::Object &&ClassInheritance,
+ llvm::json::Object &&ClassesInClade,
+ llvm::json::Object &&ClassEntries) {
+ llvm::json::Object JsonObj;
+
+ using llvm::json::toJSON;
+
+ JsonObj["classInheritance"] = std::move(ClassInheritance);
+ JsonObj["classesInClade"] = std::move(ClassesInClade);
+ JsonObj["classEntries"] = std::move(ClassEntries);
+
+ llvm::json::Value JsonVal(std::move(JsonObj));
+
+ bool WriteChange = false;
+ std::string OutString;
+ if (auto ExistingOrErr = MemoryBuffer::getFile(JsonPath, /*IsText=*/true)) {
+ raw_string_ostream Out(OutString);
+ Out << formatv("{0:2}", JsonVal);
+ if (ExistingOrErr.get()->getBuffer() == Out.str())
+ return;
+ WriteChange = true;
+ }
+
+ std::error_code EC;
+ llvm::raw_fd_ostream JsonOut(JsonPath, EC, llvm::sys::fs::OF_Text);
+ if (EC)
+ return;
+
+ if (WriteChange)
+ JsonOut << OutString;
+ else
+ JsonOut << formatv("{0:2}", JsonVal);
+}
+
+void ASTSrcLocProcessor::generate() {
+ WriteJSON(JsonPath, ::toJSON(ClassInheritance), ::toJSON(ClassesInClade),
+ ::toJSON(ClassEntries));
+}
+
+void ASTSrcLocProcessor::generateEmpty() { WriteJSON(JsonPath, {}, {}, {}); }
+
+std::vector<std::string>
+CaptureMethods(std::string TypeString, const clang::CXXRecordDecl *ASTClass,
+ const MatchFinder::MatchResult &Result) {
+
+ auto publicAccessor = [](auto... InnerMatcher) {
+ return cxxMethodDecl(isPublic(), parameterCountIs(0), isConst(),
+ InnerMatcher...);
+ };
+
+ auto BoundNodesVec = match(
+ findAll(
+ publicAccessor(
+ ofClass(cxxRecordDecl(
+ equalsNode(ASTClass),
+ optionally(isDerivedFrom(
+ cxxRecordDecl(hasAnyName("clang::Stmt", "clang::Decl"))
+ .bind("stmtOrDeclBase"))),
+ optionally(isDerivedFrom(
+ cxxRecordDecl(hasName("clang::Expr")).bind("exprBase"))),
+ optionally(
+ isDerivedFrom(cxxRecordDecl(hasName("clang::TypeLoc"))
+ .bind("typeLocBase"))))),
+ returns(asString(TypeString)))
+ .bind("classMethod")),
+ *ASTClass, *Result.Context);
+
+ std::vector<std::string> Methods;
+ for (const auto &BN : BoundNodesVec) {
+ if (const auto *Node = BN.getNodeAs<clang::NamedDecl>("classMethod")) {
+ const auto *StmtOrDeclBase =
+ BN.getNodeAs<clang::CXXRecordDecl>("stmtOrDeclBase");
+ const auto *TypeLocBase =
+ BN.getNodeAs<clang::CXXRecordDecl>("typeLocBase");
+ const auto *ExprBase = BN.getNodeAs<clang::CXXRecordDecl>("exprBase");
+ // The clang AST has several methods on base classes which are overridden
+ // pseudo-virtually by derived classes.
+ // We record only the pseudo-virtual methods on the base classes to
+ // avoid duplication.
+ if (StmtOrDeclBase &&
+ (Node->getName() == "getBeginLoc" || Node->getName() == "getEndLoc" ||
+ Node->getName() == "getSourceRange"))
+ continue;
+ if (ExprBase && Node->getName() == "getExprLoc")
+ continue;
+ if (TypeLocBase && Node->getName() == "getLocalSourceRange")
+ continue;
+ if ((ASTClass->getName() == "PointerLikeTypeLoc" ||
+ ASTClass->getName() == "TypeofLikeTypeLoc") &&
+ Node->getName() == "getLocalSourceRange")
+ continue;
+ Methods.push_back(Node->getName().str());
+ }
+ }
+ return Methods;
+}
+
+void ASTSrcLocProcessor::run(const MatchFinder::MatchResult &Result) {
+
+ const auto *ASTClass =
+ Result.Nodes.getNodeAs<clang::CXXRecordDecl>("className");
+
+ StringRef CladeName;
+ if (ASTClass) {
+ if (const auto *NodeClade =
+ Result.Nodes.getNodeAs<clang::CXXRecordDecl>("nodeClade"))
+ CladeName = NodeClade->getName();
+ } else {
+ ASTClass = Result.Nodes.getNodeAs<clang::CXXRecordDecl>("templateName");
+ CladeName = "TypeLoc";
+ }
+
+ StringRef ClassName = ASTClass->getName();
+
+ ClassData CD;
+
+ CD.ASTClassLocations =
+ CaptureMethods("class clang::SourceLocation", ASTClass, Result);
+ CD.ASTClassRanges =
+ CaptureMethods("class clang::SourceRange", ASTClass, Result);
+ CD.TypeSourceInfos =
+ CaptureMethods("class clang::TypeSourceInfo *", ASTClass, Result);
+ CD.TypeLocs = CaptureMethods("class clang::TypeLoc", ASTClass, Result);
+ CD.NestedNameLocs =
+ CaptureMethods("class clang::NestedNameSpecifierLoc", ASTClass, Result);
+ CD.DeclNameInfos =
+ CaptureMethods("struct clang::DeclarationNameInfo", ASTClass, Result);
+ auto DI = CaptureMethods("const struct clang::DeclarationNameInfo &",
+ ASTClass, Result);
+ CD.DeclNameInfos.insert(CD.DeclNameInfos.end(), DI.begin(), DI.end());
+
+ if (const auto *DerivedFrom =
+ Result.Nodes.getNodeAs<clang::CXXRecordDecl>("derivedFrom")) {
+
+ if (const auto *Templ =
+ llvm::dyn_cast<clang::ClassTemplateSpecializationDecl>(
+ DerivedFrom)) {
+
+ const auto &TArgs = Templ->getTemplateArgs();
+
+ SmallString<256> TArgsString;
+ llvm::raw_svector_ostream OS(TArgsString);
+ OS << DerivedFrom->getName() << '<';
+
+ clang::PrintingPolicy PPol(Result.Context->getLangOpts());
+ PPol.TerseOutput = true;
+
+ for (unsigned I = 0; I < TArgs.size(); ++I) {
+ if (I > 0)
+ OS << ", ";
+ TArgs.get(I).getAsType().print(OS, PPol);
+ }
+ OS << '>';
+
+ ClassInheritance[ClassName] = TArgsString.str().str();
+ } else {
+ ClassInheritance[ClassName] = DerivedFrom->getName().str();
+ }
+ }
+
+ if (const auto *Templ = ASTClass->getDescribedClassTemplate()) {
+ if (auto *TParams = Templ->getTemplateParameters()) {
+ for (const auto &TParam : *TParams) {
+ CD.TemplateParms.push_back(TParam->getName().str());
+ }
+ }
+ }
+
+ ClassEntries[ClassName] = CD;
+ ClassesInClade[CladeName].push_back(ClassName);
+}
diff --git a/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h b/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
new file mode 100644
index 000000000000..05c4f92676e8
--- /dev/null
+++ b/clang/lib/Tooling/DumpTool/ASTSrcLocProcessor.h
@@ -0,0 +1,53 @@
+//===- ASTSrcLocProcessor.h ---------------------------------*- C++ -*-----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_DUMPTOOL_ASTSRCLOCPROCESSOR_H
+#define LLVM_CLANG_TOOLING_DUMPTOOL_ASTSRCLOCPROCESSOR_H
+
+#include "APIData.h"
+#include "clang/ASTMatchers/ASTMatchFinder.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace clang {
+
+class CompilerInstance;
+
+namespace tooling {
+
+class ASTSrcLocProcessor : public ast_matchers::MatchFinder::MatchCallback {
+public:
+ explicit ASTSrcLocProcessor(StringRef JsonPath);
+
+ std::unique_ptr<ASTConsumer> createASTConsumer(CompilerInstance &Compiler,
+ StringRef File);
+
+ void generate();
+ void generateEmpty();
+
+private:
+ void run(const ast_matchers::MatchFinder::MatchResult &Result) override;
+
+ llvm::Optional<TraversalKind> getCheckTraversalKind() const override {
+ return TK_IgnoreUnlessSpelledInSource;
+ }
+
+ llvm::StringMap<std::string> ClassInheritance;
+ llvm::StringMap<std::vector<StringRef>> ClassesInClade;
+ llvm::StringMap<ClassData> ClassEntries;
+
+ std::string JsonPath;
+ std::unique_ptr<clang::ast_matchers::MatchFinder> Finder;
+};
+
+} // namespace tooling
+} // namespace clang
+
+#endif
diff --git a/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp b/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
new file mode 100644
index 000000000000..8091a467d056
--- /dev/null
+++ b/clang/lib/Tooling/DumpTool/ClangSrcLocDump.cpp
@@ -0,0 +1,159 @@
+//===- ClangSrcLocDump.cpp ------------------------------------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "clang/Tooling/Tooling.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/JSON.h"
+
+#include "ASTSrcLocProcessor.h"
+
+using namespace clang::tooling;
+using namespace clang;
+using namespace llvm;
+
+static cl::list<std::string> IncludeDirectories(
+ "I", cl::desc("Include directories to use while compiling"),
+ cl::value_desc("directory"), cl::Required, cl::OneOrMore, cl::Prefix);
+
+static cl::opt<bool>
+ SkipProcessing("skip-processing",
+ cl::desc("Avoid processing the AST header file"),
+ cl::Required, cl::value_desc("bool"));
+
+static cl::opt<std::string> JsonOutputPath("json-output-path",
+ cl::desc("json output path"),
+ cl::Required,
+ cl::value_desc("path"));
+
+class ASTSrcLocGenerationAction : public clang::ASTFrontendAction {
+public:
+ ASTSrcLocGenerationAction() : Processor(JsonOutputPath) {}
+
+ void ExecuteAction() override {
+ clang::ASTFrontendAction::ExecuteAction();
+ if (getCompilerInstance().getDiagnostics().getNumErrors() > 0)
+ Processor.generateEmpty();
+ else
+ Processor.generate();
+ }
+
+ std::unique_ptr<clang::ASTConsumer>
+ CreateASTConsumer(clang::CompilerInstance &Compiler,
+ llvm::StringRef File) override {
+ return Processor.createASTConsumer(Compiler, File);
+ }
+
+private:
+ ASTSrcLocProcessor Processor;
+};
+
+static const char Filename[] = "ASTTU.cpp";
+
+int main(int argc, const char **argv) {
+
+ cl::ParseCommandLineOptions(argc, argv);
+
+ if (SkipProcessing) {
+ std::error_code EC;
+ llvm::raw_fd_ostream JsonOut(JsonOutputPath, EC, llvm::sys::fs::OF_Text);
+ if (EC)
+ return 1;
+ JsonOut << formatv("{0:2}", llvm::json::Value(llvm::json::Object()));
+ return 0;
+ }
+
+ std::vector<std::string> Args;
+ Args.push_back("-cc1");
+
+ llvm::transform(IncludeDirectories, std::back_inserter(Args),
+ [](const std::string &IncDir) { return "-I" + IncDir; });
+
+ Args.push_back("-fsyntax-only");
+ Args.push_back(Filename);
+
+ std::vector<const char *> Argv(Args.size(), nullptr);
+ llvm::transform(Args, Argv.begin(),
+ [](const std::string &Arg) { return Arg.c_str(); });
+
+ IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
+ unsigned MissingArgIndex, MissingArgCount;
+ auto Opts = driver::getDriverOptTable();
+ auto ParsedArgs = Opts.ParseArgs(llvm::makeArrayRef(Argv).slice(1),
+ MissingArgIndex, MissingArgCount);
+ ParseDiagnosticArgs(*DiagOpts, ParsedArgs);
+
+ // Don't output diagnostics, because common scenarios such as
+ // cross-compiling fail with diagnostics. This is not fatal, but
+ // just causes attempts to use the introspection API to return no data.
+ TextDiagnosticPrinter DiagnosticPrinter(llvm::nulls(), &*DiagOpts);
+ DiagnosticsEngine Diagnostics(
+ IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
+ &DiagnosticPrinter, false);
+
+ auto *OFS = new llvm::vfs::OverlayFileSystem(vfs::getRealFileSystem());
+
+ auto *MemFS = new llvm::vfs::InMemoryFileSystem();
+ OFS->pushOverlay(MemFS);
+ MemFS->addFile(Filename, 0,
+ MemoryBuffer::getMemBuffer("#include \"clang/AST/AST.h\"\n"));
+
+ auto Files = llvm::makeIntrusiveRefCnt<FileManager>(FileSystemOptions(), OFS);
+
+ auto Driver = std::make_unique<driver::Driver>(
+ "clang", llvm::sys::getDefaultTargetTriple(), Diagnostics,
+ "ast-api-dump-tool", OFS);
+
+ std::unique_ptr<clang::driver::Compilation> Comp(
+ Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
+ if (!Comp)
+ return 1;
+
+ const auto &Jobs = Comp->getJobs();
+ if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
+ SmallString<256> error_msg;
+ llvm::raw_svector_ostream error_stream(error_msg);
+ Jobs.Print(error_stream, "; ", true);
+ return 1;
+ }
+
+ const auto &Cmd = cast<driver::Command>(*Jobs.begin());
+ const llvm::opt::ArgStringList &CC1Args = Cmd.getArguments();
+
+ auto Invocation = std::make_unique<CompilerInvocation>();
+ CompilerInvocation::CreateFromArgs(*Invocation, CC1Args, Diagnostics);
+
+ CompilerInstance Compiler(std::make_shared<clang::PCHContainerOperations>());
+ Compiler.setInvocation(std::move(Invocation));
+
+ Compiler.createDiagnostics(&DiagnosticPrinter, false);
+ if (!Compiler.hasDiagnostics())
+ return 1;
+
+ // Suppress "2 errors generated" or similar messages
+ Compiler.getDiagnosticOpts().ShowCarets = false;
+ Compiler.createSourceManager(*Files);
+ Compiler.setFileManager(Files.get());
+
+ ASTSrcLocGenerationAction ScopedToolAction;
+ Compiler.ExecuteAction(ScopedToolAction);
+
+ Files->clearStatCache();
+
+ return 0;
+}
diff --git a/clang/lib/Tooling/EmptyNodeIntrospection.inc.in b/clang/lib/Tooling/EmptyNodeIntrospection.inc.in
new file mode 100644
index 000000000000..2071c34cbd04
--- /dev/null
+++ b/clang/lib/Tooling/EmptyNodeIntrospection.inc.in
@@ -0,0 +1,48 @@
+//===- EmptyNodeIntrospection.inc.in --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+namespace tooling {
+bool NodeIntrospection::hasIntrospectionSupport() { return false; }
+
+NodeLocationAccessors NodeIntrospection::GetLocations(clang::Stmt const *) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(clang::Decl const *) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(
+ clang::CXXCtorInitializer const *) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(
+ clang::NestedNameSpecifierLoc const&) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(
+ clang::TemplateArgumentLoc const&) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(
+ clang::CXXBaseSpecifier const*) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(
+ clang::TypeLoc const&) {
+ return {};
+}
+NodeLocationAccessors NodeIntrospection::GetLocations(
+ clang::DeclarationNameInfo const&) {
+ return {};
+}
+NodeLocationAccessors
+NodeIntrospection::GetLocations(clang::DynTypedNode const &) {
+ return {};
+}
+} // namespace tooling
+} // namespace clang
diff --git a/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp b/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
index f1ab2aed54c0..29787b8a8894 100644
--- a/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
+++ b/clang/lib/Tooling/ExpandResponseFilesCompilationDatabase.cpp
@@ -54,14 +54,15 @@ private:
Argv.reserve(Cmd.CommandLine.size());
for (auto &Arg : Cmd.CommandLine) {
Argv.push_back(Arg.c_str());
- SeenRSPFile |= Arg.front() == '@';
+ if (!Arg.empty())
+ SeenRSPFile |= Arg.front() == '@';
}
if (!SeenRSPFile)
continue;
llvm::BumpPtrAllocator Alloc;
llvm::StringSaver Saver(Alloc);
- llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Argv, false, false, *FS,
- llvm::StringRef(Cmd.Directory));
+ llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Argv, false, false,
+ llvm::StringRef(Cmd.Directory), *FS);
// Don't assign directly, Argv aliases CommandLine.
std::vector<std::string> ExpandedArgv(Argv.begin(), Argv.end());
Cmd.CommandLine = std::move(ExpandedArgv);
diff --git a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index d1f984632660..fbceb26c39c7 100644
--- a/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -252,9 +252,9 @@ bool IncludeCategoryManager::isMainHeader(StringRef IncludeName) const {
// 1) foo.h => bar.cc
// 2) foo.proto.h => foo.cc
StringRef Matching;
- if (MatchingFileStem.startswith_lower(HeaderStem))
+ if (MatchingFileStem.startswith_insensitive(HeaderStem))
Matching = MatchingFileStem; // example 1), 2)
- else if (FileStem.equals_lower(HeaderStem))
+ else if (FileStem.equals_insensitive(HeaderStem))
Matching = FileStem; // example 3)
if (!Matching.empty()) {
llvm::Regex MainIncludeRegex(HeaderStem.str() + Style.IncludeIsMainRegex,
diff --git a/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp b/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
index fa61560e5123..c1e25c41f719 100644
--- a/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -43,9 +43,11 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/LangStandard.h"
+#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Types.h"
#include "clang/Tooling/CompilationDatabase.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
@@ -134,8 +136,7 @@ struct TransferableCommand {
bool ClangCLMode;
TransferableCommand(CompileCommand C)
- : Cmd(std::move(C)), Type(guessType(Cmd.Filename)),
- ClangCLMode(checkIsCLMode(Cmd.CommandLine)) {
+ : Cmd(std::move(C)), Type(guessType(Cmd.Filename)) {
std::vector<std::string> OldArgs = std::move(Cmd.CommandLine);
Cmd.CommandLine.clear();
@@ -145,6 +146,9 @@ struct TransferableCommand {
SmallVector<const char *, 16> TmpArgv;
for (const std::string &S : OldArgs)
TmpArgv.push_back(S.c_str());
+ ClangCLMode = !TmpArgv.empty() &&
+ driver::IsClangCL(driver::getDriverMode(
+ TmpArgv.front(), llvm::makeArrayRef(TmpArgv).slice(1)));
ArgList = {TmpArgv.begin(), TmpArgv.end()};
}
@@ -177,6 +181,10 @@ struct TransferableCommand {
Opt.matches(OPT__SLASH_Fo))))
continue;
+ // ...including when the inputs are passed after --.
+ if (Opt.matches(OPT__DASH_DASH))
+ break;
+
// Strip -x, but record the overridden language.
if (const auto GivenType = tryParseTypeArg(*Arg)) {
Type = *GivenType;
@@ -204,8 +212,10 @@ struct TransferableCommand {
}
// Produce a CompileCommand for \p filename, based on this one.
- CompileCommand transferTo(StringRef Filename) const {
- CompileCommand Result = Cmd;
+ // (This consumes the TransferableCommand just to avoid copying Cmd).
+ CompileCommand transferTo(StringRef Filename) && {
+ CompileCommand Result = std::move(Cmd);
+ Result.Heuristic = "inferred from " + Result.Filename;
Result.Filename = std::string(Filename);
bool TypeCertain;
auto TargetType = guessType(Filename, &TypeCertain);
@@ -233,25 +243,13 @@ struct TransferableCommand {
llvm::Twine(ClangCLMode ? "/std:" : "-std=") +
LangStandard::getLangStandardForKind(Std).getName()).str());
}
+ if (Filename.startswith("-") || (ClangCLMode && Filename.startswith("/")))
+ Result.CommandLine.push_back("--");
Result.CommandLine.push_back(std::string(Filename));
- Result.Heuristic = "inferred from " + Cmd.Filename;
return Result;
}
private:
- // Determine whether the given command line is intended for the CL driver.
- static bool checkIsCLMode(ArrayRef<std::string> CmdLine) {
- // First look for --driver-mode.
- for (StringRef S : llvm::reverse(CmdLine)) {
- if (S.consume_front("--driver-mode="))
- return S == "cl";
- }
-
- // Otherwise just check the clang executable file name.
- return !CmdLine.empty() &&
- llvm::sys::path::stem(CmdLine.front()).endswith_lower("cl");
- }
-
// Map the language from the --std flag to that of the -x flag.
static types::ID toType(Language Lang) {
switch (Lang) {
@@ -521,7 +519,7 @@ public:
Inner->getCompileCommands(Index.chooseProxy(Filename, foldType(Lang)));
if (ProxyCommands.empty())
return {};
- return {TransferableCommand(ProxyCommands[0]).transferTo(Filename)};
+ return {transferCompileCommand(std::move(ProxyCommands.front()), Filename)};
}
std::vector<std::string> getAllFiles() const override {
@@ -544,5 +542,10 @@ inferMissingCompileCommands(std::unique_ptr<CompilationDatabase> Inner) {
return std::make_unique<InterpolatingCompilationDatabase>(std::move(Inner));
}
+tooling::CompileCommand transferCompileCommand(CompileCommand Cmd,
+ StringRef Filename) {
+ return TransferableCommand(std::move(Cmd)).transferTo(Filename);
+}
+
} // namespace tooling
} // namespace clang
diff --git a/clang/lib/Tooling/JSONCompilationDatabase.cpp b/clang/lib/Tooling/JSONCompilationDatabase.cpp
index 2d8847a7a327..97ba7e411fbb 100644
--- a/clang/lib/Tooling/JSONCompilationDatabase.cpp
+++ b/clang/lib/Tooling/JSONCompilationDatabase.cpp
@@ -198,7 +198,7 @@ JSONCompilationDatabase::loadFromFile(StringRef FilePath,
JSONCommandLineSyntax Syntax) {
// Don't mmap: if we're a long-lived process, the build system may overwrite.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> DatabaseBuffer =
- llvm::MemoryBuffer::getFile(FilePath, /*FileSize=*/-1,
+ llvm::MemoryBuffer::getFile(FilePath, /*IsText=*/false,
/*RequiresNullTerminator=*/true,
/*IsVolatile=*/true);
if (std::error_code Result = DatabaseBuffer.getError()) {
diff --git a/clang/lib/Tooling/NodeIntrospection.cpp b/clang/lib/Tooling/NodeIntrospection.cpp
new file mode 100644
index 000000000000..f01bb1cb9c3c
--- /dev/null
+++ b/clang/lib/Tooling/NodeIntrospection.cpp
@@ -0,0 +1,88 @@
+//===- NodeIntrospection.cpp ---------------------------------*- C++ -*----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of NodeIntrospection.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/NodeIntrospection.h"
+
+#include "clang/AST/AST.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+namespace tooling {
+
+void LocationCallFormatterCpp::print(const LocationCall &Call,
+ llvm::raw_ostream &OS) {
+ if (const LocationCall *On = Call.on()) {
+ print(*On, OS);
+ if (On->returnsPointer())
+ OS << "->";
+ else
+ OS << '.';
+ }
+
+ OS << Call.name() << "()";
+}
+
+std::string LocationCallFormatterCpp::format(const LocationCall &Call) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ print(Call, OS);
+ OS.flush();
+ return Result;
+}
+
+namespace internal {
+
+static bool locationCallLessThan(const LocationCall *LHS,
+ const LocationCall *RHS) {
+ if (!LHS && !RHS)
+ return false;
+ if (LHS && !RHS)
+ return true;
+ if (!LHS && RHS)
+ return false;
+ auto compareResult = LHS->name().compare(RHS->name());
+ if (compareResult < 0)
+ return true;
+ if (compareResult > 0)
+ return false;
+ return locationCallLessThan(LHS->on(), RHS->on());
+}
+
+bool RangeLessThan::operator()(
+ std::pair<SourceRange, SharedLocationCall> const &LHS,
+ std::pair<SourceRange, SharedLocationCall> const &RHS) const {
+ if (LHS.first.getBegin() < RHS.first.getBegin())
+ return true;
+ else if (LHS.first.getBegin() != RHS.first.getBegin())
+ return false;
+
+ if (LHS.first.getEnd() < RHS.first.getEnd())
+ return true;
+ else if (LHS.first.getEnd() != RHS.first.getEnd())
+ return false;
+
+ return locationCallLessThan(LHS.second.get(), RHS.second.get());
+}
+bool RangeLessThan::operator()(
+ std::pair<SourceLocation, SharedLocationCall> const &LHS,
+ std::pair<SourceLocation, SharedLocationCall> const &RHS) const {
+ if (LHS.first == RHS.first)
+ return locationCallLessThan(LHS.second.get(), RHS.second.get());
+ return LHS.first < RHS.first;
+}
+} // namespace internal
+
+} // namespace tooling
+} // namespace clang
+
+#include "clang/Tooling/NodeIntrospection.inc"
diff --git a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
index 6a08c7fd5247..aecfffcbef1f 100644
--- a/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
+++ b/clang/lib/Tooling/Refactoring/Rename/USRLocFinder.cpp
@@ -226,6 +226,24 @@ public:
return true;
}
+ bool VisitDesignatedInitExpr(const DesignatedInitExpr *E) {
+ for (const DesignatedInitExpr::Designator &D : E->designators()) {
+ if (D.isFieldDesignator() && D.getField()) {
+ const FieldDecl *Decl = D.getField();
+ if (isInUSRSet(Decl)) {
+ auto StartLoc = D.getFieldLoc();
+ auto EndLoc = D.getFieldLoc();
+ RenameInfos.push_back({StartLoc, EndLoc,
+ /*FromDecl=*/nullptr,
+ /*Context=*/nullptr,
+ /*Specifier=*/nullptr,
+ /*IgnorePrefixQualifiers=*/true});
+ }
+ }
+ }
+ return true;
+ }
+
bool VisitCXXConstructorDecl(const CXXConstructorDecl *CD) {
// Fix the constructor initializer when renaming class members.
for (const auto *Initializer : CD->inits()) {
diff --git a/clang/lib/Tooling/Syntax/BuildTree.cpp b/clang/lib/Tooling/Syntax/BuildTree.cpp
index 7654e3dfaa01..07888b5c32fa 100644
--- a/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -294,11 +294,12 @@ static SourceRange getDeclaratorRange(const SourceManager &SM, TypeLoc T,
SourceRange Initializer) {
SourceLocation Start = GetStartLoc().Visit(T);
SourceLocation End = T.getEndLoc();
- assert(End.isValid());
if (Name.isValid()) {
if (Start.isInvalid())
Start = Name;
- if (SM.isBeforeInTranslationUnit(End, Name))
+      // End of TypeLoc could be invalid if the type is invalid, fall back to
+      // the NameLoc.
+ if (End.isInvalid() || SM.isBeforeInTranslationUnit(End, Name))
End = Name;
}
if (Initializer.isValid()) {
@@ -800,6 +801,30 @@ public:
return true;
}
+ bool TraverseIfStmt(IfStmt *S) {
+ bool Result = [&, this]() {
+ if (S->getInit() && !TraverseStmt(S->getInit())) {
+ return false;
+ }
+ // In cases where the condition is an initialized declaration in a
+ // statement, we want to preserve the declaration and ignore the
+ // implicit condition expression in the syntax tree.
+ if (S->hasVarStorage()) {
+ if (!TraverseStmt(S->getConditionVariableDeclStmt()))
+ return false;
+ } else if (S->getCond() && !TraverseStmt(S->getCond()))
+ return false;
+
+ if (S->getThen() && !TraverseStmt(S->getThen()))
+ return false;
+ if (S->getElse() && !TraverseStmt(S->getElse()))
+ return false;
+ return true;
+ }();
+ WalkUpFromIfStmt(S);
+ return Result;
+ }
+
bool TraverseCXXForRangeStmt(CXXForRangeStmt *S) {
// We override to traverse range initializer as VarDecl.
// RAV traverses it as a statement, we produce invalid node kinds in that
@@ -831,6 +856,11 @@ public:
return RecursiveASTVisitor::TraverseStmt(S);
}
+ bool TraverseOpaqueValueExpr(OpaqueValueExpr *VE) {
+ // OpaqueValue doesn't correspond to concrete syntax, ignore it.
+ return true;
+ }
+
// Some expressions are not yet handled by syntax trees.
bool WalkUpFromExpr(Expr *E) {
assert(!isImplicitExpr(E) && "should be handled by TraverseStmt");
@@ -1426,6 +1456,10 @@ public:
bool WalkUpFromIfStmt(IfStmt *S) {
Builder.markChildToken(S->getIfLoc(), syntax::NodeRole::IntroducerKeyword);
+ Stmt *ConditionStatement = S->getCond();
+ if (S->hasVarStorage())
+ ConditionStatement = S->getConditionVariableDeclStmt();
+ Builder.markStmtChild(ConditionStatement, syntax::NodeRole::Condition);
Builder.markStmtChild(S->getThen(), syntax::NodeRole::ThenStatement);
Builder.markChildToken(S->getElseLoc(), syntax::NodeRole::ElseKeyword);
Builder.markStmtChild(S->getElse(), syntax::NodeRole::ElseStatement);
diff --git a/clang/lib/Tooling/Syntax/Tokens.cpp b/clang/lib/Tooling/Syntax/Tokens.cpp
index 234df9cb7182..8a31e776d030 100644
--- a/clang/lib/Tooling/Syntax/Tokens.cpp
+++ b/clang/lib/Tooling/Syntax/Tokens.cpp
@@ -183,7 +183,39 @@ llvm::StringRef FileRange::text(const SourceManager &SM) const {
return Text.substr(Begin, length());
}
+void TokenBuffer::indexExpandedTokens() {
+ // No-op if the index is already created.
+ if (!ExpandedTokIndex.empty())
+ return;
+ ExpandedTokIndex.reserve(ExpandedTokens.size());
+ // Index ExpandedTokens for faster lookups by SourceLocation.
+ for (size_t I = 0, E = ExpandedTokens.size(); I != E; ++I) {
+ SourceLocation Loc = ExpandedTokens[I].location();
+ if (Loc.isValid())
+ ExpandedTokIndex[Loc] = I;
+ }
+}
+
llvm::ArrayRef<syntax::Token> TokenBuffer::expandedTokens(SourceRange R) const {
+ if (R.isInvalid())
+ return {};
+ if (!ExpandedTokIndex.empty()) {
+    // Quick lookup if `R` is a token range.
+    // This is a huge win since the majority of users use ranges provided by
+    // the AST. Ranges in the AST are token ranges from the expanded stream.
+ const auto B = ExpandedTokIndex.find(R.getBegin());
+ const auto E = ExpandedTokIndex.find(R.getEnd());
+ if (B != ExpandedTokIndex.end() && E != ExpandedTokIndex.end()) {
+ const Token *L = ExpandedTokens.data() + B->getSecond();
+ // Add 1 to End to make a half-open range.
+ const Token *R = ExpandedTokens.data() + E->getSecond() + 1;
+ if (L > R)
+ return {};
+ return {L, R};
+ }
+ }
+ // Slow case. Use `isBeforeInTranslationUnit` to binary search for the
+ // required range.
return getTokensCovering(expandedTokens(), R, *SourceMgr);
}
diff --git a/clang/lib/Tooling/Tooling.cpp b/clang/lib/Tooling/Tooling.cpp
index 79851ac723da..5242134097da 100644
--- a/clang/lib/Tooling/Tooling.cpp
+++ b/clang/lib/Tooling/Tooling.cpp
@@ -83,16 +83,20 @@ newDriver(DiagnosticsEngine *Diagnostics, const char *BinaryName,
return CompilerDriver;
}
-/// Retrieves the clang CC1 specific flags out of the compilation's jobs.
-///
-/// Returns nullptr on error.
-static const llvm::opt::ArgStringList *getCC1Arguments(
- DiagnosticsEngine *Diagnostics, driver::Compilation *Compilation) {
- // We expect to get back exactly one Command job, if we didn't something
- // failed. Extract that job from the Compilation.
+/// Decide whether extra compiler frontend commands can be ignored.
+static bool ignoreExtraCC1Commands(const driver::Compilation *Compilation) {
const driver::JobList &Jobs = Compilation->getJobs();
const driver::ActionList &Actions = Compilation->getActions();
+
bool OffloadCompilation = false;
+
+ // Jobs and Actions look very different depending on whether the Clang tool
+ // injected -fsyntax-only or not. Try to handle both cases here.
+
+ for (const auto &Job : Jobs)
+ if (StringRef(Job.getExecutable()) == "clang-offload-bundler")
+ OffloadCompilation = true;
+
if (Jobs.size() > 1) {
for (auto A : Actions){
// On MacOSX real actions may end up being wrapped in BindArchAction
@@ -117,8 +121,33 @@ static const llvm::opt::ArgStringList *getCC1Arguments(
}
}
}
- if (Jobs.size() == 0 || !isa<driver::Command>(*Jobs.begin()) ||
- (Jobs.size() > 1 && !OffloadCompilation)) {
+
+ return OffloadCompilation;
+}
+
+namespace clang {
+namespace tooling {
+
+const llvm::opt::ArgStringList *
+getCC1Arguments(DiagnosticsEngine *Diagnostics,
+ driver::Compilation *Compilation) {
+ const driver::JobList &Jobs = Compilation->getJobs();
+
+ auto IsCC1Command = [](const driver::Command &Cmd) {
+ return StringRef(Cmd.getCreator().getName()) == "clang";
+ };
+
+ auto IsSrcFile = [](const driver::InputInfo &II) {
+ return isSrcFile(II.getType());
+ };
+
+ llvm::SmallVector<const driver::Command *, 1> CC1Jobs;
+ for (const driver::Command &Job : Jobs)
+ if (IsCC1Command(Job) && llvm::all_of(Job.getInputInfos(), IsSrcFile))
+ CC1Jobs.push_back(&Job);
+
+ if (CC1Jobs.empty() ||
+ (CC1Jobs.size() > 1 && !ignoreExtraCC1Commands(Compilation))) {
SmallString<256> error_msg;
llvm::raw_svector_ostream error_stream(error_msg);
Jobs.Print(error_stream, "; ", true);
@@ -127,19 +156,9 @@ static const llvm::opt::ArgStringList *getCC1Arguments(
return nullptr;
}
- // The one job we find should be to invoke clang again.
- const auto &Cmd = cast<driver::Command>(*Jobs.begin());
- if (StringRef(Cmd.getCreator().getName()) != "clang") {
- Diagnostics->Report(diag::err_fe_expected_clang_command);
- return nullptr;
- }
-
- return &Cmd.getArguments();
+ return &CC1Jobs[0]->getArguments();
}
-namespace clang {
-namespace tooling {
-
/// Returns a clang build invocation initialized from the CC1 flags.
CompilerInvocation *newInvocation(DiagnosticsEngine *Diagnostics,
const llvm::opt::ArgStringList &CC1Args,
@@ -334,6 +353,10 @@ bool ToolInvocation::run() {
DiagnosticsEngine Diagnostics(
IntrusiveRefCntPtr<DiagnosticIDs>(new DiagnosticIDs()), &*DiagOpts,
DiagConsumer ? DiagConsumer : &DiagnosticPrinter, false);
+ // Although `Diagnostics` are used only for command-line parsing, the custom
+ // `DiagConsumer` might expect a `SourceManager` to be present.
+ SourceManager SrcMgr(Diagnostics, *Files);
+ Diagnostics.setSourceManager(&SrcMgr);
const std::unique_ptr<driver::Driver> Driver(
newDriver(&Diagnostics, BinaryName, &Files->getVirtualFileSystem()));
@@ -440,8 +463,9 @@ static void injectResourceDir(CommandLineArguments &Args, const char *Argv0,
return;
// If there's no override in place add our resource dir.
- Args.push_back("-resource-dir=" +
- CompilerInvocation::GetResourcesPath(Argv0, MainAddr));
+ Args = getInsertArgumentAdjuster(
+ ("-resource-dir=" + CompilerInvocation::GetResourcesPath(Argv0, MainAddr))
+ .c_str())(Args, "");
}
int ClangTool::run(ToolAction *Action) {
diff --git a/clang/lib/Tooling/Transformer/RangeSelector.cpp b/clang/lib/Tooling/Transformer/RangeSelector.cpp
index 0f3138db218a..753e89e0e1f3 100644
--- a/clang/lib/Tooling/Transformer/RangeSelector.cpp
+++ b/clang/lib/Tooling/Transformer/RangeSelector.cpp
@@ -8,6 +8,7 @@
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/TypeLoc.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Lex/Lexer.h"
@@ -228,8 +229,16 @@ RangeSelector transformer::name(std::string ID) {
SourceLocation L = I->getMemberLocation();
return CharSourceRange::getTokenRange(L, L);
}
+ if (const auto *T = Node.get<TypeLoc>()) {
+ TypeLoc Loc = *T;
+ auto ET = Loc.getAs<ElaboratedTypeLoc>();
+ if (!ET.isNull()) {
+ Loc = ET.getNamedTypeLoc();
+ }
+ return CharSourceRange::getTokenRange(Loc.getSourceRange());
+ }
return typeError(ID, Node.getNodeKind(),
- "DeclRefExpr, NamedDecl, CXXCtorInitializer");
+ "DeclRefExpr, NamedDecl, CXXCtorInitializer, TypeLoc");
};
}
diff --git a/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp b/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
index 56ec45e8fd1d..a1c99b60216b 100644
--- a/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
+++ b/clang/lib/Tooling/Transformer/SourceCodeBuilders.cpp
@@ -93,6 +93,8 @@ tooling::buildDereference(const Expr &E, const ASTContext &Context) {
llvm::Optional<std::string> tooling::buildAddressOf(const Expr &E,
const ASTContext &Context) {
+ if (E.isImplicitCXXThis())
+ return std::string("this");
if (const auto *Op = dyn_cast<UnaryOperator>(&E))
if (Op->getOpcode() == UO_Deref) {
// Strip leading '*'.
diff --git a/clang/lib/Tooling/Transformer/Stencil.cpp b/clang/lib/Tooling/Transformer/Stencil.cpp
index d46087e4b04b..4dc3544bb06d 100644
--- a/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -43,141 +43,6 @@ getNode(const ast_matchers::BoundNodes &Nodes, StringRef Id) {
return It->second;
}
-namespace {
-// An arbitrary fragment of code within a stencil.
-struct RawTextData {
- explicit RawTextData(std::string T) : Text(std::move(T)) {}
- std::string Text;
-};
-
-// A debugging operation to dump the AST for a particular (bound) AST node.
-struct DebugPrintNodeData {
- explicit DebugPrintNodeData(std::string S) : Id(std::move(S)) {}
- std::string Id;
-};
-
-// Operators that take a single node Id as an argument.
-enum class UnaryNodeOperator {
- Parens,
- Deref,
- MaybeDeref,
- AddressOf,
- MaybeAddressOf,
- Describe,
-};
-
-// Generic container for stencil operations with a (single) node-id argument.
-struct UnaryOperationData {
- UnaryOperationData(UnaryNodeOperator Op, std::string Id)
- : Op(Op), Id(std::move(Id)) {}
- UnaryNodeOperator Op;
- std::string Id;
-};
-
-// The fragment of code corresponding to the selected range.
-struct SelectorData {
- explicit SelectorData(RangeSelector S) : Selector(std::move(S)) {}
- RangeSelector Selector;
-};
-
-// A stencil operation to build a member access `e.m` or `e->m`, as appropriate.
-struct AccessData {
- AccessData(StringRef BaseId, Stencil Member)
- : BaseId(std::string(BaseId)), Member(std::move(Member)) {}
- std::string BaseId;
- Stencil Member;
-};
-
-struct IfBoundData {
- IfBoundData(StringRef Id, Stencil TrueStencil, Stencil FalseStencil)
- : Id(std::string(Id)), TrueStencil(std::move(TrueStencil)),
- FalseStencil(std::move(FalseStencil)) {}
- std::string Id;
- Stencil TrueStencil;
- Stencil FalseStencil;
-};
-
-struct SequenceData {
- SequenceData(std::vector<Stencil> Stencils) : Stencils(std::move(Stencils)) {}
- std::vector<Stencil> Stencils;
-};
-
-std::string toStringData(const RawTextData &Data) {
- std::string Result;
- llvm::raw_string_ostream OS(Result);
- OS << "\"";
- OS.write_escaped(Data.Text);
- OS << "\"";
- OS.flush();
- return Result;
-}
-
-std::string toStringData(const DebugPrintNodeData &Data) {
- return (llvm::Twine("dPrint(\"") + Data.Id + "\")").str();
-}
-
-std::string toStringData(const UnaryOperationData &Data) {
- StringRef OpName;
- switch (Data.Op) {
- case UnaryNodeOperator::Parens:
- OpName = "expression";
- break;
- case UnaryNodeOperator::Deref:
- OpName = "deref";
- break;
- case UnaryNodeOperator::MaybeDeref:
- OpName = "maybeDeref";
- break;
- case UnaryNodeOperator::AddressOf:
- OpName = "addressOf";
- break;
- case UnaryNodeOperator::MaybeAddressOf:
- OpName = "maybeAddressOf";
- break;
- case UnaryNodeOperator::Describe:
- OpName = "describe";
- break;
- }
- return (OpName + "(\"" + Data.Id + "\")").str();
-}
-
-std::string toStringData(const SelectorData &) { return "selection(...)"; }
-
-std::string toStringData(const AccessData &Data) {
- return (llvm::Twine("access(\"") + Data.BaseId + "\", " +
- Data.Member->toString() + ")")
- .str();
-}
-
-std::string toStringData(const IfBoundData &Data) {
- return (llvm::Twine("ifBound(\"") + Data.Id + "\", " +
- Data.TrueStencil->toString() + ", " + Data.FalseStencil->toString() +
- ")")
- .str();
-}
-
-std::string toStringData(const MatchConsumer<std::string> &) {
- return "run(...)";
-}
-
-std::string toStringData(const SequenceData &Data) {
- llvm::SmallVector<std::string, 2> Parts;
- Parts.reserve(Data.Stencils.size());
- for (const auto &S : Data.Stencils)
- Parts.push_back(S->toString());
- return (llvm::Twine("seq(") + llvm::join(Parts, ", ") + ")").str();
-}
-
-// The `evalData()` overloads evaluate the given stencil data to a string, given
-// the match result, and append it to `Result`. We define an overload for each
-// type of stencil data.
-
-Error evalData(const RawTextData &Data, const MatchFinder::MatchResult &,
- std::string *Result) {
- Result->append(Data.Text);
- return Error::success();
-}
-
static Error printNode(StringRef Id, const MatchFinder::MatchResult &Match,
std::string *Result) {
std::string Output;
@@ -190,11 +55,6 @@ static Error printNode(StringRef Id, const MatchFinder::MatchResult &Match,
return Error::success();
}
-Error evalData(const DebugPrintNodeData &Data,
- const MatchFinder::MatchResult &Match, std::string *Result) {
- return printNode(Data.Id, Match, Result);
-}
-
// FIXME: Consider memoizing this function using the `ASTContext`.
static bool isSmartPointerType(QualType Ty, ASTContext &Context) {
using namespace ::clang::ast_matchers;
@@ -213,229 +73,402 @@ static bool isSmartPointerType(QualType Ty, ASTContext &Context) {
return match(SmartPointer, Ty, Context).size() > 0;
}
-Error evalData(const UnaryOperationData &Data,
- const MatchFinder::MatchResult &Match, std::string *Result) {
- // The `Describe` operation can be applied to any node, not just expressions,
- // so it is handled here, separately.
- if (Data.Op == UnaryNodeOperator::Describe)
- return printNode(Data.Id, Match, Result);
-
- const auto *E = Match.Nodes.getNodeAs<Expr>(Data.Id);
- if (E == nullptr)
- return llvm::make_error<StringError>(
- errc::invalid_argument, "Id not bound or not Expr: " + Data.Id);
- llvm::Optional<std::string> Source;
- switch (Data.Op) {
- case UnaryNodeOperator::Parens:
- Source = tooling::buildParens(*E, *Match.Context);
- break;
- case UnaryNodeOperator::Deref:
- Source = tooling::buildDereference(*E, *Match.Context);
- break;
- case UnaryNodeOperator::MaybeDeref:
- if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
- // Strip off any operator->. This can only occur inside an actual arrow
- // member access, so we treat it as equivalent to an actual object
- // expression.
- if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
- if (OpCall->getOperator() == clang::OO_Arrow &&
- OpCall->getNumArgs() == 1) {
- E = OpCall->getArg(0);
- }
- }
- Source = tooling::buildDereference(*E, *Match.Context);
+// Identifies use of `operator*` on smart pointers, and returns the underlying
+// smart-pointer expression; otherwise, returns null.
+static const Expr *isSmartDereference(const Expr &E, ASTContext &Context) {
+ using namespace ::clang::ast_matchers;
+
+ const auto HasOverloadedArrow = cxxRecordDecl(hasMethod(cxxMethodDecl(
+ hasOverloadedOperatorName("->"), returns(qualType(pointsTo(type()))))));
+ // Verify it is a smart pointer by finding `operator->` in the class
+ // declaration.
+ auto Deref = cxxOperatorCallExpr(
+ hasOverloadedOperatorName("*"), hasUnaryOperand(expr().bind("arg")),
+ callee(cxxMethodDecl(ofClass(HasOverloadedArrow))));
+ return selectFirst<Expr>("arg", match(Deref, E, Context));
+}
+
+namespace {
+// An arbitrary fragment of code within a stencil.
+class RawTextStencil : public StencilInterface {
+ std::string Text;
+
+public:
+ explicit RawTextStencil(std::string T) : Text(std::move(T)) {}
+
+ std::string toString() const override {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ OS << "\"";
+ OS.write_escaped(Text);
+ OS << "\"";
+ OS.flush();
+ return Result;
+ }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ Result->append(Text);
+ return Error::success();
+ }
+};
+
+// A debugging operation to dump the AST for a particular (bound) AST node.
+class DebugPrintNodeStencil : public StencilInterface {
+ std::string Id;
+
+public:
+ explicit DebugPrintNodeStencil(std::string S) : Id(std::move(S)) {}
+
+ std::string toString() const override {
+ return (llvm::Twine("dPrint(\"") + Id + "\")").str();
+ }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ return printNode(Id, Match, Result);
+ }
+};
+
+// Operators that take a single node Id as an argument.
+enum class UnaryNodeOperator {
+ Parens,
+ Deref,
+ MaybeDeref,
+ AddressOf,
+ MaybeAddressOf,
+ Describe,
+};
+
+// Generic container for stencil operations with a (single) node-id argument.
+class UnaryOperationStencil : public StencilInterface {
+ UnaryNodeOperator Op;
+ std::string Id;
+
+public:
+ UnaryOperationStencil(UnaryNodeOperator Op, std::string Id)
+ : Op(Op), Id(std::move(Id)) {}
+
+ std::string toString() const override {
+ StringRef OpName;
+ switch (Op) {
+ case UnaryNodeOperator::Parens:
+ OpName = "expression";
+ break;
+ case UnaryNodeOperator::Deref:
+ OpName = "deref";
+ break;
+ case UnaryNodeOperator::MaybeDeref:
+ OpName = "maybeDeref";
+ break;
+ case UnaryNodeOperator::AddressOf:
+ OpName = "addressOf";
+ break;
+ case UnaryNodeOperator::MaybeAddressOf:
+ OpName = "maybeAddressOf";
+ break;
+ case UnaryNodeOperator::Describe:
+ OpName = "describe";
break;
}
- *Result += tooling::getText(*E, *Match.Context);
- return Error::success();
- case UnaryNodeOperator::AddressOf:
- Source = tooling::buildAddressOf(*E, *Match.Context);
- break;
- case UnaryNodeOperator::MaybeAddressOf:
- if (E->getType()->isAnyPointerType() ||
- isSmartPointerType(E->getType(), *Match.Context)) {
- // Strip off any operator->. This can only occur inside an actual arrow
- // member access, so we treat it as equivalent to an actual object
- // expression.
- if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
- if (OpCall->getOperator() == clang::OO_Arrow &&
- OpCall->getNumArgs() == 1) {
- E = OpCall->getArg(0);
+ return (OpName + "(\"" + Id + "\")").str();
+ }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ // The `Describe` operation can be applied to any node, not just
+ // expressions, so it is handled here, separately.
+ if (Op == UnaryNodeOperator::Describe)
+ return printNode(Id, Match, Result);
+
+ const auto *E = Match.Nodes.getNodeAs<Expr>(Id);
+ if (E == nullptr)
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Id not bound or not Expr: " + Id);
+ llvm::Optional<std::string> Source;
+ switch (Op) {
+ case UnaryNodeOperator::Parens:
+ Source = tooling::buildParens(*E, *Match.Context);
+ break;
+ case UnaryNodeOperator::Deref:
+ Source = tooling::buildDereference(*E, *Match.Context);
+ break;
+ case UnaryNodeOperator::MaybeDeref:
+ if (E->getType()->isAnyPointerType() ||
+ isSmartPointerType(E->getType(), *Match.Context)) {
+ // Strip off any operator->. This can only occur inside an actual arrow
+ // member access, so we treat it as equivalent to an actual object
+ // expression.
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
+ if (OpCall->getOperator() == clang::OO_Arrow &&
+ OpCall->getNumArgs() == 1) {
+ E = OpCall->getArg(0);
+ }
}
+ Source = tooling::buildDereference(*E, *Match.Context);
+ break;
}
*Result += tooling::getText(*E, *Match.Context);
return Error::success();
+ case UnaryNodeOperator::AddressOf:
+ Source = tooling::buildAddressOf(*E, *Match.Context);
+ break;
+ case UnaryNodeOperator::MaybeAddressOf:
+ if (E->getType()->isAnyPointerType() ||
+ isSmartPointerType(E->getType(), *Match.Context)) {
+ // Strip off any operator->. This can only occur inside an actual arrow
+ // member access, so we treat it as equivalent to an actual object
+ // expression.
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
+ if (OpCall->getOperator() == clang::OO_Arrow &&
+ OpCall->getNumArgs() == 1) {
+ E = OpCall->getArg(0);
+ }
+ }
+ *Result += tooling::getText(*E, *Match.Context);
+ return Error::success();
+ }
+ Source = tooling::buildAddressOf(*E, *Match.Context);
+ break;
+ case UnaryNodeOperator::Describe:
+ llvm_unreachable("This case is handled at the start of the function");
}
- Source = tooling::buildAddressOf(*E, *Match.Context);
- break;
- case UnaryNodeOperator::Describe:
- llvm_unreachable("This case is handled at the start of the function");
+ if (!Source)
+ return llvm::make_error<StringError>(
+ errc::invalid_argument,
+ "Could not construct expression source from ID: " + Id);
+ *Result += *Source;
+ return Error::success();
}
- if (!Source)
- return llvm::make_error<StringError>(
- errc::invalid_argument,
- "Could not construct expression source from ID: " + Data.Id);
- *Result += *Source;
- return Error::success();
-}
+};
-Error evalData(const SelectorData &Data, const MatchFinder::MatchResult &Match,
- std::string *Result) {
- auto RawRange = Data.Selector(Match);
- if (!RawRange)
- return RawRange.takeError();
- CharSourceRange Range = Lexer::makeFileCharRange(
- *RawRange, *Match.SourceManager, Match.Context->getLangOpts());
- if (Range.isInvalid()) {
- // Validate the original range to attempt to get a meaningful error message.
- // If it's valid, then something else is the cause and we just return the
- // generic failure message.
- if (auto Err = tooling::validateEditRange(*RawRange, *Match.SourceManager))
- return handleErrors(std::move(Err), [](std::unique_ptr<StringError> E) {
- assert(E->convertToErrorCode() ==
- llvm::make_error_code(errc::invalid_argument) &&
- "Validation errors must carry the invalid_argument code");
- return llvm::createStringError(
- errc::invalid_argument,
- "selected range could not be resolved to a valid source range; " +
- E->getMessage());
- });
- return llvm::createStringError(
- errc::invalid_argument,
- "selected range could not be resolved to a valid source range");
- }
- // Validate `Range`, because `makeFileCharRange` accepts some ranges that
- // `validateEditRange` rejects.
- if (auto Err = tooling::validateEditRange(Range, *Match.SourceManager))
- return joinErrors(
- llvm::createStringError(errc::invalid_argument,
- "selected range is not valid for editing"),
- std::move(Err));
- *Result += tooling::getText(Range, *Match.Context);
- return Error::success();
-}
+// The fragment of code corresponding to the selected range.
+class SelectorStencil : public StencilInterface {
+ RangeSelector Selector;
-Error evalData(const AccessData &Data, const MatchFinder::MatchResult &Match,
- std::string *Result) {
- const auto *E = Match.Nodes.getNodeAs<Expr>(Data.BaseId);
- if (E == nullptr)
- return llvm::make_error<StringError>(errc::invalid_argument,
- "Id not bound: " + Data.BaseId);
- if (!E->isImplicitCXXThis()) {
- if (llvm::Optional<std::string> S =
- E->getType()->isAnyPointerType()
- ? tooling::buildArrow(*E, *Match.Context)
- : tooling::buildDot(*E, *Match.Context))
- *Result += *S;
- else
- return llvm::make_error<StringError>(
+public:
+ explicit SelectorStencil(RangeSelector S) : Selector(std::move(S)) {}
+
+ std::string toString() const override { return "selection(...)"; }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ auto RawRange = Selector(Match);
+ if (!RawRange)
+ return RawRange.takeError();
+ CharSourceRange Range = Lexer::makeFileCharRange(
+ *RawRange, *Match.SourceManager, Match.Context->getLangOpts());
+ if (Range.isInvalid()) {
+ // Validate the original range to attempt to get a meaningful error
+ // message. If it's valid, then something else is the cause and we just
+ // return the generic failure message.
+ if (auto Err =
+ tooling::validateEditRange(*RawRange, *Match.SourceManager))
+ return handleErrors(std::move(Err), [](std::unique_ptr<StringError> E) {
+ assert(E->convertToErrorCode() ==
+ llvm::make_error_code(errc::invalid_argument) &&
+ "Validation errors must carry the invalid_argument code");
+ return llvm::createStringError(
+ errc::invalid_argument,
+ "selected range could not be resolved to a valid source range; " +
+ E->getMessage());
+ });
+ return llvm::createStringError(
errc::invalid_argument,
- "Could not construct object text from ID: " + Data.BaseId);
+ "selected range could not be resolved to a valid source range");
+ }
+ // Validate `Range`, because `makeFileCharRange` accepts some ranges that
+ // `validateEditRange` rejects.
+ if (auto Err = tooling::validateEditRange(Range, *Match.SourceManager))
+ return joinErrors(
+ llvm::createStringError(errc::invalid_argument,
+ "selected range is not valid for editing"),
+ std::move(Err));
+ *Result += tooling::getText(Range, *Match.Context);
+ return Error::success();
}
- return Data.Member->eval(Match, Result);
-}
+};
-Error evalData(const IfBoundData &Data, const MatchFinder::MatchResult &Match,
- std::string *Result) {
- auto &M = Match.Nodes.getMap();
- return (M.find(Data.Id) != M.end() ? Data.TrueStencil : Data.FalseStencil)
- ->eval(Match, Result);
-}
+// A stencil operation to build a member access `e.m` or `e->m`, as appropriate.
+class AccessStencil : public StencilInterface {
+ std::string BaseId;
+ Stencil Member;
-Error evalData(const MatchConsumer<std::string> &Fn,
- const MatchFinder::MatchResult &Match, std::string *Result) {
- Expected<std::string> Value = Fn(Match);
- if (!Value)
- return Value.takeError();
- *Result += *Value;
- return Error::success();
-}
+public:
+ AccessStencil(StringRef BaseId, Stencil Member)
+ : BaseId(std::string(BaseId)), Member(std::move(Member)) {}
-Error evalData(const SequenceData &Data, const MatchFinder::MatchResult &Match,
- std::string *Result) {
- for (const auto &S : Data.Stencils)
- if (auto Err = S->eval(Match, Result))
- return Err;
- return Error::success();
-}
+ std::string toString() const override {
+ return (llvm::Twine("access(\"") + BaseId + "\", " + Member->toString() +
+ ")")
+ .str();
+ }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ const auto *E = Match.Nodes.getNodeAs<Expr>(BaseId);
+ if (E == nullptr)
+ return llvm::make_error<StringError>(errc::invalid_argument,
+ "Id not bound: " + BaseId);
+ if (!E->isImplicitCXXThis()) {
+ llvm::Optional<std::string> S;
+ if (E->getType()->isAnyPointerType() ||
+ isSmartPointerType(E->getType(), *Match.Context)) {
+ // Strip off any operator->. This can only occur inside an actual arrow
+ // member access, so we treat it as equivalent to an actual object
+ // expression.
+ if (const auto *OpCall = dyn_cast<clang::CXXOperatorCallExpr>(E)) {
+ if (OpCall->getOperator() == clang::OO_Arrow &&
+ OpCall->getNumArgs() == 1) {
+ E = OpCall->getArg(0);
+ }
+ }
+ S = tooling::buildArrow(*E, *Match.Context);
+ } else if (const auto *Operand = isSmartDereference(*E, *Match.Context)) {
+ // `buildDot` already handles the built-in dereference operator, so we
+ // only need to catch overloaded `operator*`.
+ S = tooling::buildArrow(*Operand, *Match.Context);
+ } else {
+ S = tooling::buildDot(*E, *Match.Context);
+ }
+ if (S.hasValue())
+ *Result += *S;
+ else
+ return llvm::make_error<StringError>(
+ errc::invalid_argument,
+ "Could not construct object text from ID: " + BaseId);
+ }
+ return Member->eval(Match, Result);
+ }
+};
+
+class IfBoundStencil : public StencilInterface {
+ std::string Id;
+ Stencil TrueStencil;
+ Stencil FalseStencil;
+
+public:
+ IfBoundStencil(StringRef Id, Stencil TrueStencil, Stencil FalseStencil)
+ : Id(std::string(Id)), TrueStencil(std::move(TrueStencil)),
+ FalseStencil(std::move(FalseStencil)) {}
+
+ std::string toString() const override {
+ return (llvm::Twine("ifBound(\"") + Id + "\", " + TrueStencil->toString() +
+ ", " + FalseStencil->toString() + ")")
+ .str();
+ }
-template <typename T> class StencilImpl : public StencilInterface {
- T Data;
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+ auto &M = Match.Nodes.getMap();
+ return (M.find(Id) != M.end() ? TrueStencil : FalseStencil)
+ ->eval(Match, Result);
+ }
+};
+
+class SequenceStencil : public StencilInterface {
+ std::vector<Stencil> Stencils;
public:
- template <typename... Ps>
- explicit StencilImpl(Ps &&... Args) : Data(std::forward<Ps>(Args)...) {}
+ SequenceStencil(std::vector<Stencil> Stencils)
+ : Stencils(std::move(Stencils)) {}
+
+ std::string toString() const override {
+ llvm::SmallVector<std::string, 2> Parts;
+ Parts.reserve(Stencils.size());
+ for (const auto &S : Stencils)
+ Parts.push_back(S->toString());
+ return (llvm::Twine("seq(") + llvm::join(Parts, ", ") + ")").str();
+ }
Error eval(const MatchFinder::MatchResult &Match,
std::string *Result) const override {
- return evalData(Data, Match, Result);
+ for (const auto &S : Stencils)
+ if (auto Err = S->eval(Match, Result))
+ return Err;
+ return Error::success();
}
+};
+
+class RunStencil : public StencilInterface {
+ MatchConsumer<std::string> Consumer;
+
+public:
+ explicit RunStencil(MatchConsumer<std::string> C) : Consumer(std::move(C)) {}
- std::string toString() const override { return toStringData(Data); }
+ std::string toString() const override { return "run(...)"; }
+
+ Error eval(const MatchFinder::MatchResult &Match,
+ std::string *Result) const override {
+
+ Expected<std::string> Value = Consumer(Match);
+ if (!Value)
+ return Value.takeError();
+ *Result += *Value;
+ return Error::success();
+ }
};
} // namespace
Stencil transformer::detail::makeStencil(StringRef Text) {
- return std::make_shared<StencilImpl<RawTextData>>(std::string(Text));
+ return std::make_shared<RawTextStencil>(std::string(Text));
}
Stencil transformer::detail::makeStencil(RangeSelector Selector) {
- return std::make_shared<StencilImpl<SelectorData>>(std::move(Selector));
+ return std::make_shared<SelectorStencil>(std::move(Selector));
}
Stencil transformer::dPrint(StringRef Id) {
- return std::make_shared<StencilImpl<DebugPrintNodeData>>(std::string(Id));
+ return std::make_shared<DebugPrintNodeStencil>(std::string(Id));
}
Stencil transformer::expression(llvm::StringRef Id) {
- return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Parens, std::string(Id));
+ return std::make_shared<UnaryOperationStencil>(UnaryNodeOperator::Parens,
+ std::string(Id));
}
Stencil transformer::deref(llvm::StringRef ExprId) {
- return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Deref, std::string(ExprId));
+ return std::make_shared<UnaryOperationStencil>(UnaryNodeOperator::Deref,
+ std::string(ExprId));
}
Stencil transformer::maybeDeref(llvm::StringRef ExprId) {
- return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::MaybeDeref, std::string(ExprId));
+ return std::make_shared<UnaryOperationStencil>(UnaryNodeOperator::MaybeDeref,
+ std::string(ExprId));
}
Stencil transformer::addressOf(llvm::StringRef ExprId) {
- return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::AddressOf, std::string(ExprId));
+ return std::make_shared<UnaryOperationStencil>(UnaryNodeOperator::AddressOf,
+ std::string(ExprId));
}
Stencil transformer::maybeAddressOf(llvm::StringRef ExprId) {
- return std::make_shared<StencilImpl<UnaryOperationData>>(
+ return std::make_shared<UnaryOperationStencil>(
UnaryNodeOperator::MaybeAddressOf, std::string(ExprId));
}
Stencil transformer::describe(StringRef Id) {
- return std::make_shared<StencilImpl<UnaryOperationData>>(
- UnaryNodeOperator::Describe, std::string(Id));
+ return std::make_shared<UnaryOperationStencil>(UnaryNodeOperator::Describe,
+ std::string(Id));
}
Stencil transformer::access(StringRef BaseId, Stencil Member) {
- return std::make_shared<StencilImpl<AccessData>>(BaseId, std::move(Member));
+ return std::make_shared<AccessStencil>(BaseId, std::move(Member));
}
Stencil transformer::ifBound(StringRef Id, Stencil TrueStencil,
Stencil FalseStencil) {
- return std::make_shared<StencilImpl<IfBoundData>>(Id, std::move(TrueStencil),
- std::move(FalseStencil));
+ return std::make_shared<IfBoundStencil>(Id, std::move(TrueStencil),
+ std::move(FalseStencil));
}
Stencil transformer::run(MatchConsumer<std::string> Fn) {
- return std::make_shared<StencilImpl<MatchConsumer<std::string>>>(
- std::move(Fn));
+ return std::make_shared<RunStencil>(std::move(Fn));
}
Stencil transformer::catVector(std::vector<Stencil> Parts) {
// Only one argument, so don't wrap in sequence.
if (Parts.size() == 1)
return std::move(Parts[0]);
- return std::make_shared<StencilImpl<SequenceData>>(std::move(Parts));
+ return std::make_shared<SequenceStencil>(std::move(Parts));
}
diff --git a/clang/tools/amdgpu-arch/AMDGPUArch.cpp b/clang/tools/amdgpu-arch/AMDGPUArch.cpp
new file mode 100644
index 000000000000..4fae78b4f121
--- /dev/null
+++ b/clang/tools/amdgpu-arch/AMDGPUArch.cpp
@@ -0,0 +1,78 @@
+//===- AMDGPUArch.cpp - list AMDGPU installed ----------*- C++ -*---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a tool that uses HSA to detect the names of the
+// AMDGPUs installed in the system. It is used by the AMDGPU OpenMP driver.
+//
+//===----------------------------------------------------------------------===//
+
+#if defined(__has_include)
+#if __has_include("hsa.h")
+#define HSA_HEADER_FOUND 1
+#include "hsa.h"
+#elif __has_include("hsa/hsa.h")
+#define HSA_HEADER_FOUND 1
+#include "hsa/hsa.h"
+#else
+#define HSA_HEADER_FOUND 0
+#endif
+#else
+#define HSA_HEADER_FOUND 0
+#endif
+
+#if !HSA_HEADER_FOUND
+int main() { return 1; }
+#else
+
+#include <string>
+#include <vector>
+
+static hsa_status_t iterateAgentsCallback(hsa_agent_t Agent, void *Data) {
+ hsa_device_type_t DeviceType;
+ hsa_status_t Status =
+ hsa_agent_get_info(Agent, HSA_AGENT_INFO_DEVICE, &DeviceType);
+
+  // Continue only if the device type is GPU.
+ if (Status != HSA_STATUS_SUCCESS || DeviceType != HSA_DEVICE_TYPE_GPU) {
+ return Status;
+ }
+
+ std::vector<std::string> *GPUs =
+ static_cast<std::vector<std::string> *>(Data);
+ char GPUName[64];
+ Status = hsa_agent_get_info(Agent, HSA_AGENT_INFO_NAME, GPUName);
+ if (Status != HSA_STATUS_SUCCESS) {
+ return Status;
+ }
+ GPUs->push_back(GPUName);
+ return HSA_STATUS_SUCCESS;
+}
+
+int main() {
+ hsa_status_t Status = hsa_init();
+ if (Status != HSA_STATUS_SUCCESS) {
+ return 1;
+ }
+
+ std::vector<std::string> GPUs;
+ Status = hsa_iterate_agents(iterateAgentsCallback, &GPUs);
+ if (Status != HSA_STATUS_SUCCESS) {
+ return 1;
+ }
+
+ for (const auto &GPU : GPUs)
+ printf("%s\n", GPU.c_str());
+
+ if (GPUs.size() < 1)
+ return 1;
+
+ hsa_shut_down();
+ return 0;
+}
+
+#endif
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index 64f0e2badf33..144e87f78c64 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -402,11 +402,26 @@ static bool format(StringRef FileName) {
return true;
}
- if (SortIncludes.getNumOccurrences() != 0)
- FormatStyle->SortIncludes = SortIncludes;
+ if (SortIncludes.getNumOccurrences() != 0) {
+ if (SortIncludes)
+ FormatStyle->SortIncludes = FormatStyle::SI_CaseSensitive;
+ else
+ FormatStyle->SortIncludes = FormatStyle::SI_Never;
+ }
unsigned CursorPosition = Cursor;
Replacements Replaces = sortIncludes(*FormatStyle, Code->getBuffer(), Ranges,
AssumedFileName, &CursorPosition);
+
+  // To format JSON, insert a variable to trick the code into thinking it's
+  // JavaScript.
+ if (FormatStyle->isJson()) {
+ auto Err = Replaces.add(tooling::Replacement(
+ tooling::Replacement(AssumedFileName, 0, 0, "x = ")));
+ if (Err) {
+ llvm::errs() << "Bad Json variable insertion\n";
+ }
+ }
+
auto ChangedCode = tooling::applyAllReplacements(Code->getBuffer(), Replaces);
if (!ChangedCode) {
llvm::errs() << llvm::toString(ChangedCode.takeError()) << "\n";
@@ -502,7 +517,8 @@ int main(int argc, const char **argv) {
cl::SetVersionPrinter(PrintVersion);
cl::ParseCommandLineOptions(
argc, argv,
- "A tool to format C/C++/Java/JavaScript/Objective-C/Protobuf/C# code.\n\n"
+ "A tool to format C/C++/Java/JavaScript/JSON/Objective-C/Protobuf/C# "
+ "code.\n\n"
"If no arguments are specified, it formats the code from standard input\n"
"and writes the result to the standard output.\n"
"If <file>s are given, it reformats the files. If -i is specified\n"
diff --git a/clang/tools/clang-repl/ClangRepl.cpp b/clang/tools/clang-repl/ClangRepl.cpp
new file mode 100644
index 000000000000..ba6bb11abc86
--- /dev/null
+++ b/clang/tools/clang-repl/ClangRepl.cpp
@@ -0,0 +1,108 @@
+//===--- tools/clang-repl/ClangRepl.cpp - clang-repl - the Clang REPL -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a REPL tool on top of clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Interpreter/Interpreter.h"
+
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/LineEditor/LineEditor.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h" // llvm_shutdown
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetSelect.h" // llvm::Initialize*
+
+static llvm::cl::list<std::string>
+ ClangArgs("Xcc", llvm::cl::ZeroOrMore,
+ llvm::cl::desc("Argument to pass to the CompilerInvocation"),
+ llvm::cl::CommaSeparated);
+static llvm::cl::opt<bool> OptHostSupportsJit("host-supports-jit",
+ llvm::cl::Hidden);
+static llvm::cl::list<std::string> OptInputs(llvm::cl::Positional,
+ llvm::cl::ZeroOrMore,
+ llvm::cl::desc("[code to run]"));
+
+static void LLVMErrorHandler(void *UserData, const std::string &Message,
+ bool GenCrashDiag) {
+ auto &Diags = *static_cast<clang::DiagnosticsEngine *>(UserData);
+
+ Diags.Report(clang::diag::err_fe_error_backend) << Message;
+
+ // Run the interrupt handlers to make sure any special cleanups get done, in
+ // particular that we remove files registered with RemoveFileOnSignal.
+ llvm::sys::RunInterruptHandlers();
+
+ // We cannot recover from llvm errors. When reporting a fatal error, exit
+ // with status 70 to generate crash diagnostics. For BSD systems this is
+ // defined as an internal software error. Otherwise, exit with status 1.
+
+ exit(GenCrashDiag ? 70 : 1);
+}
+
+llvm::ExitOnError ExitOnErr;
+int main(int argc, const char **argv) {
+ ExitOnErr.setBanner("clang-repl: ");
+ llvm::cl::ParseCommandLineOptions(argc, argv);
+
+ std::vector<const char *> ClangArgv(ClangArgs.size());
+ std::transform(ClangArgs.begin(), ClangArgs.end(), ClangArgv.begin(),
+ [](const std::string &s) -> const char * { return s.data(); });
+ llvm::InitializeNativeTarget();
+ llvm::InitializeNativeTargetAsmPrinter();
+
+ if (OptHostSupportsJit) {
+ auto J = llvm::orc::LLJITBuilder().create();
+ if (J)
+ llvm::outs() << "true\n";
+ else {
+ llvm::consumeError(J.takeError());
+ llvm::outs() << "false\n";
+ }
+ return 0;
+ }
+
+ // FIXME: Investigate if we could use runToolOnCodeWithArgs from tooling. It
+ // can replace the boilerplate code for creation of the compiler instance.
+ auto CI = ExitOnErr(clang::IncrementalCompilerBuilder::create(ClangArgv));
+
+ // Set an error handler, so that any LLVM backend diagnostics go through our
+ // error handler.
+ llvm::install_fatal_error_handler(LLVMErrorHandler,
+ static_cast<void *>(&CI->getDiagnostics()));
+
+ auto Interp = ExitOnErr(clang::Interpreter::create(std::move(CI)));
+ for (const std::string &input : OptInputs) {
+ if (auto Err = Interp->ParseAndExecute(input))
+ llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ }
+
+ if (OptInputs.empty()) {
+ llvm::LineEditor LE("clang-repl");
+ // FIXME: Add LE.setListCompleter
+ while (llvm::Optional<std::string> Line = LE.readLine()) {
+ if (*Line == "quit")
+ break;
+ if (auto Err = Interp->ParseAndExecute(*Line))
+ llvm::logAllUnhandledErrors(std::move(Err), llvm::errs(), "error: ");
+ }
+ }
+
+ // Our error handler depends on the Diagnostics object, which we're
+ // potentially about to delete. Uninstall the handler now so that any
+ // later errors use the default handling behavior instead.
+ llvm::remove_fatal_error_handler();
+
+ llvm::llvm_shutdown();
+
+ return 0;
+}
diff --git a/clang/tools/driver/cc1_main.cpp b/clang/tools/driver/cc1_main.cpp
index b4ab6f57345b..396d6ff529f3 100644
--- a/clang/tools/driver/cc1_main.cpp
+++ b/clang/tools/driver/cc1_main.cpp
@@ -203,6 +203,12 @@ int cc1_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts = new DiagnosticOptions();
TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
DiagnosticsEngine Diags(DiagID, &*DiagOpts, DiagsBuffer);
+
+ // Setup round-trip remarks for the DiagnosticsEngine used in CreateFromArgs.
+ if (find(Argv, StringRef("-Rround-trip-cc1-args")) != Argv.end())
+ Diags.setSeverity(diag::remark_cc1_round_trip_generated,
+ diag::Severity::Remark, {});
+
bool Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
Argv, Diags, Argv0);
diff --git a/clang/tools/driver/cc1as_main.cpp b/clang/tools/driver/cc1as_main.cpp
index de71026fbffe..086ce0ea7787 100644
--- a/clang/tools/driver/cc1as_main.cpp
+++ b/clang/tools/driver/cc1as_main.cpp
@@ -91,6 +91,7 @@ struct AssemblerInvocation {
unsigned SaveTemporaryLabels : 1;
unsigned GenDwarfForAssembly : 1;
unsigned RelaxELFRelocations : 1;
+ unsigned Dwarf64 : 1;
unsigned DwarfVersion;
std::string DwarfDebugFlags;
std::string DwarfDebugProducer;
@@ -160,6 +161,7 @@ public:
FatalWarnings = 0;
NoWarn = 0;
IncrementalLinkerCompatible = 0;
+ Dwarf64 = 0;
DwarfVersion = 0;
EmbedBitcode = 0;
}
@@ -231,13 +233,16 @@ bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
}
Opts.RelaxELFRelocations = Args.hasArg(OPT_mrelax_relocations);
+ if (auto *DwarfFormatArg = Args.getLastArg(OPT_gdwarf64, OPT_gdwarf32))
+ Opts.Dwarf64 = DwarfFormatArg->getOption().matches(OPT_gdwarf64);
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 2, Diags);
Opts.DwarfDebugFlags =
std::string(Args.getLastArgValue(OPT_dwarf_debug_flags));
Opts.DwarfDebugProducer =
std::string(Args.getLastArgValue(OPT_dwarf_debug_producer));
- Opts.DebugCompilationDir =
- std::string(Args.getLastArgValue(OPT_fdebug_compilation_dir));
+ if (const Arg *A = Args.getLastArg(options::OPT_ffile_compilation_dir_EQ,
+ options::OPT_fdebug_compilation_dir_EQ))
+ Opts.DebugCompilationDir = A->getValue();
Opts.MainFileName = std::string(Args.getLastArgValue(OPT_main_file_name));
for (const auto &Arg : Args.getAllArgValues(OPT_fdebug_prefix_map_EQ)) {
@@ -319,7 +324,7 @@ getOutputStream(StringRef Path, DiagnosticsEngine &Diags, bool Binary) {
std::error_code EC;
auto Out = std::make_unique<raw_fd_ostream>(
- Path, EC, (Binary ? sys::fs::OF_None : sys::fs::OF_Text));
+ Path, EC, (Binary ? sys::fs::OF_None : sys::fs::OF_TextWithCRLF));
if (EC) {
Diags.Report(diag::err_fe_unable_to_open_output) << Path << EC.message();
return nullptr;
@@ -328,8 +333,8 @@ getOutputStream(StringRef Path, DiagnosticsEngine &Diags, bool Binary) {
return Out;
}
-static bool ExecuteAssembler(AssemblerInvocation &Opts,
- DiagnosticsEngine &Diags) {
+static bool ExecuteAssemblerImpl(AssemblerInvocation &Opts,
+ DiagnosticsEngine &Diags) {
// Get the target specific parser.
std::string Error;
const Target *TheTarget = TargetRegistry::lookupTarget(Opts.Triple, Error);
@@ -337,7 +342,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
return Diags.Report(diag::err_target_unknown_triple) << Opts.Triple;
ErrorOr<std::unique_ptr<MemoryBuffer>> Buffer =
- MemoryBuffer::getFileOrSTDIN(Opts.InputFile);
+ MemoryBuffer::getFileOrSTDIN(Opts.InputFile, /*IsText=*/true);
if (std::error_code EC = Buffer.getError()) {
Error = EC.message();
@@ -378,11 +383,15 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
if (!Opts.SplitDwarfOutput.empty())
DwoOS = getOutputStream(Opts.SplitDwarfOutput, Diags, IsBinary);
- // FIXME: This is not pretty. MCContext has a ptr to MCObjectFileInfo and
- // MCObjectFileInfo needs a MCContext reference in order to initialize itself.
- std::unique_ptr<MCObjectFileInfo> MOFI(new MCObjectFileInfo());
+ // Build up the feature string from the target feature list.
+ std::string FS = llvm::join(Opts.Features, ",");
- MCContext Ctx(MAI.get(), MRI.get(), MOFI.get(), &SrcMgr, &MCOptions);
+ std::unique_ptr<MCSubtargetInfo> STI(
+ TheTarget->createMCSubtargetInfo(Opts.Triple, Opts.CPU, FS));
+ assert(STI && "Unable to create subtarget info!");
+
+ MCContext Ctx(Triple(Opts.Triple), MAI.get(), MRI.get(), STI.get(), &SrcMgr,
+ &MCOptions);
bool PIC = false;
if (Opts.RelocationModel == "static") {
@@ -395,7 +404,12 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
PIC = false;
}
- MOFI->InitMCObjectFileInfo(Triple(Opts.Triple), PIC, Ctx);
+ // FIXME: This is not pretty. MCContext has a ptr to MCObjectFileInfo and
+ // MCObjectFileInfo needs a MCContext reference in order to initialize itself.
+ std::unique_ptr<MCObjectFileInfo> MOFI(
+ TheTarget->createMCObjectFileInfo(Ctx, PIC));
+ Ctx.setObjectFileInfo(MOFI.get());
+
if (Opts.SaveTemporaryLabels)
Ctx.setAllowTemporaryLabels(false);
if (Opts.GenDwarfForAssembly)
@@ -417,23 +431,17 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
Ctx.addDebugPrefixMapEntry(KV.first, KV.second);
if (!Opts.MainFileName.empty())
Ctx.setMainFileName(StringRef(Opts.MainFileName));
+ Ctx.setDwarfFormat(Opts.Dwarf64 ? dwarf::DWARF64 : dwarf::DWARF32);
Ctx.setDwarfVersion(Opts.DwarfVersion);
if (Opts.GenDwarfForAssembly)
Ctx.setGenDwarfRootFile(Opts.InputFile,
SrcMgr.getMemoryBuffer(BufferIndex)->getBuffer());
- // Build up the feature string from the target feature list.
- std::string FS = llvm::join(Opts.Features, ",");
-
std::unique_ptr<MCStreamer> Str;
std::unique_ptr<MCInstrInfo> MCII(TheTarget->createMCInstrInfo());
assert(MCII && "Unable to create instruction info!");
- std::unique_ptr<MCSubtargetInfo> STI(
- TheTarget->createMCSubtargetInfo(Opts.Triple, Opts.CPU, FS));
- assert(STI && "Unable to create subtarget info!");
-
raw_pwrite_stream *Out = FDOS.get();
std::unique_ptr<buffer_ostream> BOS;
@@ -487,8 +495,7 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
// When -fembed-bitcode is passed to clang_as, a 1-byte marker
// is emitted in __LLVM,__asm section if the object file is MachO format.
- if (Opts.EmbedBitcode && Ctx.getObjectFileInfo()->getObjectFileType() ==
- MCObjectFileInfo::IsMachO) {
+ if (Opts.EmbedBitcode && Ctx.getObjectFileType() == MCContext::IsMachO) {
MCSection *AsmLabel = Ctx.getMachOSection(
"__LLVM", "__asm", MachO::S_REGULAR, 4, SectionKind::getReadOnly());
Str.get()->SwitchSection(AsmLabel);
@@ -525,12 +532,12 @@ static bool ExecuteAssembler(AssemblerInvocation &Opts,
Failed = Parser->Run(Opts.NoInitialTextSection);
}
- // Parser has a reference to the output stream (Str), so close Parser first.
- Parser.reset();
- Str.reset();
- // Close the output stream early.
- BOS.reset();
- FDOS.reset();
+ return Failed;
+}
+
+static bool ExecuteAssembler(AssemblerInvocation &Opts,
+ DiagnosticsEngine &Diags) {
+ bool Failed = ExecuteAssemblerImpl(Opts, Diags);
// Delete output file if there were errors.
if (Failed) {
@@ -578,7 +585,7 @@ int cc1as_main(ArrayRef<const char *> Argv, const char *Argv0, void *MainAddr) {
return 1;
if (Asm.ShowHelp) {
- getDriverOptTable().PrintHelp(
+ getDriverOptTable().printHelp(
llvm::outs(), "clang -cc1as [options] file...",
"Clang Integrated Assembler",
/*Include=*/driver::options::CC1AsOption, /*Exclude=*/0,
diff --git a/clang/tools/driver/cc1gen_reproducer_main.cpp b/clang/tools/driver/cc1gen_reproducer_main.cpp
index 472055ee2170..89b7227fdb17 100644
--- a/clang/tools/driver/cc1gen_reproducer_main.cpp
+++ b/clang/tools/driver/cc1gen_reproducer_main.cpp
@@ -162,7 +162,7 @@ int cc1gen_reproducer_main(ArrayRef<const char *> Argv, const char *Argv0,
// Parse the invocation descriptor.
StringRef Input = Argv[0];
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Buffer =
- llvm::MemoryBuffer::getFile(Input);
+ llvm::MemoryBuffer::getFile(Input, /*IsText=*/true);
if (!Buffer) {
llvm::errs() << "error: failed to read " << Input << ": "
<< Buffer.getError().message() << "\n";
diff --git a/clang/tools/driver/driver.cpp b/clang/tools/driver/driver.cpp
index f67af6790fff..5a453429e79b 100644
--- a/clang/tools/driver/driver.cpp
+++ b/clang/tools/driver/driver.cpp
@@ -244,20 +244,28 @@ static void getCLEnvVarOptions(std::string &EnvValue, llvm::StringSaver &Saver,
}
static void SetBackdoorDriverOutputsFromEnvVars(Driver &TheDriver) {
- // Handle CC_PRINT_OPTIONS and CC_PRINT_OPTIONS_FILE.
- TheDriver.CCPrintOptions = !!::getenv("CC_PRINT_OPTIONS");
- if (TheDriver.CCPrintOptions)
- TheDriver.CCPrintOptionsFilename = ::getenv("CC_PRINT_OPTIONS_FILE");
-
- // Handle CC_PRINT_HEADERS and CC_PRINT_HEADERS_FILE.
- TheDriver.CCPrintHeaders = !!::getenv("CC_PRINT_HEADERS");
- if (TheDriver.CCPrintHeaders)
- TheDriver.CCPrintHeadersFilename = ::getenv("CC_PRINT_HEADERS_FILE");
-
- // Handle CC_LOG_DIAGNOSTICS and CC_LOG_DIAGNOSTICS_FILE.
- TheDriver.CCLogDiagnostics = !!::getenv("CC_LOG_DIAGNOSTICS");
- if (TheDriver.CCLogDiagnostics)
- TheDriver.CCLogDiagnosticsFilename = ::getenv("CC_LOG_DIAGNOSTICS_FILE");
+ auto CheckEnvVar = [](const char *EnvOptSet, const char *EnvOptFile,
+ std::string &OptFile) {
+ bool OptSet = !!::getenv(EnvOptSet);
+ if (OptSet) {
+ if (const char *Var = ::getenv(EnvOptFile))
+ OptFile = Var;
+ }
+ return OptSet;
+ };
+
+ TheDriver.CCPrintOptions =
+ CheckEnvVar("CC_PRINT_OPTIONS", "CC_PRINT_OPTIONS_FILE",
+ TheDriver.CCPrintOptionsFilename);
+ TheDriver.CCPrintHeaders =
+ CheckEnvVar("CC_PRINT_HEADERS", "CC_PRINT_HEADERS_FILE",
+ TheDriver.CCPrintHeadersFilename);
+ TheDriver.CCLogDiagnostics =
+ CheckEnvVar("CC_LOG_DIAGNOSTICS", "CC_LOG_DIAGNOSTICS_FILE",
+ TheDriver.CCLogDiagnosticsFilename);
+ TheDriver.CCPrintProcessStats =
+ CheckEnvVar("CC_PRINT_PROC_STAT", "CC_PRINT_PROC_STAT_FILE",
+ TheDriver.CCPrintStatReportFilename);
}
static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
@@ -265,7 +273,7 @@ static void FixupDiagPrefixExeName(TextDiagnosticPrinter *DiagClient,
// If the clang binary happens to be named cl.exe for compatibility reasons,
// use clang-cl.exe as the prefix to avoid confusion between clang and MSVC.
StringRef ExeBasename(llvm::sys::path::stem(Path));
- if (ExeBasename.equals_lower("cl"))
+ if (ExeBasename.equals_insensitive("cl"))
ExeBasename = "clang-cl";
DiagClient->setPrefix(std::string(ExeBasename));
}
@@ -340,46 +348,40 @@ static int ExecuteCC1Tool(SmallVectorImpl<const char *> &ArgV) {
return 1;
}
-int main(int argc_, const char **argv_) {
+int main(int Argc, const char **Argv) {
noteBottomOfStack();
- llvm::InitLLVM X(argc_, argv_);
+ llvm::InitLLVM X(Argc, Argv);
llvm::setBugReportMsg("PLEASE submit a bug report to " BUG_REPORT_URL
" and include the crash backtrace, preprocessed "
"source, and associated run script.\n");
- SmallVector<const char *, 256> argv(argv_, argv_ + argc_);
+ SmallVector<const char *, 256> Args(Argv, Argv + Argc);
if (llvm::sys::Process::FixupStandardFileDescriptors())
return 1;
llvm::InitializeAllTargets();
- auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(argv[0]);
llvm::BumpPtrAllocator A;
llvm::StringSaver Saver(A);
// Parse response files using the GNU syntax, unless we're in CL mode. There
- // are two ways to put clang in CL compatibility mode: argv[0] is either
+ // are two ways to put clang in CL compatibility mode: Args[0] is either
// clang-cl or cl, or --driver-mode=cl is on the command line. The normal
// command line parsing can't happen until after response file parsing, so we
// have to manually search for a --driver-mode=cl argument the hard way.
// Finally, our -cc1 tools don't care which tokenization mode we use because
// response files written by clang will tokenize the same way in either mode.
- bool ClangCLMode = false;
- if (StringRef(TargetAndMode.DriverMode).equals("--driver-mode=cl") ||
- llvm::find_if(argv, [](const char *F) {
- return F && strcmp(F, "--driver-mode=cl") == 0;
- }) != argv.end()) {
- ClangCLMode = true;
- }
+ bool ClangCLMode =
+ IsClangCL(getDriverMode(Args[0], llvm::makeArrayRef(Args).slice(1)));
enum { Default, POSIX, Windows } RSPQuoting = Default;
- for (const char *F : argv) {
+ for (const char *F : Args) {
if (strcmp(F, "--rsp-quoting=posix") == 0)
RSPQuoting = POSIX;
else if (strcmp(F, "--rsp-quoting=windows") == 0)
RSPQuoting = Windows;
}
- // Determines whether we want nullptr markers in argv to indicate response
+ // Determines whether we want nullptr markers in Args to indicate response
// files end-of-lines. We only use this for the /LINK driver argument with
// clang-cl.exe on Windows.
bool MarkEOLs = ClangCLMode;
@@ -390,31 +392,31 @@ int main(int argc_, const char **argv_) {
else
Tokenizer = &llvm::cl::TokenizeGNUCommandLine;
- if (MarkEOLs && argv.size() > 1 && StringRef(argv[1]).startswith("-cc1"))
+ if (MarkEOLs && Args.size() > 1 && StringRef(Args[1]).startswith("-cc1"))
MarkEOLs = false;
- llvm::cl::ExpandResponseFiles(Saver, Tokenizer, argv, MarkEOLs);
+ llvm::cl::ExpandResponseFiles(Saver, Tokenizer, Args, MarkEOLs);
// Handle -cc1 integrated tools, even if -cc1 was expanded from a response
// file.
- auto FirstArg = std::find_if(argv.begin() + 1, argv.end(),
+ auto FirstArg = std::find_if(Args.begin() + 1, Args.end(),
[](const char *A) { return A != nullptr; });
- if (FirstArg != argv.end() && StringRef(*FirstArg).startswith("-cc1")) {
+ if (FirstArg != Args.end() && StringRef(*FirstArg).startswith("-cc1")) {
// If -cc1 came from a response file, remove the EOL sentinels.
if (MarkEOLs) {
- auto newEnd = std::remove(argv.begin(), argv.end(), nullptr);
- argv.resize(newEnd - argv.begin());
+ auto newEnd = std::remove(Args.begin(), Args.end(), nullptr);
+ Args.resize(newEnd - Args.begin());
}
- return ExecuteCC1Tool(argv);
+ return ExecuteCC1Tool(Args);
}
// Handle options that need handling before the real command line parsing in
// Driver::BuildCompilation()
bool CanonicalPrefixes = true;
- for (int i = 1, size = argv.size(); i < size; ++i) {
+ for (int i = 1, size = Args.size(); i < size; ++i) {
// Skip end-of-line response file markers
- if (argv[i] == nullptr)
+ if (Args[i] == nullptr)
continue;
- if (StringRef(argv[i]) == "-no-canonical-prefixes") {
+ if (StringRef(Args[i]) == "-no-canonical-prefixes") {
CanonicalPrefixes = false;
break;
}
@@ -430,7 +432,7 @@ int main(int argc_, const char **argv_) {
getCLEnvVarOptions(OptCL.getValue(), Saver, PrependedOpts);
// Insert right after the program name to prepend to the argument list.
- argv.insert(argv.begin() + 1, PrependedOpts.begin(), PrependedOpts.end());
+ Args.insert(Args.begin() + 1, PrependedOpts.begin(), PrependedOpts.end());
}
// Arguments in "_CL_" are appended.
llvm::Optional<std::string> Opt_CL_ = llvm::sys::Process::GetEnv("_CL_");
@@ -439,7 +441,7 @@ int main(int argc_, const char **argv_) {
getCLEnvVarOptions(Opt_CL_.getValue(), Saver, AppendedOpts);
// Insert at the end of the argument list to append.
- argv.append(AppendedOpts.begin(), AppendedOpts.end());
+ Args.append(AppendedOpts.begin(), AppendedOpts.end());
}
}
@@ -448,10 +450,10 @@ int main(int argc_, const char **argv_) {
// scenes.
if (const char *OverrideStr = ::getenv("CCC_OVERRIDE_OPTIONS")) {
// FIXME: Driver shouldn't take extra initial argument.
- ApplyQAOverride(argv, OverrideStr, SavedStrings);
+ ApplyQAOverride(Args, OverrideStr, SavedStrings);
}
- std::string Path = GetExecutablePath(argv[0], CanonicalPrefixes);
+ std::string Path = GetExecutablePath(Args[0], CanonicalPrefixes);
// Whether the cc1 tool should be called inside the current process, or if we
// should spawn a new clang subprocess (old behavior).
@@ -460,7 +462,7 @@ int main(int argc_, const char **argv_) {
bool UseNewCC1Process;
IntrusiveRefCntPtr<DiagnosticOptions> DiagOpts =
- CreateAndPopulateDiagOpts(argv, UseNewCC1Process);
+ CreateAndPopulateDiagOpts(Args, UseNewCC1Process);
TextDiagnosticPrinter *DiagClient
= new TextDiagnosticPrinter(llvm::errs(), &*DiagOpts);
@@ -481,10 +483,11 @@ int main(int argc_, const char **argv_) {
ProcessWarningOptions(Diags, *DiagOpts, /*ReportDiags=*/false);
Driver TheDriver(Path, llvm::sys::getDefaultTargetTriple(), Diags);
- SetInstallDir(argv, TheDriver, CanonicalPrefixes);
+ SetInstallDir(Args, TheDriver, CanonicalPrefixes);
+ auto TargetAndMode = ToolChain::getTargetAndModeFromProgramName(Args[0]);
TheDriver.setTargetAndMode(TargetAndMode);
- insertTargetAndModeArgs(TargetAndMode, argv, SavedStrings);
+ insertTargetAndModeArgs(TargetAndMode, Args, SavedStrings);
SetBackdoorDriverOutputsFromEnvVars(TheDriver);
@@ -494,7 +497,7 @@ int main(int argc_, const char **argv_) {
llvm::CrashRecoveryContext::Enable();
}
- std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(argv));
+ std::unique_ptr<Compilation> C(TheDriver.BuildCompilation(Args));
int Res = 1;
bool IsCrash = false;
if (C && !C->containsError()) {
diff --git a/clang/utils/TableGen/ClangAttrEmitter.cpp b/clang/utils/TableGen/ClangAttrEmitter.cpp
index d435c5780531..d679d58aaef1 100644
--- a/clang/utils/TableGen/ClangAttrEmitter.cpp
+++ b/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -773,10 +773,8 @@ namespace {
void writeValue(raw_ostream &OS) const override {
OS << "\";\n";
- OS << " bool isFirst = true;\n"
- << " for (const auto &Val : " << RangeName << "()) {\n"
- << " if (isFirst) isFirst = false;\n"
- << " else OS << \", \";\n";
+ OS << " for (const auto &Val : " << RangeName << "()) {\n"
+ << " DelimitAttributeArgument(OS, IsFirstArgument);\n";
writeValueImpl(OS);
OS << " }\n";
OS << " OS << \"";
@@ -1428,10 +1426,12 @@ writePrettyPrintFunction(const Record &R,
return;
}
- OS << " switch (getAttributeSpellingListIndex()) {\n"
- " default:\n"
- " llvm_unreachable(\"Unknown attribute spelling!\");\n"
- " break;\n";
+ OS << " bool IsFirstArgument = true; (void)IsFirstArgument;\n"
+ << " unsigned TrailingOmittedArgs = 0; (void)TrailingOmittedArgs;\n"
+ << " switch (getAttributeSpellingListIndex()) {\n"
+ << " default:\n"
+ << " llvm_unreachable(\"Unknown attribute spelling!\");\n"
+ << " break;\n";
for (unsigned I = 0; I < Spellings.size(); ++ I) {
llvm::SmallString<16> Prefix;
@@ -1476,12 +1476,10 @@ writePrettyPrintFunction(const Record &R,
Spelling += Name;
- OS <<
- " case " << I << " : {\n"
- " OS << \"" << Prefix << Spelling;
+ OS << " case " << I << " : {\n"
+ << " OS << \"" << Prefix << Spelling << "\";\n";
if (Variety == "Pragma") {
- OS << "\";\n";
OS << " printPrettyPragma(OS, Policy);\n";
OS << " OS << \"\\n\";";
OS << " break;\n";
@@ -1490,19 +1488,18 @@ writePrettyPrintFunction(const Record &R,
}
if (Spelling == "availability") {
- OS << "(";
+ OS << " OS << \"(";
writeAvailabilityValue(OS);
- OS << ")";
+ OS << ")\";\n";
} else if (Spelling == "deprecated" || Spelling == "gnu::deprecated") {
- OS << "(";
+ OS << " OS << \"(";
writeDeprecatedAttrValue(OS, Variety);
- OS << ")";
+ OS << ")\";\n";
} else {
// To avoid printing parentheses around an empty argument list or
// printing spurious commas at the end of an argument list, we need to
// determine where the last provided non-fake argument is.
unsigned NonFakeArgs = 0;
- unsigned TrailingOptArgs = 0;
bool FoundNonOptArg = false;
for (const auto &arg : llvm::reverse(Args)) {
if (arg->isFake())
@@ -1516,61 +1513,33 @@ writePrettyPrintFunction(const Record &R,
FoundNonOptArg = true;
continue;
}
- if (!TrailingOptArgs++)
- OS << "\";\n"
- << " unsigned TrailingOmittedArgs = 0;\n";
OS << " if (" << arg->getIsOmitted() << ")\n"
<< " ++TrailingOmittedArgs;\n";
}
- if (TrailingOptArgs)
- OS << " OS << \"";
- if (TrailingOptArgs < NonFakeArgs)
- OS << "(";
- else if (TrailingOptArgs)
- OS << "\";\n"
- << " if (TrailingOmittedArgs < " << NonFakeArgs << ")\n"
- << " OS << \"(\";\n"
- << " OS << \"";
unsigned ArgIndex = 0;
for (const auto &arg : Args) {
if (arg->isFake())
continue;
- if (ArgIndex) {
- if (ArgIndex >= NonFakeArgs - TrailingOptArgs)
- OS << "\";\n"
- << " if (" << ArgIndex << " < " << NonFakeArgs
- << " - TrailingOmittedArgs)\n"
- << " OS << \", \";\n"
- << " OS << \"";
- else
- OS << ", ";
- }
std::string IsOmitted = arg->getIsOmitted();
if (arg->isOptional() && IsOmitted != "false")
- OS << "\";\n"
- << " if (!(" << IsOmitted << ")) {\n"
- << " OS << \"";
+ OS << " if (!(" << IsOmitted << ")) {\n";
+ // Variadic arguments print their own leading comma.
+ if (!arg->isVariadic())
+ OS << " DelimitAttributeArgument(OS, IsFirstArgument);\n";
+ OS << " OS << \"";
arg->writeValue(OS);
+ OS << "\";\n";
if (arg->isOptional() && IsOmitted != "false")
- OS << "\";\n"
- << " }\n"
- << " OS << \"";
+ OS << " }\n";
++ArgIndex;
}
- if (TrailingOptArgs < NonFakeArgs)
- OS << ")";
- else if (TrailingOptArgs)
- OS << "\";\n"
- << " if (TrailingOmittedArgs < " << NonFakeArgs << ")\n"
- << " OS << \")\";\n"
- << " OS << \"";
+ if (ArgIndex != 0)
+ OS << " if (!IsFirstArgument)\n"
+ << " OS << \")\";\n";
}
-
- OS << Suffix + "\";\n";
-
- OS <<
- " break;\n"
- " }\n";
+ OS << " OS << \"" << Suffix << "\";\n"
+ << " break;\n"
+ << " }\n";
}
// End of the switch statement.
@@ -1859,6 +1828,22 @@ struct PragmaClangAttributeSupport {
} // end anonymous namespace
+static bool isSupportedPragmaClangAttributeSubject(const Record &Subject) {
+ // FIXME: #pragma clang attribute does not currently support statement
+ // attributes, so test whether the subject is one that appertains to a
+ // declaration node. However, it may be reasonable for support for statement
+ // attributes to be added.
+ if (Subject.isSubClassOf("DeclNode") || Subject.isSubClassOf("DeclBase") ||
+ Subject.getName() == "DeclBase")
+ return true;
+
+ if (Subject.isSubClassOf("SubsetSubject"))
+ return isSupportedPragmaClangAttributeSubject(
+ *Subject.getValueAsDef("Base"));
+
+ return false;
+}
+
static bool doesDeclDeriveFrom(const Record *D, const Record *Base) {
const Record *CurrentBase = D->getValueAsOptionalDef(BaseFieldName);
if (!CurrentBase)
@@ -1980,13 +1965,15 @@ bool PragmaClangAttributeSupport::isAttributedSupported(
return false;
const Record *SubjectObj = Attribute.getValueAsDef("Subjects");
std::vector<Record *> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
- if (Subjects.empty())
- return false;
+ bool HasAtLeastOneValidSubject = false;
for (const auto *Subject : Subjects) {
+ if (!isSupportedPragmaClangAttributeSubject(*Subject))
+ continue;
if (SubjectsToRules.find(Subject) == SubjectsToRules.end())
return false;
+ HasAtLeastOneValidSubject = true;
}
- return true;
+ return HasAtLeastOneValidSubject;
}
static std::string GenerateTestExpression(ArrayRef<Record *> LangOpts) {
@@ -2032,6 +2019,8 @@ PragmaClangAttributeSupport::generateStrictConformsTo(const Record &Attr,
const Record *SubjectObj = Attr.getValueAsDef("Subjects");
std::vector<Record *> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
for (const auto *Subject : Subjects) {
+ if (!isSupportedPragmaClangAttributeSubject(*Subject))
+ continue;
auto It = SubjectsToRules.find(Subject);
assert(It != SubjectsToRules.end() &&
"This attribute is unsupported by #pragma clang attribute");
@@ -2279,6 +2268,18 @@ static void emitAttributes(RecordKeeper &Records, raw_ostream &OS,
std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
ParsedAttrMap AttrMap = getParsedAttrList(Records);
+ // Helper to print the starting character of an attribute argument. If there
+ // hasn't been an argument yet, it prints an opening parenthesis; otherwise it
+ // prints a comma.
+ OS << "static inline void DelimitAttributeArgument("
+ << "raw_ostream& OS, bool& IsFirst) {\n"
+ << " if (IsFirst) {\n"
+ << " IsFirst = false;\n"
+ << " OS << \"(\";\n"
+ << " } else\n"
+ << " OS << \", \";\n"
+ << "}\n";
+
for (const auto *Attr : Attrs) {
const Record &R = *Attr;
@@ -3522,7 +3523,7 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
return;
const Record *SubjectObj = Attr.getValueAsDef("Subjects");
- std::vector<Record*> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
+ std::vector<Record *> Subjects = SubjectObj->getValueAsListOfDefs("Subjects");
// If the list of subjects is empty, it is assumed that the attribute
// appertains to everything.
@@ -3531,42 +3532,231 @@ static void GenerateAppertainsTo(const Record &Attr, raw_ostream &OS) {
bool Warn = SubjectObj->getValueAsDef("Diag")->getValueAsBit("Warn");
- // Otherwise, generate an appertainsTo check specific to this attribute which
- // checks all of the given subjects against the Decl passed in.
- //
- // If D is null, that means the attribute was not applied to a declaration
- // at all (for instance because it was applied to a type), or that the caller
- // has determined that the check should fail (perhaps prior to the creation
- // of the declaration).
- OS << "bool diagAppertainsToDecl(Sema &S, ";
- OS << "const ParsedAttr &Attr, const Decl *D) const override {\n";
- OS << " if (";
- for (auto I = Subjects.begin(), E = Subjects.end(); I != E; ++I) {
- // If the subject has custom code associated with it, use the generated
- // function for it. The function cannot be inlined into this check (yet)
- // because it requires the subject to be of a specific type, and were that
- // information inlined here, it would not support an attribute with multiple
- // custom subjects.
- if ((*I)->isSubClassOf("SubsetSubject")) {
- OS << "!" << functionNameForCustomAppertainsTo(**I) << "(D)";
- } else {
- OS << "!isa<" << GetSubjectWithSuffix(*I) << ">(D)";
+ // Split the subjects into declaration subjects and statement subjects.
+ // FIXME: subset subjects are added to the declaration list until there are
+ // enough statement attributes with custom subject needs to warrant
+ // the implementation effort.
+ std::vector<Record *> DeclSubjects, StmtSubjects;
+ llvm::copy_if(
+ Subjects, std::back_inserter(DeclSubjects), [](const Record *R) {
+ return R->isSubClassOf("SubsetSubject") || !R->isSubClassOf("StmtNode");
+ });
+ llvm::copy_if(Subjects, std::back_inserter(StmtSubjects),
+ [](const Record *R) { return R->isSubClassOf("StmtNode"); });
+
+ // We should have sorted all of the subjects into two lists.
+ // FIXME: this assertion will be wrong if we ever add type attribute subjects.
+ assert(DeclSubjects.size() + StmtSubjects.size() == Subjects.size());
+
+ if (DeclSubjects.empty()) {
+ // If there are no decl subjects but there are stmt subjects, diagnose
+ // trying to apply a statement attribute to a declaration.
+ if (!StmtSubjects.empty()) {
+ OS << "bool diagAppertainsToDecl(Sema &S, const ParsedAttr &AL, ";
+ OS << "const Decl *D) const override {\n";
+ OS << " S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)\n";
+ OS << " << AL << D->getLocation();\n";
+ OS << " return false;\n";
+ OS << "}\n\n";
}
+ } else {
+ // Otherwise, generate an appertainsTo check specific to this attribute
+ // which checks all of the given subjects against the Decl passed in.
+ OS << "bool diagAppertainsToDecl(Sema &S, ";
+ OS << "const ParsedAttr &Attr, const Decl *D) const override {\n";
+ OS << " if (";
+ for (auto I = DeclSubjects.begin(), E = DeclSubjects.end(); I != E; ++I) {
+ // If the subject has custom code associated with it, use the generated
+ // function for it. The function cannot be inlined into this check (yet)
+ // because it requires the subject to be of a specific type, and were that
+ // information inlined here, it would not support an attribute with
+ // multiple custom subjects.
+ if ((*I)->isSubClassOf("SubsetSubject"))
+ OS << "!" << functionNameForCustomAppertainsTo(**I) << "(D)";
+ else
+ OS << "!isa<" << GetSubjectWithSuffix(*I) << ">(D)";
- if (I + 1 != E)
- OS << " && ";
+ if (I + 1 != E)
+ OS << " && ";
+ }
+ OS << ") {\n";
+ OS << " S.Diag(Attr.getLoc(), diag::";
+ OS << (Warn ? "warn_attribute_wrong_decl_type_str"
+ : "err_attribute_wrong_decl_type_str");
+ OS << ")\n";
+ OS << " << Attr << ";
+ OS << CalculateDiagnostic(*SubjectObj) << ";\n";
+ OS << " return false;\n";
+ OS << " }\n";
+ OS << " return true;\n";
+ OS << "}\n\n";
+ }
+
+ if (StmtSubjects.empty()) {
+ // If there are no stmt subjects but there are decl subjects, diagnose
+ // trying to apply a declaration attribute to a statement.
+ if (!DeclSubjects.empty()) {
+ OS << "bool diagAppertainsToStmt(Sema &S, const ParsedAttr &AL, ";
+ OS << "const Stmt *St) const override {\n";
+ OS << " S.Diag(AL.getLoc(), diag::err_decl_attribute_invalid_on_stmt)\n";
+ OS << " << AL << St->getBeginLoc();\n";
+ OS << " return false;\n";
+ OS << "}\n\n";
+ }
+ } else {
+ // Now, do the same for statements.
+ OS << "bool diagAppertainsToStmt(Sema &S, ";
+ OS << "const ParsedAttr &Attr, const Stmt *St) const override {\n";
+ OS << " if (";
+ for (auto I = StmtSubjects.begin(), E = StmtSubjects.end(); I != E; ++I) {
+ OS << "!isa<" << (*I)->getName() << ">(St)";
+ if (I + 1 != E)
+ OS << " && ";
+ }
+ OS << ") {\n";
+ OS << " S.Diag(Attr.getLoc(), diag::";
+ OS << (Warn ? "warn_attribute_wrong_decl_type_str"
+ : "err_attribute_wrong_decl_type_str");
+ OS << ")\n";
+ OS << " << Attr << ";
+ OS << CalculateDiagnostic(*SubjectObj) << ";\n";
+ OS << " return false;\n";
+ OS << " }\n";
+ OS << " return true;\n";
+ OS << "}\n\n";
+ }
+}
+
+// Generates the mutual exclusion checks. The checks for parsed attributes are
+// written into OS and the checks for merging declaration attributes are
+// written into MergeOS.
+static void GenerateMutualExclusionsChecks(const Record &Attr,
+ const RecordKeeper &Records,
+ raw_ostream &OS,
+ raw_ostream &MergeDeclOS,
+ raw_ostream &MergeStmtOS) {
+ // Find all of the definitions that inherit from MutualExclusions and include
+ // the given attribute in the list of exclusions to generate the
+ // diagMutualExclusion() check.
+ std::vector<Record *> ExclusionsList =
+ Records.getAllDerivedDefinitions("MutualExclusions");
+
+ // We don't do any of this magic for type attributes yet.
+ if (Attr.isSubClassOf("TypeAttr"))
+ return;
+
+ // This means the attribute is either a statement attribute, a decl
+ // attribute, or both; find out which.
+ bool CurAttrIsStmtAttr =
+ Attr.isSubClassOf("StmtAttr") || Attr.isSubClassOf("DeclOrStmtAttr");
+ bool CurAttrIsDeclAttr =
+ !CurAttrIsStmtAttr || Attr.isSubClassOf("DeclOrStmtAttr");
+
+ std::vector<std::string> DeclAttrs, StmtAttrs;
+
+ for (const Record *Exclusion : ExclusionsList) {
+ std::vector<Record *> MutuallyExclusiveAttrs =
+ Exclusion->getValueAsListOfDefs("Exclusions");
+ auto IsCurAttr = [Attr](const Record *R) {
+ return R->getName() == Attr.getName();
+ };
+ if (llvm::any_of(MutuallyExclusiveAttrs, IsCurAttr)) {
+ // This list of exclusions includes the attribute we're looking for, so
+ // add the exclusive attributes to the proper list for checking.
+ for (const Record *AttrToExclude : MutuallyExclusiveAttrs) {
+ if (IsCurAttr(AttrToExclude))
+ continue;
+
+ if (CurAttrIsStmtAttr)
+ StmtAttrs.push_back((AttrToExclude->getName() + "Attr").str());
+ if (CurAttrIsDeclAttr)
+ DeclAttrs.push_back((AttrToExclude->getName() + "Attr").str());
+ }
+ }
+ }
+
+ // If there are any decl or stmt attributes, silence -Woverloaded-virtual
+ // warnings for them both.
+ if (!DeclAttrs.empty() || !StmtAttrs.empty())
+ OS << " using ParsedAttrInfo::diagMutualExclusion;\n\n";
+
+ // If we discovered any decl or stmt attributes to test for, generate the
+ // predicates for them now.
+ if (!DeclAttrs.empty()) {
+ // Generate the ParsedAttrInfo subclass logic for declarations.
+ OS << " bool diagMutualExclusion(Sema &S, const ParsedAttr &AL, "
+ << "const Decl *D) const override {\n";
+ for (const std::string &A : DeclAttrs) {
+ OS << " if (const auto *A = D->getAttr<" << A << ">()) {\n";
+ OS << " S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)"
+ << " << AL << A;\n";
+ OS << " S.Diag(A->getLocation(), diag::note_conflicting_attribute);";
+ OS << " \nreturn false;\n";
+ OS << " }\n";
+ }
+ OS << " return true;\n";
+ OS << " }\n\n";
+
+ // Also generate the declaration attribute merging logic if the current
+ // attribute is one that can be inherited on a declaration. It is assumed
+ // this code will be executed in the context of a function with parameters:
+ // Sema &S, Decl *D, Attr *A and that returns a bool (false on diagnostic,
+ // true on success).
+ if (Attr.isSubClassOf("InheritableAttr")) {
+ MergeDeclOS << " if (const auto *Second = dyn_cast<"
+ << (Attr.getName() + "Attr").str() << ">(A)) {\n";
+ for (const std::string &A : DeclAttrs) {
+ MergeDeclOS << " if (const auto *First = D->getAttr<" << A
+ << ">()) {\n";
+ MergeDeclOS << " S.Diag(First->getLocation(), "
+ << "diag::err_attributes_are_not_compatible) << First << "
+ << "Second;\n";
+ MergeDeclOS << " S.Diag(Second->getLocation(), "
+ << "diag::note_conflicting_attribute);\n";
+ MergeDeclOS << " return false;\n";
+ MergeDeclOS << " }\n";
+ }
+ MergeDeclOS << " return true;\n";
+ MergeDeclOS << " }\n";
+ }
+ }
+
+ // Statement attributes are a bit different from declarations. With
+ // declarations, each attribute is added to the declaration as it is
+ // processed, and so you can look on the Decl * itself to see if there is a
+ // conflicting attribute. Statement attributes are processed as a group
+ // because AttributedStmt needs to tail-allocate all of the attribute nodes
+ // at once. This means we cannot check whether the statement already contains
+ // an attribute to check for the conflict. Instead, we need to check whether
+ // the given list of semantic attributes contains any conflicts. It is assumed
+ // this code will be executed in the context of a function with parameters:
+ // Sema &S, const SmallVectorImpl<const Attr *> &C. The code will be within a
+ // loop which loops over the container C with a loop variable named A to
+ // represent the current attribute to check for conflicts.
+ //
+ // FIXME: it would be nice not to walk over the list of potential attributes
+ // to apply to the statement more than once, but statements typically don't
+ // have long lists of attributes on them, so re-walking the list should not
+ // be an expensive operation.
+ if (!StmtAttrs.empty()) {
+ MergeStmtOS << " if (const auto *Second = dyn_cast<"
+ << (Attr.getName() + "Attr").str() << ">(A)) {\n";
+ MergeStmtOS << " auto Iter = llvm::find_if(C, [](const Attr *Check) "
+ << "{ return isa<";
+ interleave(
+ StmtAttrs, [&](const std::string &Name) { MergeStmtOS << Name; },
+ [&] { MergeStmtOS << ", "; });
+ MergeStmtOS << ">(Check); });\n";
+ MergeStmtOS << " if (Iter != C.end()) {\n";
+ MergeStmtOS << " S.Diag((*Iter)->getLocation(), "
+ << "diag::err_attributes_are_not_compatible) << *Iter << "
+ << "Second;\n";
+ MergeStmtOS << " S.Diag(Second->getLocation(), "
+ << "diag::note_conflicting_attribute);\n";
+ MergeStmtOS << " return false;\n";
+ MergeStmtOS << " }\n";
+ MergeStmtOS << " }\n";
}
- OS << ") {\n";
- OS << " S.Diag(Attr.getLoc(), diag::";
- OS << (Warn ? "warn_attribute_wrong_decl_type_str" :
- "err_attribute_wrong_decl_type_str");
- OS << ")\n";
- OS << " << Attr << ";
- OS << CalculateDiagnostic(*SubjectObj) << ";\n";
- OS << " return false;\n";
- OS << " }\n";
- OS << " return true;\n";
- OS << "}\n\n";
}
static void
@@ -3717,6 +3907,8 @@ static bool IsKnownToGCC(const Record &Attr) {
void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
emitSourceFileHeader("Parsed attribute helpers", OS);
+ OS << "#if !defined(WANT_DECL_MERGE_LOGIC) && "
+ << "!defined(WANT_STMT_MERGE_LOGIC)\n";
PragmaClangAttributeSupport &PragmaAttributeSupport =
getPragmaAttributeSupport(Records);
@@ -3737,6 +3929,12 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
GenerateCustomAppertainsTo(*Subject, OS);
}
+ // This stream is used to collect all of the declaration attribute merging
+ // logic for performing mutual exclusion checks. This gets emitted at the
+ // end of the file in a helper function of its own.
+ std::string DeclMergeChecks, StmtMergeChecks;
+ raw_string_ostream MergeDeclOS(DeclMergeChecks), MergeStmtOS(StmtMergeChecks);
+
// Generate a ParsedAttrInfo struct for each of the attributes.
for (auto I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
// TODO: If the attribute's kind appears in the list of duplicates, that is
@@ -3790,6 +3988,7 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
OS << " Spellings = " << I->first << "Spellings;\n";
OS << " }\n";
GenerateAppertainsTo(Attr, OS);
+ GenerateMutualExclusionsChecks(Attr, Records, OS, MergeDeclOS, MergeStmtOS);
GenerateLangOptRequirements(Attr, OS);
GenerateTargetRequirements(Attr, Dupes, OS);
GenerateSpellingIndexToSemanticSpelling(Attr, OS);
@@ -3809,6 +4008,28 @@ void EmitClangAttrParsedAttrImpl(RecordKeeper &Records, raw_ostream &OS) {
// Generate the attribute match rules.
emitAttributeMatchRules(PragmaAttributeSupport, OS);
+
+ OS << "#elif defined(WANT_DECL_MERGE_LOGIC)\n\n";
+
+ // Write out the declaration merging check logic.
+ OS << "static bool DiagnoseMutualExclusions(Sema &S, const NamedDecl *D, "
+ << "const Attr *A) {\n";
+ OS << MergeDeclOS.str();
+ OS << " return true;\n";
+ OS << "}\n\n";
+
+ OS << "#elif defined(WANT_STMT_MERGE_LOGIC)\n\n";
+
+ // Write out the statement merging check logic.
+ OS << "static bool DiagnoseMutualExclusions(Sema &S, "
+ << "const SmallVectorImpl<const Attr *> &C) {\n";
+ OS << " for (const Attr *A : C) {\n";
+ OS << MergeStmtOS.str();
+ OS << " }\n";
+ OS << " return true;\n";
+ OS << "}\n\n";
+
+ OS << "#endif\n";
}
// Emits the kind list of parsed attributes
@@ -4233,9 +4454,13 @@ void EmitTestPragmaAttributeSupportedAttributes(RecordKeeper &Records,
std::vector<Record *> Subjects =
SubjectObj->getValueAsListOfDefs("Subjects");
OS << " (";
+ bool PrintComma = false;
for (const auto &Subject : llvm::enumerate(Subjects)) {
- if (Subject.index())
+ if (!isSupportedPragmaClangAttributeSubject(*Subject.value()))
+ continue;
+ if (PrintComma)
OS << ", ";
+ PrintComma = true;
PragmaClangAttributeSupport::RuleOrAggregateRuleSet &RuleSet =
Support.SubjectsToRules.find(Subject.value())->getSecond();
if (RuleSet.isRule()) {
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 430895d8425f..014c1adcd809 100644
--- a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -133,9 +133,9 @@ namespace {
std::vector<std::string> SubGroups;
unsigned IDNo;
- const Record *ExplicitDef;
+ llvm::SmallVector<const Record *, 1> Defs;
- GroupInfo() : IDNo(0), ExplicitDef(nullptr) {}
+ GroupInfo() : IDNo(0) {}
};
} // end anonymous namespace.
@@ -150,12 +150,6 @@ static bool diagGroupBeforeByName(const Record *LHS, const Record *RHS) {
RHS->getValueAsString("GroupName");
}
-static bool beforeThanCompareGroups(const GroupInfo *LHS, const GroupInfo *RHS){
- assert(!LHS->DiagsInGroup.empty() && !RHS->DiagsInGroup.empty());
- return beforeThanCompare(LHS->DiagsInGroup.front(),
- RHS->DiagsInGroup.front());
-}
-
/// Invert the 1-[0/1] mapping of diags to group into a one to many
/// mapping of groups to diags in the group.
static void groupDiagnostics(const std::vector<Record*> &Diags,
@@ -174,24 +168,13 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
}
- typedef SmallPtrSet<GroupInfo *, 16> GroupSetTy;
- GroupSetTy ImplicitGroups;
-
// Add all DiagGroup's to the DiagsInGroup list to make sure we pick up empty
// groups (these are warnings that GCC supports that clang never produces).
for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
Record *Group = DiagGroups[i];
GroupInfo &GI =
DiagsInGroup[std::string(Group->getValueAsString("GroupName"))];
- if (Group->isAnonymous()) {
- if (GI.DiagsInGroup.size() > 1)
- ImplicitGroups.insert(&GI);
- } else {
- if (GI.ExplicitDef)
- assert(GI.ExplicitDef == Group);
- else
- GI.ExplicitDef = Group;
- }
+ GI.Defs.push_back(Group);
std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
@@ -205,61 +188,51 @@ static void groupDiagnostics(const std::vector<Record*> &Diags,
I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I, ++IDNo)
I->second.IDNo = IDNo;
- // Sort the implicit groups, so we can warn about them deterministically.
- SmallVector<GroupInfo *, 16> SortedGroups(ImplicitGroups.begin(),
- ImplicitGroups.end());
- for (SmallVectorImpl<GroupInfo *>::iterator I = SortedGroups.begin(),
- E = SortedGroups.end();
- I != E; ++I) {
- MutableArrayRef<const Record *> GroupDiags = (*I)->DiagsInGroup;
- llvm::sort(GroupDiags, beforeThanCompare);
- }
- llvm::sort(SortedGroups, beforeThanCompareGroups);
+ // Warn if the same group is defined more than once (including implicitly).
+ for (auto &Group : DiagsInGroup) {
+ if (Group.second.Defs.size() == 1 &&
+ (!Group.second.Defs.front()->isAnonymous() ||
+ Group.second.DiagsInGroup.size() <= 1))
+ continue;
- // Warn about the same group being used anonymously in multiple places.
- for (SmallVectorImpl<GroupInfo *>::const_iterator I = SortedGroups.begin(),
- E = SortedGroups.end();
- I != E; ++I) {
- ArrayRef<const Record *> GroupDiags = (*I)->DiagsInGroup;
-
- if ((*I)->ExplicitDef) {
- std::string Name =
- std::string((*I)->ExplicitDef->getValueAsString("GroupName"));
- for (ArrayRef<const Record *>::const_iterator DI = GroupDiags.begin(),
- DE = GroupDiags.end();
- DI != DE; ++DI) {
- const DefInit *GroupInit = cast<DefInit>((*DI)->getValueInit("Group"));
- const Record *NextDiagGroup = GroupInit->getDef();
- if (NextDiagGroup == (*I)->ExplicitDef)
- continue;
-
- SrcMgr.PrintMessage((*DI)->getLoc().front(),
- SourceMgr::DK_Error,
- Twine("group '") + Name +
- "' is referred to anonymously");
- SrcMgr.PrintMessage((*I)->ExplicitDef->getLoc().front(),
- SourceMgr::DK_Note, "group defined here");
+ bool First = true;
+ for (const Record *Def : Group.second.Defs) {
+ // Skip implicit definitions from diagnostics; we'll report those
+ // separately below.
+ bool IsImplicit = false;
+ for (const Record *Diag : Group.second.DiagsInGroup) {
+ if (cast<DefInit>(Diag->getValueInit("Group"))->getDef() == Def) {
+ IsImplicit = true;
+ break;
+ }
}
- } else {
- // If there's no existing named group, we should just warn once and use
- // notes to list all the other cases.
- ArrayRef<const Record *>::const_iterator DI = GroupDiags.begin(),
- DE = GroupDiags.end();
- assert(DI != DE && "We only care about groups with multiple uses!");
-
- const DefInit *GroupInit = cast<DefInit>((*DI)->getValueInit("Group"));
- const Record *NextDiagGroup = GroupInit->getDef();
- std::string Name =
- std::string(NextDiagGroup->getValueAsString("GroupName"));
-
- SrcMgr.PrintMessage((*DI)->getLoc().front(),
- SourceMgr::DK_Error,
- Twine("group '") + Name +
- "' is referred to anonymously");
-
- for (++DI; DI != DE; ++DI) {
- SrcMgr.PrintMessage((*DI)->getLoc().front(),
- SourceMgr::DK_Note, "also referenced here");
+ if (IsImplicit)
+ continue;
+
+ llvm::SMLoc Loc = Def->getLoc().front();
+ if (First) {
+ SrcMgr.PrintMessage(Loc, SourceMgr::DK_Error,
+ Twine("group '") + Group.first +
+ "' is defined more than once");
+ First = false;
+ } else {
+ SrcMgr.PrintMessage(Loc, SourceMgr::DK_Note, "also defined here");
+ }
+ }
+
+ for (const Record *Diag : Group.second.DiagsInGroup) {
+ if (!cast<DefInit>(Diag->getValueInit("Group"))->getDef()->isAnonymous())
+ continue;
+
+ llvm::SMLoc Loc = Diag->getLoc().front();
+ if (First) {
+ SrcMgr.PrintMessage(Loc, SourceMgr::DK_Error,
+ Twine("group '") + Group.first +
+ "' is implicitly defined more than once");
+ First = false;
+ } else {
+ SrcMgr.PrintMessage(Loc, SourceMgr::DK_Note,
+ "also implicitly defined here");
}
}
}
@@ -584,7 +557,7 @@ struct PluralPiece : SelectPiece {
struct DiffPiece : Piece {
DiffPiece() : Piece(DiffPieceClass) {}
- Piece *Options[2] = {};
+ Piece *Parts[4] = {};
int Indexes[2] = {};
static bool classof(const Piece *P) {
@@ -660,9 +633,18 @@ private:
}
DiagText(DiagnosticTextBuilder &Builder, StringRef Text)
- : Builder(Builder), Root(parseDiagText(Text)) {}
-
- Piece *parseDiagText(StringRef &Text, bool Nested = false);
+ : Builder(Builder), Root(parseDiagText(Text, StopAt::End)) {}
+
+ enum class StopAt {
+ // Parse until the end of the string.
+ End,
+ // Additionally stop if we hit a non-nested '|' or '}'.
+ PipeOrCloseBrace,
+ // Additionally stop if we hit a non-nested '$'.
+ Dollar,
+ };
+
+ Piece *parseDiagText(StringRef &Text, StopAt Stop);
int parseModifier(StringRef &) const;
public:
@@ -928,7 +910,24 @@ struct DiagTextDocPrinter : DiagTextVisitor<DiagTextDocPrinter> {
void VisitPlural(PluralPiece *P) { VisitSelect(P); }
- void VisitDiff(DiffPiece *P) { Visit(P->Options[1]); }
+ void VisitDiff(DiffPiece *P) {
+ // Render %diff{a $ b $ c|d}e,f as %select{a %e b %f c|d}.
+ PlaceholderPiece E(MT_Placeholder, P->Indexes[0]);
+ PlaceholderPiece F(MT_Placeholder, P->Indexes[1]);
+
+ MultiPiece FirstOption;
+ FirstOption.Pieces.push_back(P->Parts[0]);
+ FirstOption.Pieces.push_back(&E);
+ FirstOption.Pieces.push_back(P->Parts[1]);
+ FirstOption.Pieces.push_back(&F);
+ FirstOption.Pieces.push_back(P->Parts[2]);
+
+ SelectPiece Select(MT_Diff);
+ Select.Options.push_back(&FirstOption);
+ Select.Options.push_back(P->Parts[3]);
+
+ VisitSelect(&Select);
+ }
std::vector<std::string> &RST;
};
@@ -982,9 +981,13 @@ public:
void VisitDiff(DiffPiece *P) {
Result += "%diff{";
- Visit(P->Options[0]);
+ Visit(P->Parts[0]);
+ Result += "$";
+ Visit(P->Parts[1]);
+ Result += "$";
+ Visit(P->Parts[2]);
Result += "|";
- Visit(P->Options[1]);
+ Visit(P->Parts[3]);
Result += "}";
addInt(mapIndex(P->Indexes[0]));
Result += ",";
@@ -1009,16 +1012,19 @@ int DiagnosticTextBuilder::DiagText::parseModifier(StringRef &Text) const {
}
Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
- bool Nested) {
+ StopAt Stop) {
std::vector<Piece *> Parsed;
+ constexpr llvm::StringLiteral StopSets[] = {"%", "%|}", "%|}$"};
+ llvm::StringRef StopSet = StopSets[static_cast<int>(Stop)];
+
while (!Text.empty()) {
size_t End = (size_t)-2;
do
- End = Nested ? Text.find_first_of("%|}", End + 2)
- : Text.find_first_of('%', End + 2);
- while (End < Text.size() - 1 && Text[End] == '%' &&
- (Text[End + 1] == '%' || Text[End + 1] == '|'));
+ End = Text.find_first_of(StopSet, End + 2);
+ while (
+ End < Text.size() - 1 && Text[End] == '%' &&
+ (Text[End + 1] == '%' || Text[End + 1] == '|' || Text[End + 1] == '$'));
if (End) {
Parsed.push_back(New<TextPiece>(Text.slice(0, End), "diagtext"));
@@ -1027,7 +1033,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
break;
}
- if (Text[0] == '|' || Text[0] == '}')
+ if (Text[0] == '|' || Text[0] == '}' || Text[0] == '$')
break;
// Drop the '%'.
@@ -1050,6 +1056,12 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
.Case("", MT_Placeholder)
.Default(MT_Unknown);
+ auto ExpectAndConsume = [&](StringRef Prefix) {
+ if (!Text.consume_front(Prefix))
+ Builder.PrintFatalError("expected '" + Prefix + "' while parsing %" +
+ Modifier);
+ };
+
switch (ModType) {
case MT_Unknown:
Builder.PrintFatalError("Unknown modifier type: " + Modifier);
@@ -1057,11 +1069,11 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
SelectPiece *Select = New<SelectPiece>(MT_Select);
do {
Text = Text.drop_front(); // '{' or '|'
- Select->Options.push_back(parseDiagText(Text, true));
+ Select->Options.push_back(
+ parseDiagText(Text, StopAt::PipeOrCloseBrace));
assert(!Text.empty() && "malformed %select");
} while (Text.front() == '|');
- // Drop the trailing '}'.
- Text = Text.drop_front(1);
+ ExpectAndConsume("}");
Select->Index = parseModifier(Text);
Parsed.push_back(Select);
continue;
@@ -1078,24 +1090,24 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
Plural->OptionPrefixes.push_back(
New<TextPiece>(Text.slice(0, End), "diagtext"));
Text = Text.slice(End, StringRef::npos);
- Plural->Options.push_back(parseDiagText(Text, true));
- assert(!Text.empty() && "malformed %select");
+ Plural->Options.push_back(
+ parseDiagText(Text, StopAt::PipeOrCloseBrace));
+ assert(!Text.empty() && "malformed %plural");
} while (Text.front() == '|');
- // Drop the trailing '}'.
- Text = Text.drop_front(1);
+ ExpectAndConsume("}");
Plural->Index = parseModifier(Text);
Parsed.push_back(Plural);
continue;
}
case MT_Sub: {
SubstitutionPiece *Sub = New<SubstitutionPiece>();
- Text = Text.drop_front(); // '{'
+ ExpectAndConsume("{");
size_t NameSize = Text.find_first_of('}');
assert(NameSize != size_t(-1) && "failed to find the end of the name");
assert(NameSize != 0 && "empty name?");
Sub->Name = Text.substr(0, NameSize).str();
Text = Text.drop_front(NameSize);
- Text = Text.drop_front(); // '}'
+ ExpectAndConsume("}");
if (!Text.empty()) {
while (true) {
if (!isdigit(Text[0]))
@@ -1113,14 +1125,17 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
}
case MT_Diff: {
DiffPiece *Diff = New<DiffPiece>();
- Text = Text.drop_front(); // '{'
- Diff->Options[0] = parseDiagText(Text, true);
- Text = Text.drop_front(); // '|'
- Diff->Options[1] = parseDiagText(Text, true);
-
- Text = Text.drop_front(); // '}'
+ ExpectAndConsume("{");
+ Diff->Parts[0] = parseDiagText(Text, StopAt::Dollar);
+ ExpectAndConsume("$");
+ Diff->Parts[1] = parseDiagText(Text, StopAt::Dollar);
+ ExpectAndConsume("$");
+ Diff->Parts[2] = parseDiagText(Text, StopAt::PipeOrCloseBrace);
+ ExpectAndConsume("|");
+ Diff->Parts[3] = parseDiagText(Text, StopAt::PipeOrCloseBrace);
+ ExpectAndConsume("}");
Diff->Indexes[0] = parseModifier(Text);
- Text = Text.drop_front(); // ','
+ ExpectAndConsume(",");
Diff->Indexes[1] = parseModifier(Text);
Parsed.push_back(Diff);
continue;
diff --git a/clang/utils/TableGen/ClangOpcodesEmitter.cpp b/clang/utils/TableGen/ClangOpcodesEmitter.cpp
index e5bfac5ba1b6..ffeedcdf0ee2 100644
--- a/clang/utils/TableGen/ClangOpcodesEmitter.cpp
+++ b/clang/utils/TableGen/ClangOpcodesEmitter.cpp
@@ -122,13 +122,13 @@ void ClangOpcodesEmitter::EmitInterp(raw_ostream &OS, StringRef N, Record *R) {
// Emit calls to read arguments.
for (size_t I = 0, N = Args.size(); I < N; ++I) {
- OS << "\tauto V" << I;
+ OS << " auto V" << I;
OS << " = ";
OS << "PC.read<" << Args[I]->getValueAsString("Name") << ">();\n";
}
// Emit a call to the template method and pass arguments.
- OS << "\tif (!" << N;
+ OS << " if (!" << N;
PrintTypes(OS, TS);
OS << "(S";
if (ChangesPC)
@@ -140,15 +140,15 @@ void ClangOpcodesEmitter::EmitInterp(raw_ostream &OS, StringRef N, Record *R) {
for (size_t I = 0, N = Args.size(); I < N; ++I)
OS << ", V" << I;
OS << "))\n";
- OS << "\t\treturn false;\n";
+ OS << " return false;\n";
// Bail out if interpreter returned.
if (CanReturn) {
- OS << "\tif (!S.Current || S.Current->isRoot())\n";
- OS << "\t\treturn true;\n";
+ OS << " if (!S.Current || S.Current->isRoot())\n";
+ OS << " return true;\n";
}
- OS << "\tcontinue;\n";
+ OS << " continue;\n";
OS << "}\n";
});
OS << "#endif\n";
@@ -158,14 +158,14 @@ void ClangOpcodesEmitter::EmitDisasm(raw_ostream &OS, StringRef N, Record *R) {
OS << "#ifdef GET_DISASM\n";
Enumerate(R, N, [R, &OS](ArrayRef<Record *>, const Twine &ID) {
OS << "case OP_" << ID << ":\n";
- OS << "\tPrintName(\"" << ID << "\");\n";
- OS << "\tOS << \"\\t\"";
+ OS << " PrintName(\"" << ID << "\");\n";
+ OS << " OS << \"\\t\"";
for (auto *Arg : R->getValueAsListOfDefs("Args"))
OS << " << PC.read<" << Arg->getValueAsString("Name") << ">() << \" \"";
- OS << "<< \"\\n\";\n";
- OS << "\tcontinue;\n";
+ OS << " << \"\\n\";\n";
+ OS << " continue;\n";
});
OS << "#endif\n";
}
@@ -181,11 +181,11 @@ void ClangOpcodesEmitter::EmitEmitter(raw_ostream &OS, StringRef N, Record *R) {
// Emit the list of arguments.
OS << "bool ByteCodeEmitter::emit" << ID << "(";
for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << Args[I]->getValueAsString("Name") << " A" << I << ",";
+ OS << Args[I]->getValueAsString("Name") << " A" << I << ", ";
OS << "const SourceInfo &L) {\n";
// Emit a call to write the opcodes.
- OS << "\treturn emitOp<";
+ OS << " return emitOp<";
for (size_t I = 0, N = Args.size(); I < N; ++I) {
if (I != 0)
OS << ", ";
@@ -250,7 +250,7 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
// Emit the dispatch implementation in the source.
OS << "#if defined(GET_EVAL_IMPL) || defined(GET_LINK_IMPL)\n";
- OS << "bool \n";
+ OS << "bool\n";
OS << "#if defined(GET_EVAL_IMPL)\n";
OS << "EvalEmitter\n";
OS << "#else\n";
@@ -271,13 +271,14 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
// Custom evaluator methods dispatch to template methods.
if (R->getValueAsBit("HasCustomEval")) {
OS << "#ifdef GET_LINK_IMPL\n";
- OS << "return emit" << ID << "\n";
+ OS << " return emit" << ID << "\n";
OS << "#else\n";
- OS << "return emit" << N;
+ OS << " return emit" << N;
PrintTypes(OS, TS);
OS << "\n#endif\n";
+ OS << " ";
} else {
- OS << "return emit" << ID;
+ OS << " return emit" << ID;
}
OS << "(";
@@ -290,19 +291,19 @@ void ClangOpcodesEmitter::EmitGroup(raw_ostream &OS, StringRef N, Record *R) {
// Print a switch statement selecting T.
if (auto *TypeClass = dyn_cast<DefInit>(Types->getElement(I))) {
- OS << "switch (T" << I << "){\n";
+ OS << " switch (T" << I << ") {\n";
auto Cases = TypeClass->getDef()->getValueAsListOfDefs("Types");
for (auto *Case : Cases) {
- OS << "case PT_" << Case->getName() << ":\n";
+ OS << " case PT_" << Case->getName() << ":\n";
TS.push_back(Case);
Rec(I + 1, ID + Case->getName());
TS.pop_back();
}
// Emit a default case if not all types are present.
if (Cases.size() < NumTypes)
- OS << "default: llvm_unreachable(\"invalid type\");\n";
- OS << "}\n";
- OS << "llvm_unreachable(\"invalid enum value\");\n";
+ OS << " default: llvm_unreachable(\"invalid type\");\n";
+ OS << " }\n";
+ OS << " llvm_unreachable(\"invalid enum value\");\n";
} else {
PrintFatalError("Expected a type class");
}
@@ -323,12 +324,12 @@ void ClangOpcodesEmitter::EmitEval(raw_ostream &OS, StringRef N, Record *R) {
OS << "bool EvalEmitter::emit" << ID << "(";
for (size_t I = 0, N = Args.size(); I < N; ++I)
- OS << Args[I]->getValueAsString("Name") << " A" << I << ",";
+ OS << Args[I]->getValueAsString("Name") << " A" << I << ", ";
OS << "const SourceInfo &L) {\n";
- OS << "if (!isActive()) return true;\n";
- OS << "CurrentSource = L;\n";
+ OS << " if (!isActive()) return true;\n";
+ OS << " CurrentSource = L;\n";
- OS << "return " << N;
+ OS << " return " << N;
PrintTypes(OS, TS);
OS << "(S, OpPC";
for (size_t I = 0, N = Args.size(); I < N; ++I)
diff --git a/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp b/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
index 7c63cf51ecfa..a4cb5b7cacd9 100644
--- a/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
+++ b/clang/utils/TableGen/ClangOpenCLBuiltinEmitter.cpp
@@ -8,6 +8,37 @@
//
//===----------------------------------------------------------------------===//
//
+// These backends consume the definitions of OpenCL builtin functions in
+// clang/lib/Sema/OpenCLBuiltins.td and produce builtin handling code for
+// inclusion in SemaLookup.cpp, or a test file that calls all declared builtins.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TableGenBackends.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/StringMatcher.h"
+#include "llvm/TableGen/TableGenBackend.h"
+
+using namespace llvm;
+
+namespace {
+
+// A list of signatures that are shared by one or more builtin functions.
+struct BuiltinTableEntries {
+ SmallVector<StringRef, 4> Names;
+ std::vector<std::pair<const Record *, unsigned>> Signatures;
+};
+
// This tablegen backend emits code for checking whether a function is an
// OpenCL builtin function. If so, all overloads of this function are
// added to the LookupResult. The generated include file is used by
@@ -48,39 +79,11 @@
// Find out whether a string matches an existing OpenCL builtin function
// name and return an index into BuiltinTable and the number of overloads.
//
-// * void OCL2Qual(ASTContext&, OpenCLTypeStruct, std::vector<QualType>&)
+// * void OCL2Qual(Sema&, OpenCLTypeStruct, std::vector<QualType>&)
// Convert an OpenCLTypeStruct type to a list of QualType instances.
// One OpenCLTypeStruct can represent multiple types, primarily when using
// GenTypes.
//
-//===----------------------------------------------------------------------===//
-
-#include "TableGenBackends.h"
-#include "llvm/ADT/MapVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
-#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSet.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/raw_ostream.h"
-#include "llvm/TableGen/Error.h"
-#include "llvm/TableGen/Record.h"
-#include "llvm/TableGen/StringMatcher.h"
-#include "llvm/TableGen/TableGenBackend.h"
-#include <set>
-
-using namespace llvm;
-
-namespace {
-
-// A list of signatures that are shared by one or more builtin functions.
-struct BuiltinTableEntries {
- SmallVector<StringRef, 4> Names;
- std::vector<std::pair<const Record *, unsigned>> Signatures;
-};
-
class BuiltinNameEmitter {
public:
BuiltinNameEmitter(RecordKeeper &Records, raw_ostream &OS)
@@ -229,6 +232,64 @@ private:
// same entry (<I1, I2, I3>).
MapVector<BuiltinIndexListTy *, BuiltinTableEntries> SignatureListMap;
};
+
+// OpenCL builtin test generator. This class processes the same TableGen input
+// as BuiltinNameEmitter, but generates a .cl file that contains a call to each
+// builtin function described in the .td input.
+class OpenCLBuiltinTestEmitter {
+public:
+ OpenCLBuiltinTestEmitter(RecordKeeper &Records, raw_ostream &OS)
+ : Records(Records), OS(OS) {}
+
+ // Entrypoint to generate the functions for testing all OpenCL builtin
+ // functions.
+ void emit();
+
+private:
+ struct TypeFlags {
+ TypeFlags() : IsConst(false), IsVolatile(false), IsPointer(false) {}
+ bool IsConst : 1;
+ bool IsVolatile : 1;
+ bool IsPointer : 1;
+ StringRef AddrSpace;
+ };
+
+ // Return a string representation of the given type, such that it can be
+ // used as a type in OpenCL C code.
+ std::string getTypeString(const Record *Type, TypeFlags Flags,
+ int VectorSize) const;
+
+ // Return the type(s) and vector size(s) for the given type. For
+ // non-GenericTypes, the resulting vectors will contain 1 element. For
+ // GenericTypes, the resulting vectors typically contain multiple elements.
+ void getTypeLists(Record *Type, TypeFlags &Flags,
+ std::vector<Record *> &TypeList,
+ std::vector<int64_t> &VectorList) const;
+
+ // Expand the TableGen Records representing a builtin function signature into
+ // one or more function signatures. Return them as a vector of a vector of
+ // strings, with each string containing an OpenCL C type and optional
+ // qualifiers.
+ //
+ // The Records may contain GenericTypes, which expand into multiple
+ // signatures. Repeated occurrences of GenericType in a signature expand to
+ // the same types. For example [char, FGenType, FGenType] expands to:
+ // [char, float, float]
+ // [char, float2, float2]
+ // [char, float3, float3]
+ // ...
+ void
+ expandTypesInSignature(const std::vector<Record *> &Signature,
+ SmallVectorImpl<SmallVector<std::string, 2>> &Types);
+
+ // Contains OpenCL builtin functions and related information, stored as
+ // Record instances. They are coming from the associated TableGen file.
+ RecordKeeper &Records;
+
+ // The output file.
+ raw_ostream &OS;
+};
+
} // namespace
void BuiltinNameEmitter::Emit() {
@@ -340,10 +401,8 @@ struct OpenCLBuiltinStruct {
const bool IsConv : 1;
// OpenCL extension(s) required for this overload.
const unsigned short Extension;
- // First OpenCL version in which this overload was introduced (e.g. CL20).
- const unsigned short MinVersion;
- // First OpenCL version in which this overload was removed (e.g. CL20).
- const unsigned short MaxVersion;
+ // OpenCL versions in which this overload is available.
+ const unsigned short Versions;
};
)";
@@ -491,6 +550,29 @@ void BuiltinNameEmitter::EmitSignatureTable() {
OS << "};\n\n";
}
+// Encode a range MinVersion..MaxVersion into a single bit mask that can be
+// checked against LangOpts using isOpenCLVersionContainedInMask().
+// This must be kept in sync with OpenCLVersionID in OpenCLOptions.h.
+// (Including OpenCLOptions.h here would be a layering violation.)
+static unsigned short EncodeVersions(unsigned int MinVersion,
+ unsigned int MaxVersion) {
+ unsigned short Encoded = 0;
+
+ // A maximum version of 0 means available in all later versions.
+ if (MaxVersion == 0) {
+ MaxVersion = UINT_MAX;
+ }
+
+ unsigned VersionIDs[] = {100, 110, 120, 200, 300};
+ for (unsigned I = 0; I < sizeof(VersionIDs) / sizeof(VersionIDs[0]); I++) {
+ if (VersionIDs[I] >= MinVersion && VersionIDs[I] < MaxVersion) {
+ Encoded |= 1 << I;
+ }
+ }
+
+ return Encoded;
+}
+
void BuiltinNameEmitter::EmitBuiltinTable() {
unsigned Index = 0;
@@ -505,16 +587,18 @@ void BuiltinNameEmitter::EmitBuiltinTable() {
for (const auto &Overload : SLM.second.Signatures) {
StringRef ExtName = Overload.first->getValueAsDef("Extension")->getName();
+ unsigned int MinVersion =
+ Overload.first->getValueAsDef("MinVersion")->getValueAsInt("ID");
+ unsigned int MaxVersion =
+ Overload.first->getValueAsDef("MaxVersion")->getValueAsInt("ID");
+
OS << " { " << Overload.second << ", "
<< Overload.first->getValueAsListOfDefs("Signature").size() << ", "
<< (Overload.first->getValueAsBit("IsPure")) << ", "
<< (Overload.first->getValueAsBit("IsConst")) << ", "
<< (Overload.first->getValueAsBit("IsConv")) << ", "
<< FunctionExtensionIndex[ExtName] << ", "
- << Overload.first->getValueAsDef("MinVersion")->getValueAsInt("ID")
- << ", "
- << Overload.first->getValueAsDef("MaxVersion")->getValueAsInt("ID")
- << " },\n";
+ << EncodeVersions(MinVersion, MaxVersion) << " },\n";
Index++;
}
}
@@ -628,6 +712,9 @@ static std::pair<unsigned, unsigned> isOpenCLBuiltin(llvm::StringRef Name) {
void BuiltinNameEmitter::EmitQualTypeFinder() {
OS << R"(
+static QualType getOpenCLEnumType(Sema &S, llvm::StringRef Name);
+static QualType getOpenCLTypedefType(Sema &S, llvm::StringRef Name);
+
// Convert an OpenCLTypeStruct type to a list of QualTypes.
// Generic types represent multiple types and vector sizes, thus a vector
// is returned. The conversion is done in two steps:
@@ -636,8 +723,9 @@ void BuiltinNameEmitter::EmitQualTypeFinder() {
// or a single scalar type for non generic types.
// Step 2: Qualifiers and other type properties such as vector size are
// applied.
-static void OCL2Qual(ASTContext &Context, const OpenCLTypeStruct &Ty,
+static void OCL2Qual(Sema &S, const OpenCLTypeStruct &Ty,
llvm::SmallVectorImpl<QualType> &QT) {
+ ASTContext &Context = S.Context;
// Number of scalar types in the GenType.
unsigned GenTypeNumTypes;
// Pointer to the list of vector sizes for the GenType.
@@ -663,7 +751,7 @@ static void OCL2Qual(ASTContext &Context, const OpenCLTypeStruct &Ty,
Records.getAllDerivedDefinitions("ImageType");
// Map an image type name to its 3 access-qualified types (RO, WO, RW).
- std::map<StringRef, SmallVector<Record *, 3>> ImageTypesMap;
+ StringMap<SmallVector<Record *, 3>> ImageTypesMap;
for (auto *IT : ImageTypes) {
auto Entry = ImageTypesMap.find(IT->getValueAsString("Name"));
if (Entry == ImageTypesMap.end()) {
@@ -681,18 +769,19 @@ static void OCL2Qual(ASTContext &Context, const OpenCLTypeStruct &Ty,
// tells which one is needed. Emit a switch statement that puts the
// corresponding QualType into "QT".
for (const auto &ITE : ImageTypesMap) {
- OS << " case OCLT_" << ITE.first.str() << ":\n"
+ OS << " case OCLT_" << ITE.getKey() << ":\n"
<< " switch (Ty.AccessQualifier) {\n"
<< " case OCLAQ_None:\n"
<< " llvm_unreachable(\"Image without access qualifier\");\n";
- for (const auto &Image : ITE.second) {
+ for (const auto &Image : ITE.getValue()) {
OS << StringSwitch<const char *>(
Image->getValueAsString("AccessQualifier"))
.Case("RO", " case OCLAQ_ReadOnly:\n")
.Case("WO", " case OCLAQ_WriteOnly:\n")
.Case("RW", " case OCLAQ_ReadWrite:\n")
- << " QT.push_back(Context."
- << Image->getValueAsDef("QTName")->getValueAsString("Name") << ");\n"
+ << " QT.push_back("
+ << Image->getValueAsDef("QTExpr")->getValueAsString("TypeExpr")
+ << ");\n"
<< " break;\n";
}
OS << " }\n"
@@ -701,35 +790,45 @@ static void OCL2Qual(ASTContext &Context, const OpenCLTypeStruct &Ty,
// Switch cases for generic types.
for (const auto *GenType : Records.getAllDerivedDefinitions("GenericType")) {
- OS << " case OCLT_" << GenType->getValueAsString("Name") << ":\n";
- OS << " QT.append({";
+ OS << " case OCLT_" << GenType->getValueAsString("Name") << ": {\n";
// Build the Cartesian product of (vector sizes) x (types). Only insert
// the plain scalar types for now; other type information such as vector
// size and type qualifiers will be added after the switch statement.
- for (unsigned I = 0; I < GenType->getValueAsDef("VectorList")
- ->getValueAsListOfInts("List")
- .size();
- I++) {
- for (const auto *T :
- GenType->getValueAsDef("TypeList")->getValueAsListOfDefs("List")) {
- OS << "Context."
- << T->getValueAsDef("QTName")->getValueAsString("Name") << ", ";
+ std::vector<Record *> BaseTypes =
+ GenType->getValueAsDef("TypeList")->getValueAsListOfDefs("List");
+
+ // Collect all QualTypes for a single vector size into TypeList.
+ OS << " SmallVector<QualType, " << BaseTypes.size() << "> TypeList;\n";
+ for (const auto *T : BaseTypes) {
+ StringRef Ext =
+ T->getValueAsDef("Extension")->getValueAsString("ExtName");
+ if (!Ext.empty()) {
+ OS << " if (S.getPreprocessor().isMacroDefined(\"" << Ext
+ << "\")) {\n ";
+ }
+ OS << " TypeList.push_back("
+ << T->getValueAsDef("QTExpr")->getValueAsString("TypeExpr") << ");\n";
+ if (!Ext.empty()) {
+ OS << " }\n";
}
}
- OS << "});\n";
- // GenTypeNumTypes is the number of types in the GenType
- // (e.g. float/double/half).
- OS << " GenTypeNumTypes = "
- << GenType->getValueAsDef("TypeList")->getValueAsListOfDefs("List")
- .size()
- << ";\n";
+ OS << " GenTypeNumTypes = TypeList.size();\n";
+
+ // Duplicate the TypeList for every vector size.
+ std::vector<int64_t> VectorList =
+ GenType->getValueAsDef("VectorList")->getValueAsListOfInts("List");
+ OS << " QT.reserve(" << VectorList.size() * BaseTypes.size() << ");\n"
+ << " for (unsigned I = 0; I < " << VectorList.size() << "; I++) {\n"
+ << " QT.append(TypeList);\n"
+ << " }\n";
+
// GenVectorSizes is the list of vector sizes for this GenType.
- // QT contains GenTypeNumTypes * #GenVectorSizes elements.
OS << " GenVectorSizes = List"
<< GenType->getValueAsDef("VectorList")->getValueAsString("Name")
- << ";\n";
- OS << " break;\n";
+ << ";\n"
+ << " break;\n"
+ << " }\n";
}
// Switch cases for non generic, non image types (int, int4, float, ...).
@@ -748,13 +847,23 @@ static void OCL2Qual(ASTContext &Context, const OpenCLTypeStruct &Ty,
TypesSeen.insert(std::make_pair(T->getValueAsString("Name"), true));
// Check the Type does not have an "abstract" QualType
- auto QT = T->getValueAsDef("QTName");
+ auto QT = T->getValueAsDef("QTExpr");
if (QT->getValueAsBit("IsAbstract") == 1)
continue;
// Emit the cases for non generic, non image types.
OS << " case OCLT_" << T->getValueAsString("Name") << ":\n";
- OS << " QT.push_back(Context." << QT->getValueAsString("Name")
- << ");\n";
+
+ StringRef Ext = T->getValueAsDef("Extension")->getValueAsString("ExtName");
+ // If this type depends on an extension, ensure the extension macro is
+ // defined.
+ if (!Ext.empty()) {
+ OS << " if (S.getPreprocessor().isMacroDefined(\"" << Ext
+ << "\")) {\n ";
+ }
+ OS << " QT.push_back(" << QT->getValueAsString("TypeExpr") << ");\n";
+ if (!Ext.empty()) {
+ OS << " }\n";
+ }
OS << " break;\n";
}
@@ -814,7 +923,230 @@ static void OCL2Qual(ASTContext &Context, const OpenCLTypeStruct &Ty,
OS << "\n} // OCL2Qual\n";
}
+std::string OpenCLBuiltinTestEmitter::getTypeString(const Record *Type,
+ TypeFlags Flags,
+ int VectorSize) const {
+ std::string S;
+ if (Type->getValueAsBit("IsConst") || Flags.IsConst) {
+ S += "const ";
+ }
+ if (Type->getValueAsBit("IsVolatile") || Flags.IsVolatile) {
+ S += "volatile ";
+ }
+
+ auto PrintAddrSpace = [&S](StringRef AddrSpace) {
+ S += StringSwitch<const char *>(AddrSpace)
+ .Case("clang::LangAS::opencl_private", "__private")
+ .Case("clang::LangAS::opencl_global", "__global")
+ .Case("clang::LangAS::opencl_constant", "__constant")
+ .Case("clang::LangAS::opencl_local", "__local")
+ .Case("clang::LangAS::opencl_generic", "__generic")
+ .Default("__private");
+ S += " ";
+ };
+ if (Flags.IsPointer) {
+ PrintAddrSpace(Flags.AddrSpace);
+ } else if (Type->getValueAsBit("IsPointer")) {
+ PrintAddrSpace(Type->getValueAsString("AddrSpace"));
+ }
+
+ StringRef Acc = Type->getValueAsString("AccessQualifier");
+ if (Acc != "") {
+ S += StringSwitch<const char *>(Acc)
+ .Case("RO", "__read_only ")
+ .Case("WO", "__write_only ")
+ .Case("RW", "__read_write ");
+ }
+
+ S += Type->getValueAsString("Name").str();
+ if (VectorSize > 1) {
+ S += std::to_string(VectorSize);
+ }
+
+ if (Type->getValueAsBit("IsPointer") || Flags.IsPointer) {
+ S += " *";
+ }
+
+ return S;
+}
+
+void OpenCLBuiltinTestEmitter::getTypeLists(
+ Record *Type, TypeFlags &Flags, std::vector<Record *> &TypeList,
+ std::vector<int64_t> &VectorList) const {
+ bool isGenType = Type->isSubClassOf("GenericType");
+ if (isGenType) {
+ TypeList = Type->getValueAsDef("TypeList")->getValueAsListOfDefs("List");
+ VectorList =
+ Type->getValueAsDef("VectorList")->getValueAsListOfInts("List");
+ return;
+ }
+
+ if (Type->isSubClassOf("PointerType") || Type->isSubClassOf("ConstType") ||
+ Type->isSubClassOf("VolatileType")) {
+ StringRef SubTypeName = Type->getValueAsString("Name");
+ Record *PossibleGenType = Records.getDef(SubTypeName);
+ if (PossibleGenType && PossibleGenType->isSubClassOf("GenericType")) {
+ // When PointerType, ConstType, or VolatileType is applied to a
+ // GenericType, the flags need to be taken from the subtype, not from the
+ // GenericType.
+ Flags.IsPointer = Type->getValueAsBit("IsPointer");
+ Flags.IsConst = Type->getValueAsBit("IsConst");
+ Flags.IsVolatile = Type->getValueAsBit("IsVolatile");
+ Flags.AddrSpace = Type->getValueAsString("AddrSpace");
+ getTypeLists(PossibleGenType, Flags, TypeList, VectorList);
+ return;
+ }
+ }
+
+ // Not a GenericType, so just insert the single type.
+ TypeList.push_back(Type);
+ VectorList.push_back(Type->getValueAsInt("VecWidth"));
+}
+
+void OpenCLBuiltinTestEmitter::expandTypesInSignature(
+ const std::vector<Record *> &Signature,
+ SmallVectorImpl<SmallVector<std::string, 2>> &Types) {
+ // Find out if there are any GenTypes in this signature, and if so, calculate
+ // into how many signatures they will expand.
+ unsigned NumSignatures = 1;
+ SmallVector<SmallVector<std::string, 4>, 4> ExpandedGenTypes;
+ for (const auto &Arg : Signature) {
+ SmallVector<std::string, 4> ExpandedArg;
+ std::vector<Record *> TypeList;
+ std::vector<int64_t> VectorList;
+ TypeFlags Flags;
+
+ getTypeLists(Arg, Flags, TypeList, VectorList);
+
+ // Insert the Cartesian product of the types and vector sizes.
+ for (const auto &Vector : VectorList) {
+ for (const auto &Type : TypeList) {
+ ExpandedArg.push_back(getTypeString(Type, Flags, Vector));
+ }
+ }
+ NumSignatures = std::max<unsigned>(NumSignatures, ExpandedArg.size());
+ ExpandedGenTypes.push_back(ExpandedArg);
+ }
+
+ // Now the total number of signatures is known. Populate the return list with
+ // all signatures.
+ for (unsigned I = 0; I < NumSignatures; I++) {
+ SmallVector<std::string, 2> Args;
+
+ // Process a single signature.
+ for (unsigned ArgNum = 0; ArgNum < Signature.size(); ArgNum++) {
+ // For differently-sized GenTypes in a parameter list, the smaller
+ // GenTypes just repeat, so index modulo the number of expanded types.
+ size_t TypeIndex = I % ExpandedGenTypes[ArgNum].size();
+ Args.push_back(ExpandedGenTypes[ArgNum][TypeIndex]);
+ }
+ Types.push_back(Args);
+ }
+}
+
+void OpenCLBuiltinTestEmitter::emit() {
+ emitSourceFileHeader("OpenCL Builtin exhaustive testing", OS);
+
+ // Enable some extensions for testing.
+ OS << R"(
+#pragma OPENCL EXTENSION cl_khr_fp16 : enable
+#pragma OPENCL EXTENSION cl_khr_fp64 : enable
+#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
+#pragma OPENCL EXTENSION cl_khr_gl_msaa_sharing : enable
+#pragma OPENCL EXTENSION cl_khr_mipmap_image_writes : enable
+#pragma OPENCL EXTENSION cl_khr_3d_image_writes : enable
+
+)";
+
+ // Ensure each test has a unique name by numbering them.
+ unsigned TestID = 0;
+
+ // Iterate over all builtins.
+ std::vector<Record *> Builtins = Records.getAllDerivedDefinitions("Builtin");
+ for (const auto *B : Builtins) {
+ StringRef Name = B->getValueAsString("Name");
+
+ SmallVector<SmallVector<std::string, 2>, 4> FTypes;
+ expandTypesInSignature(B->getValueAsListOfDefs("Signature"), FTypes);
+
+ OS << "// Test " << Name << "\n";
+ std::string OptionalEndif;
+ StringRef Extensions =
+ B->getValueAsDef("Extension")->getValueAsString("ExtName");
+ if (!Extensions.empty()) {
+ OS << "#if";
+ OptionalEndif = "#endif // Extension\n";
+
+ SmallVector<StringRef, 2> ExtVec;
+ Extensions.split(ExtVec, " ");
+ bool isFirst = true;
+ for (StringRef Ext : ExtVec) {
+ if (!isFirst) {
+ OS << " &&";
+ }
+ OS << " defined(" << Ext << ")";
+ isFirst = false;
+ }
+ OS << "\n";
+ }
+ auto PrintOpenCLVersion = [this](int Version) {
+ OS << "CL_VERSION_" << (Version / 100) << "_" << ((Version % 100) / 10);
+ };
+ int MinVersion = B->getValueAsDef("MinVersion")->getValueAsInt("ID");
+ if (MinVersion != 100) {
+ // OpenCL 1.0 is the default minimum version.
+ OS << "#if __OPENCL_C_VERSION__ >= ";
+ PrintOpenCLVersion(MinVersion);
+ OS << "\n";
+ OptionalEndif = "#endif // MinVersion\n" + OptionalEndif;
+ }
+ int MaxVersion = B->getValueAsDef("MaxVersion")->getValueAsInt("ID");
+ if (MaxVersion) {
+ OS << "#if __OPENCL_C_VERSION__ < ";
+ PrintOpenCLVersion(MaxVersion);
+ OS << "\n";
+ OptionalEndif = "#endif // MaxVersion\n" + OptionalEndif;
+ }
+ for (const auto &Signature : FTypes) {
+ // Emit function declaration.
+ OS << Signature[0] << " test" << TestID++ << "_" << Name << "(";
+ if (Signature.size() > 1) {
+ for (unsigned I = 1; I < Signature.size(); I++) {
+ if (I != 1)
+ OS << ", ";
+ OS << Signature[I] << " arg" << I;
+ }
+ }
+ OS << ") {\n";
+
+ // Emit function body.
+ OS << " ";
+ if (Signature[0] != "void") {
+ OS << "return ";
+ }
+ OS << Name << "(";
+ for (unsigned I = 1; I < Signature.size(); I++) {
+ if (I != 1)
+ OS << ", ";
+ OS << "arg" << I;
+ }
+ OS << ");\n";
+
+ // End of function body.
+ OS << "}\n";
+ }
+ OS << OptionalEndif << "\n";
+ }
+}
+
void clang::EmitClangOpenCLBuiltins(RecordKeeper &Records, raw_ostream &OS) {
BuiltinNameEmitter NameChecker(Records, OS);
NameChecker.Emit();
}
+
+void clang::EmitClangOpenCLBuiltinTests(RecordKeeper &Records,
+ raw_ostream &OS) {
+ OpenCLBuiltinTestEmitter TestFileGenerator(Records, OS);
+ TestFileGenerator.emit();
+}
diff --git a/clang/utils/TableGen/MveEmitter.cpp b/clang/utils/TableGen/MveEmitter.cpp
index e9ae08ac4c05..091af2dc52a1 100644
--- a/clang/utils/TableGen/MveEmitter.cpp
+++ b/clang/utils/TableGen/MveEmitter.cpp
@@ -1272,6 +1272,13 @@ Result::Ptr EmitterBase::getCodeForDagArg(DagInit *D, unsigned ArgNum,
return it->second;
}
+ // Sometimes the Arg is a bit. Prior to multiclass template argument
+ // checking, integers would sneak through the bit declaration,
+ // but now they really are bits.
+ if (auto *BI = dyn_cast<BitInit>(Arg))
+ return std::make_shared<IntLiteralResult>(getScalarType("u32"),
+ BI->getValue());
+
if (auto *II = dyn_cast<IntInit>(Arg))
return std::make_shared<IntLiteralResult>(getScalarType("u32"),
II->getValue());
@@ -1287,7 +1294,11 @@ Result::Ptr EmitterBase::getCodeForDagArg(DagInit *D, unsigned ArgNum,
}
}
- PrintFatalError("bad dag argument type for code generation");
+ PrintError("bad DAG argument type for code generation");
+ PrintNote("DAG: " + D->getAsString());
+ if (TypedInit *Typed = dyn_cast<TypedInit>(Arg))
+ PrintNote("argument type: " + Typed->getType()->getAsString());
+ PrintFatalNote("argument number " + Twine(ArgNum) + ": " + Arg->getAsString());
}
Result::Ptr EmitterBase::getCodeForArg(unsigned ArgNum, const Type *ArgType,
diff --git a/clang/utils/TableGen/NeonEmitter.cpp b/clang/utils/TableGen/NeonEmitter.cpp
index ba952f220037..f0da1a7d2f4e 100644
--- a/clang/utils/TableGen/NeonEmitter.cpp
+++ b/clang/utils/TableGen/NeonEmitter.cpp
@@ -2115,7 +2115,11 @@ void NeonEmitter::genIntrinsicRangeCheckCode(raw_ostream &OS,
std::string LowerBound, UpperBound;
Record *R = Def->getRecord();
- if (R->getValueAsBit("isVCVT_N")) {
+ if (R->getValueAsBit("isVXAR")) {
+ // VXAR takes an immediate in the range [0, 63].
+ LowerBound = "0";
+ UpperBound = "63";
+ } else if (R->getValueAsBit("isVCVT_N")) {
// VCVT between floating- and fixed-point values takes an immediate
// in the range [1, 32) for f32 or [1, 64) for f64 or [1, 16) for f16.
LowerBound = "1";
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
new file mode 100644
index 000000000000..24f2250c9ae0
--- /dev/null
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -0,0 +1,1269 @@
+//===- RISCVVEmitter.cpp - Generate riscv_vector.h for use with clang -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting riscv_vector.h which
+// includes a declaration and definition of each intrinsic functions specified
+// in https://github.com/riscv/rvv-intrinsic-doc.
+//
+// See also the documentation in include/clang/Basic/riscv_vector.td.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+#include <numeric>
+
+using namespace llvm;
+using BasicType = char;
+using VScaleVal = Optional<unsigned>;
+
+namespace {
+
+// Exponential LMUL
+struct LMULType {
+ int Log2LMUL;
+ LMULType(int Log2LMUL);
+ // Return the C/C++ string representation of LMUL
+ std::string str() const;
+ Optional<unsigned> getScale(unsigned ElementBitwidth) const;
+ void MulLog2LMUL(int Log2LMUL);
+ LMULType &operator*=(uint32_t RHS);
+};
+
+// This class is a compact representation of a valid or invalid RVVType.
+class RVVType {
+ enum ScalarTypeKind : uint32_t {
+ Void,
+ Size_t,
+ Ptrdiff_t,
+ UnsignedLong,
+ SignedLong,
+ Boolean,
+ SignedInteger,
+ UnsignedInteger,
+ Float,
+ Invalid,
+ };
+ BasicType BT;
+ ScalarTypeKind ScalarType = Invalid;
+ LMULType LMUL;
+ bool IsPointer = false;
+ // IsImmediate operands are "int", but must be constant expressions.
+ bool IsImmediate = false;
+ // Const qualifier for pointer to const object or object of const type.
+ bool IsConstant = false;
+ unsigned ElementBitwidth = 0;
+ VScaleVal Scale = 0;
+ bool Valid;
+
+ std::string BuiltinStr;
+ std::string ClangBuiltinStr;
+ std::string Str;
+ std::string ShortStr;
+
+public:
+ RVVType() : RVVType(BasicType(), 0, StringRef()) {}
+ RVVType(BasicType BT, int Log2LMUL, StringRef prototype);
+
+ // Return the string representation of a type, which is an encoded string for
+ // passing to the BUILTIN() macro in Builtins.def.
+ const std::string &getBuiltinStr() const { return BuiltinStr; }
+
+ // Return the clang builtin type for an RVV vector type, as used in the
+ // riscv_vector.h header file.
+ const std::string &getClangBuiltinStr() const { return ClangBuiltinStr; }
+
+ // Return the C/C++ string representation of a type for use in the
+ // riscv_vector.h header file.
+ const std::string &getTypeStr() const { return Str; }
+
+ // Return the short name of a type for C/C++ name suffix.
+ const std::string &getShortStr() {
+ // Not all types are used in the short name, so compute the short name
+ // on demand.
+ if (ShortStr.empty())
+ initShortStr();
+ return ShortStr;
+ }
+
+ bool isValid() const { return Valid; }
+ bool isScalar() const { return Scale.hasValue() && Scale.getValue() == 0; }
+ bool isVector() const { return Scale.hasValue() && Scale.getValue() != 0; }
+ bool isFloat() const { return ScalarType == ScalarTypeKind::Float; }
+ bool isSignedInteger() const {
+ return ScalarType == ScalarTypeKind::SignedInteger;
+ }
+ bool isFloatVector(unsigned Width) const {
+ return isVector() && isFloat() && ElementBitwidth == Width;
+ }
+ bool isFloat(unsigned Width) const {
+ return isFloat() && ElementBitwidth == Width;
+ }
+
+private:
+ // Verify RVV vector type and set Valid.
+ bool verifyType() const;
+
+ // Creates a type based on basic types of TypeRange
+ void applyBasicType();
+
+ // Applies a prototype modifier to the current type. The result may be an
+ // invalid type.
+ void applyModifier(StringRef prototype);
+
+ // Compute and record a string for legal type.
+ void initBuiltinStr();
+ // Compute and record a builtin RVV vector type string.
+ void initClangBuiltinStr();
+ // Compute and record a type string for use in the header.
+ void initTypeStr();
+ // Compute and record a short name of a type for C/C++ name suffix.
+ void initShortStr();
+};
+
+using RVVTypePtr = RVVType *;
+using RVVTypes = std::vector<RVVTypePtr>;
+
+enum RISCVExtension : uint8_t {
+ Basic = 0,
+ F = 1 << 1,
+ D = 1 << 2,
+ Zfh = 1 << 3,
+ Zvamo = 1 << 4,
+ Zvlsseg = 1 << 5,
+};
+
+// TODO: refactor the RVVIntrinsic class design after supporting all intrinsic
+// combinations. This represents an instantiation of an intrinsic with a
+// particular type and prototype.
+class RVVIntrinsic {
+
+private:
+ std::string Name; // Builtin name
+ std::string MangledName;
+ std::string IRName;
+ bool HasSideEffects;
+ bool IsMask;
+ bool HasMaskedOffOperand;
+ bool HasVL;
+ bool HasNoMaskedOverloaded;
+ bool HasAutoDef; // There is an automatic definition in the header
+ std::string ManualCodegen;
+ RVVTypePtr OutputType; // Builtin output type
+ RVVTypes InputTypes; // Builtin input types
+ // The types we use to obtain the specific LLVM intrinsic. They are index of
+ // InputTypes. -1 means the return type.
+ std::vector<int64_t> IntrinsicTypes;
+ uint8_t RISCVExtensions = 0;
+ unsigned NF = 1;
+
+public:
+ RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
+ StringRef MangledSuffix, StringRef IRName, bool HasSideEffects,
+ bool IsMask, bool HasMaskedOffOperand, bool HasVL,
+ bool HasNoMaskedOverloaded, bool HasAutoDef,
+ StringRef ManualCodegen, const RVVTypes &Types,
+ const std::vector<int64_t> &IntrinsicTypes,
+ StringRef RequiredExtension, unsigned NF);
+ ~RVVIntrinsic() = default;
+
+ StringRef getName() const { return Name; }
+ StringRef getMangledName() const { return MangledName; }
+ bool hasSideEffects() const { return HasSideEffects; }
+ bool hasMaskedOffOperand() const { return HasMaskedOffOperand; }
+ bool hasVL() const { return HasVL; }
+ bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
+ bool hasManualCodegen() const { return !ManualCodegen.empty(); }
+ bool hasAutoDef() const { return HasAutoDef; }
+ bool isMask() const { return IsMask; }
+ StringRef getIRName() const { return IRName; }
+ StringRef getManualCodegen() const { return ManualCodegen; }
+ uint8_t getRISCVExtensions() const { return RISCVExtensions; }
+ unsigned getNF() const { return NF; }
+
+ // Return the type string for a BUILTIN() macro in Builtins.def.
+ std::string getBuiltinTypeStr() const;
+
+ // Emit the code block for switch body in EmitRISCVBuiltinExpr, it should
+ // init the RVVIntrinsic ID and IntrinsicTypes.
+ void emitCodeGenSwitchBody(raw_ostream &o) const;
+
+ // Emit the macros for mapping C/C++ intrinsic function to builtin functions.
+ void emitIntrinsicMacro(raw_ostream &o) const;
+
+ // Emit the mangled function definition.
+ void emitMangledFuncDef(raw_ostream &o) const;
+};
+
+class RVVEmitter {
+private:
+ RecordKeeper &Records;
+ std::string HeaderCode;
+ // Concat BasicType, LMUL and Proto as key
+ StringMap<RVVType> LegalTypes;
+ StringSet<> IllegalTypes;
+
+public:
+ RVVEmitter(RecordKeeper &R) : Records(R) {}
+
+ /// Emit riscv_vector.h
+ void createHeader(raw_ostream &o);
+
+ /// Emit all the __builtin prototypes and code needed by Sema.
+ void createBuiltins(raw_ostream &o);
+
+ /// Emit all the information needed to map builtin -> LLVM IR intrinsic.
+ void createCodeGen(raw_ostream &o);
+
+ std::string getSuffixStr(char Type, int Log2LMUL, StringRef Prototypes);
+
+private:
+ /// Create all intrinsics and add them to \p Out
+ void createRVVIntrinsics(std::vector<std::unique_ptr<RVVIntrinsic>> &Out);
+ /// Compute output and input types by applying different configs (basic type
+ /// and LMUL with type transformers). It also records each type result in the
+ /// legal or illegal set to avoid computing the same config again. The result
+ /// may contain an illegal RVVType.
+ Optional<RVVTypes> computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
+ ArrayRef<std::string> PrototypeSeq);
+ Optional<RVVTypePtr> computeType(BasicType BT, int Log2LMUL, StringRef Proto);
+
+ /// Emit arch predecessor definitions and body; assume the elements of Defs
+ /// are sorted by extension.
+ void emitArchMacroAndBody(
+ std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &o,
+ std::function<void(raw_ostream &, const RVVIntrinsic &)>);
+
+ // Emit the architecture preprocessor definitions. Return true when emits
+ // non-empty string.
+ bool emitExtDefStr(uint8_t Extensions, raw_ostream &o);
+ // Slice the Prototypes string into sub-prototype strings and process each
+ // sub-prototype string individually in the Handler.
+ void parsePrototypes(StringRef Prototypes,
+ std::function<void(StringRef)> Handler);
+};
+
+} // namespace
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+LMULType::LMULType(int NewLog2LMUL) {
+ // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3
+ assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!");
+ Log2LMUL = NewLog2LMUL;
+}
+
+std::string LMULType::str() const {
+ if (Log2LMUL < 0)
+ return "mf" + utostr(1ULL << (-Log2LMUL));
+ return "m" + utostr(1ULL << Log2LMUL);
+}
+
+VScaleVal LMULType::getScale(unsigned ElementBitwidth) const {
+ int Log2ScaleResult = 0;
+ switch (ElementBitwidth) {
+ default:
+ break;
+ case 8:
+ Log2ScaleResult = Log2LMUL + 3;
+ break;
+ case 16:
+ Log2ScaleResult = Log2LMUL + 2;
+ break;
+ case 32:
+ Log2ScaleResult = Log2LMUL + 1;
+ break;
+ case 64:
+ Log2ScaleResult = Log2LMUL;
+ break;
+ }
+ // Illegal vscale result would be less than 1
+ if (Log2ScaleResult < 0)
+ return None;
+ return 1 << Log2ScaleResult;
+}
+
+void LMULType::MulLog2LMUL(int log2LMUL) { Log2LMUL += log2LMUL; }
+
+LMULType &LMULType::operator*=(uint32_t RHS) {
+ assert(isPowerOf2_32(RHS));
+ this->Log2LMUL = this->Log2LMUL + Log2_32(RHS);
+ return *this;
+}
+
+RVVType::RVVType(BasicType BT, int Log2LMUL, StringRef prototype)
+ : BT(BT), LMUL(LMULType(Log2LMUL)) {
+ applyBasicType();
+ applyModifier(prototype);
+ Valid = verifyType();
+ if (Valid) {
+ initBuiltinStr();
+ initTypeStr();
+ if (isVector()) {
+ initClangBuiltinStr();
+ }
+ }
+}
+
+// clang-format off
+// boolean types are encoded as the ratio n (SEW/LMUL)
+// SEW/LMUL | 1 | 2 | 4 | 8 | 16 | 32 | 64
+// c type | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t | vbool2_t | vbool1_t
+// IR type | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1
+
+// type\lmul | 1/8 | 1/4 | 1/2 | 1 | 2 | 4 | 8
+// -------- |------ | -------- | ------- | ------- | -------- | -------- | --------
+// i64 | N/A | N/A | N/A | nxv1i64 | nxv2i64 | nxv4i64 | nxv8i64
+// i32 | N/A | N/A | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32 | nxv16i32
+// i16 | N/A | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16
+// i8 | nxv1i8 | nxv2i8 | nxv4i8 | nxv8i8 | nxv16i8 | nxv32i8 | nxv64i8
+// double | N/A | N/A | N/A | nxv1f64 | nxv2f64 | nxv4f64 | nxv8f64
+// float | N/A | N/A | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32 | nxv16f32
+// half | N/A | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16
+// clang-format on
+
+bool RVVType::verifyType() const {
+ if (ScalarType == Invalid)
+ return false;
+ if (isScalar())
+ return true;
+ if (!Scale.hasValue())
+ return false;
+ if (isFloat() && ElementBitwidth == 8)
+ return false;
+ unsigned V = Scale.getValue();
+ switch (ElementBitwidth) {
+ case 1:
+ case 8:
+ // Check Scale is 1,2,4,8,16,32,64
+ return (V <= 64 && isPowerOf2_32(V));
+ case 16:
+ // Check Scale is 1,2,4,8,16,32
+ return (V <= 32 && isPowerOf2_32(V));
+ case 32:
+ // Check Scale is 1,2,4,8,16
+ return (V <= 16 && isPowerOf2_32(V));
+ case 64:
+ // Check Scale is 1,2,4,8
+ return (V <= 8 && isPowerOf2_32(V));
+ }
+ return false;
+}
+
+void RVVType::initBuiltinStr() {
+ assert(isValid() && "RVVType is invalid");
+ switch (ScalarType) {
+ case ScalarTypeKind::Void:
+ BuiltinStr = "v";
+ return;
+ case ScalarTypeKind::Size_t:
+ BuiltinStr = "z";
+ if (IsImmediate)
+ BuiltinStr = "I" + BuiltinStr;
+ if (IsPointer)
+ BuiltinStr += "*";
+ return;
+ case ScalarTypeKind::Ptrdiff_t:
+ BuiltinStr = "Y";
+ return;
+ case ScalarTypeKind::UnsignedLong:
+ BuiltinStr = "ULi";
+ return;
+ case ScalarTypeKind::SignedLong:
+ BuiltinStr = "Li";
+ return;
+ case ScalarTypeKind::Boolean:
+ assert(ElementBitwidth == 1);
+ BuiltinStr += "b";
+ break;
+ case ScalarTypeKind::SignedInteger:
+ case ScalarTypeKind::UnsignedInteger:
+ switch (ElementBitwidth) {
+ case 8:
+ BuiltinStr += "c";
+ break;
+ case 16:
+ BuiltinStr += "s";
+ break;
+ case 32:
+ BuiltinStr += "i";
+ break;
+ case 64:
+ BuiltinStr += "Wi";
+ break;
+ default:
+ llvm_unreachable("Unhandled ElementBitwidth!");
+ }
+ if (isSignedInteger())
+ BuiltinStr = "S" + BuiltinStr;
+ else
+ BuiltinStr = "U" + BuiltinStr;
+ break;
+ case ScalarTypeKind::Float:
+ switch (ElementBitwidth) {
+ case 16:
+ BuiltinStr += "x";
+ break;
+ case 32:
+ BuiltinStr += "f";
+ break;
+ case 64:
+ BuiltinStr += "d";
+ break;
+ default:
+ llvm_unreachable("Unhandled ElementBitwidth!");
+ }
+ break;
+ default:
+ llvm_unreachable("ScalarType is invalid!");
+ }
+ if (IsImmediate)
+ BuiltinStr = "I" + BuiltinStr;
+ if (isScalar()) {
+ if (IsConstant)
+ BuiltinStr += "C";
+ if (IsPointer)
+ BuiltinStr += "*";
+ return;
+ }
+ BuiltinStr = "q" + utostr(Scale.getValue()) + BuiltinStr;
+ // Pointer to vector types. Defined for Zvlsseg load intrinsics.
+ // Zvlsseg load intrinsics have pointer type arguments to store the loaded
+ // vector values.
+ if (IsPointer)
+ BuiltinStr += "*";
+}
+
+void RVVType::initClangBuiltinStr() {
+ assert(isValid() && "RVVType is invalid");
+ assert(isVector() && "Handle Vector type only");
+
+ ClangBuiltinStr = "__rvv_";
+ switch (ScalarType) {
+ case ScalarTypeKind::Boolean:
+ ClangBuiltinStr += "bool" + utostr(64 / Scale.getValue()) + "_t";
+ return;
+ case ScalarTypeKind::Float:
+ ClangBuiltinStr += "float";
+ break;
+ case ScalarTypeKind::SignedInteger:
+ ClangBuiltinStr += "int";
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ ClangBuiltinStr += "uint";
+ break;
+ default:
+ llvm_unreachable("ScalarTypeKind is invalid");
+ }
+ ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
+}
+
+void RVVType::initTypeStr() {
+ assert(isValid() && "RVVType is invalid");
+
+ if (IsConstant)
+ Str += "const ";
+
+ auto getTypeString = [&](StringRef TypeStr) {
+ if (isScalar())
+ return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
+ return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
+ .str();
+ };
+
+ switch (ScalarType) {
+ case ScalarTypeKind::Void:
+ Str = "void";
+ return;
+ case ScalarTypeKind::Size_t:
+ Str = "size_t";
+ if (IsPointer)
+ Str += " *";
+ return;
+ case ScalarTypeKind::Ptrdiff_t:
+ Str = "ptrdiff_t";
+ return;
+ case ScalarTypeKind::UnsignedLong:
+ Str = "unsigned long";
+ return;
+ case ScalarTypeKind::SignedLong:
+ Str = "long";
+ return;
+ case ScalarTypeKind::Boolean:
+ if (isScalar())
+ Str += "bool";
+ else
+ // Vector bool is a special case; the formula is
+ // `vbool<N>_t = MVT::nxv<64/N>i1`, e.g. vbool16_t = MVT::nxv4i1
+ Str += "vbool" + utostr(64 / Scale.getValue()) + "_t";
+ break;
+ case ScalarTypeKind::Float:
+ if (isScalar()) {
+ if (ElementBitwidth == 64)
+ Str += "double";
+ else if (ElementBitwidth == 32)
+ Str += "float";
+ else if (ElementBitwidth == 16)
+ Str += "_Float16";
+ else
+ llvm_unreachable("Unhandled floating type.");
+ } else
+ Str += getTypeString("float");
+ break;
+ case ScalarTypeKind::SignedInteger:
+ Str += getTypeString("int");
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ Str += getTypeString("uint");
+ break;
+ default:
+ llvm_unreachable("ScalarType is invalid!");
+ }
+ if (IsPointer)
+ Str += " *";
+}
+
+void RVVType::initShortStr() {
+ switch (ScalarType) {
+ case ScalarTypeKind::Boolean:
+ assert(isVector());
+ ShortStr = "b" + utostr(64 / Scale.getValue());
+ return;
+ case ScalarTypeKind::Float:
+ ShortStr = "f" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::SignedInteger:
+ ShortStr = "i" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ ShortStr = "u" + utostr(ElementBitwidth);
+ break;
+ default:
+ PrintFatalError("Unhandled case!");
+ }
+ if (isVector())
+ ShortStr += LMUL.str();
+}
+
+void RVVType::applyBasicType() {
+ switch (BT) {
+ case 'c':
+ ElementBitwidth = 8;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case 's':
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case 'i':
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case 'l':
+ ElementBitwidth = 64;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case 'x':
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case 'f':
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case 'd':
+ ElementBitwidth = 64;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ default:
+ PrintFatalError("Unhandled type code!");
+ }
+ assert(ElementBitwidth != 0 && "Bad element bitwidth!");
+}
+
+void RVVType::applyModifier(StringRef Transformer) {
+ if (Transformer.empty())
+ return;
+ // Handle primitive type transformer
+ auto PType = Transformer.back();
+ switch (PType) {
+ case 'e':
+ Scale = 0;
+ break;
+ case 'v':
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case 'w':
+ ElementBitwidth *= 2;
+ LMUL *= 2;
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case 'q':
+ ElementBitwidth *= 4;
+ LMUL *= 4;
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case 'o':
+ ElementBitwidth *= 8;
+ LMUL *= 8;
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case 'm':
+ ScalarType = ScalarTypeKind::Boolean;
+ Scale = LMUL.getScale(ElementBitwidth);
+ ElementBitwidth = 1;
+ break;
+ case '0':
+ ScalarType = ScalarTypeKind::Void;
+ break;
+ case 'z':
+ ScalarType = ScalarTypeKind::Size_t;
+ break;
+ case 't':
+ ScalarType = ScalarTypeKind::Ptrdiff_t;
+ break;
+ case 'u':
+ ScalarType = ScalarTypeKind::UnsignedLong;
+ break;
+ case 'l':
+ ScalarType = ScalarTypeKind::SignedLong;
+ break;
+ default:
+ PrintFatalError("Illegal primitive type transformers!");
+ }
+ Transformer = Transformer.drop_back();
+
+ // Extract and compute the complex type transformer. It can appear only once.
+ if (Transformer.startswith("(")) {
+ size_t Idx = Transformer.find(')');
+ assert(Idx != StringRef::npos);
+ StringRef ComplexType = Transformer.slice(1, Idx);
+ Transformer = Transformer.drop_front(Idx + 1);
+ assert(Transformer.find('(') == StringRef::npos &&
+ "Only allow one complex type transformer");
+
+ auto UpdateAndCheckComplexProto = [&]() {
+ Scale = LMUL.getScale(ElementBitwidth);
+ const StringRef VectorPrototypes("vwqom");
+ if (!VectorPrototypes.contains(PType))
+ PrintFatalError("Complex type transformer only supports vector type!");
+ if (Transformer.find_first_of("PCKWS") != StringRef::npos)
+ PrintFatalError(
+ "Illegal type transformer for Complex type transformer");
+ };
+ auto ComputeFixedLog2LMUL =
+ [&](StringRef Value,
+ std::function<bool(const int32_t &, const int32_t &)> Compare) {
+ int32_t Log2LMUL;
+ Value.getAsInteger(10, Log2LMUL);
+ if (!Compare(Log2LMUL, LMUL.Log2LMUL)) {
+ ScalarType = Invalid;
+ return false;
+ }
+ // Update new LMUL
+ LMUL = LMULType(Log2LMUL);
+ UpdateAndCheckComplexProto();
+ return true;
+ };
+ auto ComplexTT = ComplexType.split(":");
+ if (ComplexTT.first == "Log2EEW") {
+ uint32_t Log2EEW;
+ ComplexTT.second.getAsInteger(10, Log2EEW);
+ // update new elmul = (eew/sew) * lmul
+ LMUL.MulLog2LMUL(Log2EEW - Log2_32(ElementBitwidth));
+ // update new eew
+ ElementBitwidth = 1 << Log2EEW;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ UpdateAndCheckComplexProto();
+ } else if (ComplexTT.first == "FixedSEW") {
+ uint32_t NewSEW;
+ ComplexTT.second.getAsInteger(10, NewSEW);
+ // Set invalid type if src and dst SEW are same.
+ if (ElementBitwidth == NewSEW) {
+ ScalarType = Invalid;
+ return;
+ }
+ // Update new SEW
+ ElementBitwidth = NewSEW;
+ UpdateAndCheckComplexProto();
+ } else if (ComplexTT.first == "LFixedLog2LMUL") {
+ // New LMUL should be larger than old
+ if (!ComputeFixedLog2LMUL(ComplexTT.second, std::greater<int32_t>()))
+ return;
+ } else if (ComplexTT.first == "SFixedLog2LMUL") {
+ // New LMUL should be smaller than old
+ if (!ComputeFixedLog2LMUL(ComplexTT.second, std::less<int32_t>()))
+ return;
+ } else {
+ PrintFatalError("Illegal complex type transformers!");
+ }
+ }
+
+ // Compute the remaining type transformers
+ for (char I : Transformer) {
+ switch (I) {
+ case 'P':
+ if (IsConstant)
+ PrintFatalError("'P' transformer cannot be used after 'C'");
+ if (IsPointer)
+ PrintFatalError("'P' transformer cannot be used twice");
+ IsPointer = true;
+ break;
+ case 'C':
+ if (IsConstant)
+ PrintFatalError("'C' transformer cannot be used twice");
+ IsConstant = true;
+ break;
+ case 'K':
+ IsImmediate = true;
+ break;
+ case 'U':
+ ScalarType = ScalarTypeKind::UnsignedInteger;
+ break;
+ case 'I':
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case 'F':
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case 'S':
+ LMUL = LMULType(0);
+ // Update ElementBitwidth need to update Scale too.
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ default:
+ PrintFatalError("Illegal non-primitive type transformer!");
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// RVVIntrinsic implementation
+//===----------------------------------------------------------------------===//
+RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
+ StringRef NewMangledName, StringRef MangledSuffix,
+ StringRef IRName, bool HasSideEffects, bool IsMask,
+ bool HasMaskedOffOperand, bool HasVL,
+ bool HasNoMaskedOverloaded, bool HasAutoDef,
+ StringRef ManualCodegen, const RVVTypes &OutInTypes,
+ const std::vector<int64_t> &NewIntrinsicTypes,
+ StringRef RequiredExtension, unsigned NF)
+ : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
+ HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
+ HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
+ ManualCodegen(ManualCodegen.str()), NF(NF) {
+
+ // Init Name and MangledName
+ Name = NewName.str();
+ if (NewMangledName.empty())
+ MangledName = NewName.split("_").first.str();
+ else
+ MangledName = NewMangledName.str();
+ if (!Suffix.empty())
+ Name += "_" + Suffix.str();
+ if (!MangledSuffix.empty())
+ MangledName += "_" + MangledSuffix.str();
+ if (IsMask) {
+ Name += "_m";
+ }
+ // Init RISC-V extensions
+ for (const auto &T : OutInTypes) {
+ if (T->isFloatVector(16) || T->isFloat(16))
+ RISCVExtensions |= RISCVExtension::Zfh;
+ else if (T->isFloatVector(32) || T->isFloat(32))
+ RISCVExtensions |= RISCVExtension::F;
+ else if (T->isFloatVector(64) || T->isFloat(64))
+ RISCVExtensions |= RISCVExtension::D;
+ }
+ if (RequiredExtension == "Zvamo")
+ RISCVExtensions |= RISCVExtension::Zvamo;
+ if (RequiredExtension == "Zvlsseg")
+ RISCVExtensions |= RISCVExtension::Zvlsseg;
+
+ // Init OutputType and InputTypes
+ OutputType = OutInTypes[0];
+ InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
+
+ // IntrinsicTypes is the nonmasked version's index. It needs updating
+ // if there is a maskedoff operand (it is always the first operand).
+ IntrinsicTypes = NewIntrinsicTypes;
+ if (IsMask && HasMaskedOffOperand) {
+ for (auto &I : IntrinsicTypes) {
+ if (I >= 0)
+ I += NF;
+ }
+ }
+}
+
+std::string RVVIntrinsic::getBuiltinTypeStr() const {
+ std::string S;
+ S += OutputType->getBuiltinStr();
+ for (const auto &T : InputTypes) {
+ S += T->getBuiltinStr();
+ }
+ return S;
+}
+
+void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
+ if (!getIRName().empty())
+ OS << " ID = Intrinsic::riscv_" + getIRName() + ";\n";
+ if (NF >= 2)
+ OS << " NF = " + utostr(getNF()) + ";\n";
+ if (hasManualCodegen()) {
+ OS << ManualCodegen;
+ OS << "break;\n";
+ return;
+ }
+
+ if (isMask()) {
+ if (hasVL()) {
+ OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);\n";
+ } else {
+ OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
+ }
+ }
+
+ OS << " IntrinsicTypes = {";
+ ListSeparator LS;
+ for (const auto &Idx : IntrinsicTypes) {
+ if (Idx == -1)
+ OS << LS << "ResultType";
+ else
+ OS << LS << "Ops[" << Idx << "]->getType()";
+ }
+
+ // VL could be i64 or i32, need to encode it in IntrinsicTypes. VL is
+ // always last operand.
+ if (hasVL())
+ OS << ", Ops.back()->getType()";
+ OS << "};\n";
+ OS << " break;\n";
+}
+
+void RVVIntrinsic::emitIntrinsicMacro(raw_ostream &OS) const {
+ OS << "#define " << getName() << "(";
+ if (!InputTypes.empty()) {
+ ListSeparator LS;
+ for (unsigned i = 0, e = InputTypes.size(); i != e; ++i)
+ OS << LS << "op" << i;
+ }
+ OS << ") \\\n";
+ OS << "__builtin_rvv_" << getName() << "(";
+ if (!InputTypes.empty()) {
+ ListSeparator LS;
+ for (unsigned i = 0, e = InputTypes.size(); i != e; ++i)
+ OS << LS << "(" << InputTypes[i]->getTypeStr() << ")(op" << i << ")";
+ }
+ OS << ")\n";
+}
+
+void RVVIntrinsic::emitMangledFuncDef(raw_ostream &OS) const {
+ OS << "__attribute__((clang_builtin_alias(";
+ OS << "__builtin_rvv_" << getName() << ")))\n";
+ OS << OutputType->getTypeStr() << " " << getMangledName() << "(";
+ // Emit function arguments
+ if (!InputTypes.empty()) {
+ ListSeparator LS;
+ for (unsigned i = 0; i < InputTypes.size(); ++i)
+ OS << LS << InputTypes[i]->getTypeStr() << " op" << i;
+ }
+ OS << ");\n\n";
+}
+
+//===----------------------------------------------------------------------===//
+// RVVEmitter implementation
+//===----------------------------------------------------------------------===//
+void RVVEmitter::createHeader(raw_ostream &OS) {
+
+ OS << "/*===---- riscv_vector.h - RISC-V V-extension RVVIntrinsics "
+ "-------------------===\n"
+ " *\n"
+ " *\n"
+ " * Part of the LLVM Project, under the Apache License v2.0 with LLVM "
+ "Exceptions.\n"
+ " * See https://llvm.org/LICENSE.txt for license information.\n"
+ " * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception\n"
+ " *\n"
+ " *===-----------------------------------------------------------------"
+ "------===\n"
+ " */\n\n";
+
+ OS << "#ifndef __RISCV_VECTOR_H\n";
+ OS << "#define __RISCV_VECTOR_H\n\n";
+
+ OS << "#include <stdint.h>\n";
+ OS << "#include <stddef.h>\n\n";
+
+ OS << "#ifndef __riscv_vector\n";
+ OS << "#error \"Vector intrinsics require the vector extension.\"\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifdef __cplusplus\n";
+ OS << "extern \"C\" {\n";
+ OS << "#endif\n\n";
+
+ std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+ createRVVIntrinsics(Defs);
+
+ // Print header code
+ if (!HeaderCode.empty()) {
+ OS << HeaderCode;
+ }
+
+ auto printType = [&](auto T) {
+ OS << "typedef " << T->getClangBuiltinStr() << " " << T->getTypeStr()
+ << ";\n";
+ };
+
+ constexpr int Log2LMULs[] = {-3, -2, -1, 0, 1, 2, 3};
+ // Print RVV boolean types.
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = computeType('c', Log2LMUL, "m");
+ if (T.hasValue())
+ printType(T.getValue());
+ }
+ // Print RVV int/float types.
+ for (char I : StringRef("csil")) {
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = computeType(I, Log2LMUL, "v");
+ if (T.hasValue()) {
+ printType(T.getValue());
+ auto UT = computeType(I, Log2LMUL, "Uv");
+ printType(UT.getValue());
+ }
+ }
+ }
+ OS << "#if defined(__riscv_zfh)\n";
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = computeType('x', Log2LMUL, "v");
+ if (T.hasValue())
+ printType(T.getValue());
+ }
+ OS << "#endif\n";
+
+ OS << "#if defined(__riscv_f)\n";
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = computeType('f', Log2LMUL, "v");
+ if (T.hasValue())
+ printType(T.getValue());
+ }
+ OS << "#endif\n";
+
+ OS << "#if defined(__riscv_d)\n";
+ for (int Log2LMUL : Log2LMULs) {
+ auto T = computeType('d', Log2LMUL, "v");
+ if (T.hasValue())
+ printType(T.getValue());
+ }
+ OS << "#endif\n\n";
+
+ // Intrinsics requiring the same extension go in the same arch guard macro.
+ std::stable_sort(Defs.begin(), Defs.end(),
+ [](const std::unique_ptr<RVVIntrinsic> &A,
+ const std::unique_ptr<RVVIntrinsic> &B) {
+ return A->getRISCVExtensions() < B->getRISCVExtensions();
+ });
+
+ // Print intrinsic functions with macro
+ emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+ Inst.emitIntrinsicMacro(OS);
+ });
+
+ OS << "#define __riscv_v_intrinsic_overloading 1\n";
+
+ // Print Overloaded APIs
+ OS << "#define __rvv_overloaded static inline "
+ "__attribute__((__always_inline__, __nodebug__, __overloadable__))\n";
+
+ emitArchMacroAndBody(Defs, OS, [](raw_ostream &OS, const RVVIntrinsic &Inst) {
+ if (!Inst.isMask() && !Inst.hasNoMaskedOverloaded())
+ return;
+ OS << "__rvv_overloaded ";
+ Inst.emitMangledFuncDef(OS);
+ });
+
+ OS << "\n#ifdef __cplusplus\n";
+ OS << "}\n";
+ OS << "#endif // __riscv_vector\n";
+ OS << "#endif // __RISCV_VECTOR_H\n";
+}
+
+void RVVEmitter::createBuiltins(raw_ostream &OS) {
+ std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+ createRVVIntrinsics(Defs);
+
+ OS << "#if defined(TARGET_BUILTIN) && !defined(RISCVV_BUILTIN)\n";
+ OS << "#define RISCVV_BUILTIN(ID, TYPE, ATTRS) TARGET_BUILTIN(ID, TYPE, "
+ "ATTRS, \"experimental-v\")\n";
+ OS << "#endif\n";
+ for (auto &Def : Defs) {
+ OS << "RISCVV_BUILTIN(__builtin_rvv_" << Def->getName() << ",\""
+ << Def->getBuiltinTypeStr() << "\", ";
+ if (!Def->hasSideEffects())
+ OS << "\"n\")\n";
+ else
+ OS << "\"\")\n";
+ }
+ OS << "#undef RISCVV_BUILTIN\n";
+}
+
+void RVVEmitter::createCodeGen(raw_ostream &OS) {
+ std::vector<std::unique_ptr<RVVIntrinsic>> Defs;
+ createRVVIntrinsics(Defs);
+ // The IR name could be empty; stable sort preserves the relative order.
+ std::stable_sort(Defs.begin(), Defs.end(),
+ [](const std::unique_ptr<RVVIntrinsic> &A,
+ const std::unique_ptr<RVVIntrinsic> &B) {
+ return A->getIRName() < B->getIRName();
+ });
+ // Print switch body when the ir name or ManualCodegen changes from previous
+ // iteration.
+ RVVIntrinsic *PrevDef = Defs.begin()->get();
+ for (auto &Def : Defs) {
+ StringRef CurIRName = Def->getIRName();
+ if (CurIRName != PrevDef->getIRName() ||
+ (Def->getManualCodegen() != PrevDef->getManualCodegen())) {
+ PrevDef->emitCodeGenSwitchBody(OS);
+ }
+ PrevDef = Def.get();
+ OS << "case RISCV::BI__builtin_rvv_" << Def->getName() << ":\n";
+ }
+ Defs.back()->emitCodeGenSwitchBody(OS);
+ OS << "\n";
+}
+
+void RVVEmitter::parsePrototypes(StringRef Prototypes,
+ std::function<void(StringRef)> Handler) {
+ const StringRef Primaries("evwqom0ztul");
+ while (!Prototypes.empty()) {
+ size_t Idx = 0;
+ // Skip over complex prototype because it could contain primitive type
+ // character.
+ if (Prototypes[0] == '(')
+ Idx = Prototypes.find_first_of(')');
+ Idx = Prototypes.find_first_of(Primaries, Idx);
+ assert(Idx != StringRef::npos);
+ Handler(Prototypes.slice(0, Idx + 1));
+ Prototypes = Prototypes.drop_front(Idx + 1);
+ }
+}
+
+std::string RVVEmitter::getSuffixStr(char Type, int Log2LMUL,
+ StringRef Prototypes) {
+ SmallVector<std::string> SuffixStrs;
+ parsePrototypes(Prototypes, [&](StringRef Proto) {
+ auto T = computeType(Type, Log2LMUL, Proto);
+ SuffixStrs.push_back(T.getValue()->getShortStr());
+ });
+ return join(SuffixStrs, "_");
+}
+
+// Expand every RVVBuiltin TableGen record into concrete RVVIntrinsic
+// instances -- one per (element type, LMUL) combination, plus a masked
+// variant when the record has HasMask -- and append them to Out.
+void RVVEmitter::createRVVIntrinsics(
+    std::vector<std::unique_ptr<RVVIntrinsic>> &Out) {
+  std::vector<Record *> RV = Records.getAllDerivedDefinitions("RVVBuiltin");
+  for (auto *R : RV) {
+    // Pull the raw fields off the TableGen record.
+    StringRef Name = R->getValueAsString("Name");
+    StringRef SuffixProto = R->getValueAsString("Suffix");
+    StringRef MangledName = R->getValueAsString("MangledName");
+    StringRef MangledSuffixProto = R->getValueAsString("MangledSuffix");
+    StringRef Prototypes = R->getValueAsString("Prototype");
+    StringRef TypeRange = R->getValueAsString("TypeRange");
+    bool HasMask = R->getValueAsBit("HasMask");
+    bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
+    bool HasVL = R->getValueAsBit("HasVL");
+    bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
+    bool HasSideEffects = R->getValueAsBit("HasSideEffects");
+    std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
+    StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
+    StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
+    std::vector<int64_t> IntrinsicTypes =
+        R->getValueAsListOfInts("IntrinsicTypes");
+    StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
+    StringRef IRName = R->getValueAsString("IRName");
+    StringRef IRNameMask = R->getValueAsString("IRNameMask");
+    unsigned NF = R->getValueAsInt("NF");
+
+    StringRef HeaderCodeStr = R->getValueAsString("HeaderCode");
+    // Records that supply explicit HeaderCode emit that code verbatim and
+    // do not get an auto-generated header definition.
+    bool HasAutoDef = HeaderCodeStr.empty();
+    if (!HeaderCodeStr.empty()) {
+      HeaderCode += HeaderCodeStr.str();
+    }
+    // Parse prototype and create a list of primitive type with transformers
+    // (operand) in ProtoSeq. ProtoSeq[0] is output operand.
+    SmallVector<std::string> ProtoSeq;
+    parsePrototypes(Prototypes, [&ProtoSeq](StringRef Proto) {
+      ProtoSeq.push_back(Proto.str());
+    });
+
+    // Compute Builtin types
+    SmallVector<std::string> ProtoMaskSeq = ProtoSeq;
+    if (HasMask) {
+      // If HasMaskedOffOperand, insert result type as first input operand.
+      if (HasMaskedOffOperand) {
+        if (NF == 1) {
+          ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, ProtoSeq[0]);
+        } else {
+          // Convert
+          // (void, op0 address, op1 address, ...)
+          // to
+          // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+          // All NF inserted strings are identical, so the insertion order at
+          // a fixed position does not matter.
+          for (unsigned I = 0; I < NF; ++I)
+            ProtoMaskSeq.insert(
+                ProtoMaskSeq.begin() + NF + 1,
+                ProtoSeq[1].substr(1)); // Use substr(1) to skip '*'
+        }
+      }
+      if (HasMaskedOffOperand && NF > 1) {
+        // Convert
+        // (void, op0 address, op1 address, ..., maskedoff0, maskedoff1, ...)
+        // to
+        // (void, op0 address, op1 address, ..., mask, maskedoff0, maskedoff1,
+        // ...)
+        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + NF + 1, "m");
+      } else {
+        // If HasMask, insert 'm' as first input operand.
+        // (Covers both the no-maskedoff case and the NF == 1 case, where the
+        // mask goes immediately after the result type.)
+        ProtoMaskSeq.insert(ProtoMaskSeq.begin() + 1, "m");
+      }
+    }
+    // If HasVL, append 'z' to last operand
+    if (HasVL) {
+      ProtoSeq.push_back("z");
+      ProtoMaskSeq.push_back("z");
+    }
+
+    // Create Intrinsics for each type and LMUL.
+    for (char I : TypeRange) {
+      for (int Log2LMUL : Log2LMULList) {
+        Optional<RVVTypes> Types = computeTypes(I, Log2LMUL, NF, ProtoSeq);
+        // Skip this type/LMUL combination if any type in the prototype is
+        // illegal.
+        if (!Types.hasValue())
+          continue;
+
+        auto SuffixStr = getSuffixStr(I, Log2LMUL, SuffixProto);
+        auto MangledSuffixStr = getSuffixStr(I, Log2LMUL, MangledSuffixProto);
+        // Create a non-mask intrinsic
+        Out.push_back(std::make_unique<RVVIntrinsic>(
+            Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
+            HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false,
+            HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
+            Types.getValue(), IntrinsicTypes, RequiredExtension, NF));
+        if (HasMask) {
+          // Create a mask intrinsic
+          Optional<RVVTypes> MaskTypes =
+              computeTypes(I, Log2LMUL, NF, ProtoMaskSeq);
+          // NOTE(review): MaskTypes is not checked before getValue() --
+          // assumes the masked prototype is legal whenever the unmasked one
+          // is; confirm the added 'm'/'z' descriptors can never fail.
+          Out.push_back(std::make_unique<RVVIntrinsic>(
+              Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
+              HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL,
+              HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
+              MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
+        }
+      } // end for Log2LMULList
+    } // end for TypeRange
+  }
+}
+
+// Compute the RVV type for every descriptor in PrototypeSeq.  Returns
+// llvm::None if the register-group constraint (LMUL * NF <= 8) is violated
+// or if any individual type is illegal.
+Optional<RVVTypes>
+RVVEmitter::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
+                         ArrayRef<std::string> PrototypeSeq) {
+  // LMUL x NF must be less than or equal to 8.
+  // Only checked for Log2LMUL >= 1; this also keeps the shift amount
+  // non-negative (Log2LMUL can be negative for fractional LMUL).
+  if ((Log2LMUL >= 1) && (1 << Log2LMUL) * NF > 8)
+    return llvm::None;
+
+  RVVTypes Types;
+  for (const std::string &Proto : PrototypeSeq) {
+    auto T = computeType(BT, Log2LMUL, Proto);
+    // Any single illegal type makes the whole prototype illegal.
+    if (!T.hasValue())
+      return llvm::None;
+    // Record legal type index
+    Types.push_back(T.getValue());
+  }
+  return Types;
+}
+
+// Memoized construction of a single RVV type.  Idx keys the
+// (BasicType, Log2LMUL, prototype) triple; legal results are cached in
+// LegalTypes and failures in IllegalTypes so each triple is computed only
+// once.  Returns a pointer into LegalTypes, or llvm::None if the type is
+// illegal for this combination.
+// NOTE(review): returning &LegalTypes[Idx] assumes the container's element
+// addresses are stable across later insertions -- verify the declared
+// container type guarantees this.
+Optional<RVVTypePtr> RVVEmitter::computeType(BasicType BT, int Log2LMUL,
+                                             StringRef Proto) {
+  std::string Idx = Twine(Twine(BT) + Twine(Log2LMUL) + Proto).str();
+  // Search first
+  auto It = LegalTypes.find(Idx);
+  if (It != LegalTypes.end())
+    return &(It->second);
+  if (IllegalTypes.count(Idx))
+    return llvm::None;
+  // Compute type and record the result.
+  RVVType T(BT, Log2LMUL, Proto);
+  if (T.isValid()) {
+    // Record legal type index and value.
+    LegalTypes.insert({Idx, T});
+    return &(LegalTypes[Idx]);
+  }
+  // Record illegal type index.
+  IllegalTypes.insert(Idx);
+  return llvm::None;
+}
+
+// Emit each definition via PrintBody, wrapping runs of definitions that
+// require the same RISC-V extensions in matching #if / #endif guards.  A new
+// guard is opened whenever the extension bitmask changes between consecutive
+// definitions.
+// NOTE(review): dereferences Defs.begin() up front, so Defs must be
+// non-empty; presumably Defs is sorted so equal extension sets are adjacent,
+// otherwise guards are re-opened redundantly -- verify against callers.
+void RVVEmitter::emitArchMacroAndBody(
+    std::vector<std::unique_ptr<RVVIntrinsic>> &Defs, raw_ostream &OS,
+    std::function<void(raw_ostream &, const RVVIntrinsic &)> PrintBody) {
+  uint8_t PrevExt = (*Defs.begin())->getRISCVExtensions();
+  // NeedEndif tracks whether the currently-open guard printed a "#if" that
+  // still needs its closing "#endif".
+  bool NeedEndif = emitExtDefStr(PrevExt, OS);
+  for (auto &Def : Defs) {
+    uint8_t CurExt = Def->getRISCVExtensions();
+    if (CurExt != PrevExt) {
+      // Extension set changed: close the previous guard (if any) and open a
+      // new one for the current set.
+      if (NeedEndif)
+        OS << "#endif\n\n";
+      NeedEndif = emitExtDefStr(CurExt, OS);
+      PrevExt = CurExt;
+    }
+    // Only definitions without explicit HeaderCode get a generated body.
+    if (Def->hasAutoDef())
+      PrintBody(OS, *Def);
+  }
+  if (NeedEndif)
+    OS << "#endif\n\n";
+}
+
+// Print a "#if defined(__riscv_<ext>) && ..." guard covering every extension
+// bit set in Extents.  Returns true when a guard was printed (the caller
+// must emit the matching #endif); the Basic extension needs no guard.
+bool RVVEmitter::emitExtDefStr(uint8_t Extents, raw_ostream &OS) {
+  if (Extents == RISCVExtension::Basic)
+    return false;
+  OS << "#if ";
+  // ListSeparator inserts " && " between consecutive defined() clauses.
+  ListSeparator LS(" && ");
+  if (Extents & RISCVExtension::F)
+    OS << LS << "defined(__riscv_f)";
+  if (Extents & RISCVExtension::D)
+    OS << LS << "defined(__riscv_d)";
+  if (Extents & RISCVExtension::Zfh)
+    OS << LS << "defined(__riscv_zfh)";
+  if (Extents & RISCVExtension::Zvamo)
+    OS << LS << "defined(__riscv_zvamo)";
+  if (Extents & RISCVExtension::Zvlsseg)
+    OS << LS << "defined(__riscv_zvlsseg)";
+  OS << "\n";
+  return true;
+}
+
+// clang-tblgen backend entry points: each constructs a fresh RVVEmitter over
+// the parsed records and emits one of the three generated artifacts
+// (riscv_vector.h, the builtins .inc, or the codegen .inc).
+namespace clang {
+void EmitRVVHeader(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createHeader(OS);
+}
+
+void EmitRVVBuiltins(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createBuiltins(OS);
+}
+
+void EmitRVVBuiltinCG(RecordKeeper &Records, raw_ostream &OS) {
+  RVVEmitter(Records).createCodeGen(OS);
+}
+
+} // End namespace clang
diff --git a/clang/utils/TableGen/SveEmitter.cpp b/clang/utils/TableGen/SveEmitter.cpp
index 0e69600ef861..b2f6ede56522 100644
--- a/clang/utils/TableGen/SveEmitter.cpp
+++ b/clang/utils/TableGen/SveEmitter.cpp
@@ -362,6 +362,9 @@ std::string SVEType::builtin_str() const {
if (isVoid())
return "v";
+ if (isScalarPredicate())
+ return "b";
+
if (isVoidPointer())
S += "v";
else if (!isFloatingPoint())
@@ -916,26 +919,22 @@ std::string Intrinsic::mangleName(ClassKind LocalCK) const {
}
void Intrinsic::emitIntrinsic(raw_ostream &OS) const {
- // Use the preprocessor to
- if (getClassKind() != ClassG || getProto().size() <= 1) {
- OS << "#define " << mangleName(getClassKind())
- << "(...) __builtin_sve_" << mangleName(ClassS)
- << "(__VA_ARGS__)\n";
- } else {
- std::string FullName = mangleName(ClassS);
- std::string ProtoName = mangleName(ClassG);
+ bool IsOverloaded = getClassKind() == ClassG && getProto().size() > 1;
- OS << "__aio __attribute__((__clang_arm_builtin_alias("
- << "__builtin_sve_" << FullName << ")))\n";
+ std::string FullName = mangleName(ClassS);
+ std::string ProtoName = mangleName(getClassKind());
- OS << getTypes()[0].str() << " " << ProtoName << "(";
- for (unsigned I = 0; I < getTypes().size() - 1; ++I) {
- if (I != 0)
- OS << ", ";
- OS << getTypes()[I + 1].str();
- }
- OS << ");\n";
+ OS << (IsOverloaded ? "__aio " : "__ai ")
+ << "__attribute__((__clang_arm_builtin_alias("
+ << "__builtin_sve_" << FullName << ")))\n";
+
+ OS << getTypes()[0].str() << " " << ProtoName << "(";
+ for (unsigned I = 0; I < getTypes().size() - 1; ++I) {
+ if (I != 0)
+ OS << ", ";
+ OS << getTypes()[I + 1].str();
}
+ OS << ");\n";
}
//===----------------------------------------------------------------------===//
@@ -1201,7 +1200,9 @@ void SVEEmitter::createHeader(raw_ostream &OS) {
OS << "};\n\n";
OS << "/* Function attributes */\n";
- OS << "#define __aio static inline __attribute__((__always_inline__, "
+ OS << "#define __ai static __inline__ __attribute__((__always_inline__, "
+ "__nodebug__))\n\n";
+ OS << "#define __aio static __inline__ __attribute__((__always_inline__, "
"__nodebug__, __overloadable__))\n\n";
// Add reinterpret functions.
diff --git a/clang/utils/TableGen/TableGen.cpp b/clang/utils/TableGen/TableGen.cpp
index 1b919a77988b..7fb5d0acc6f3 100644
--- a/clang/utils/TableGen/TableGen.cpp
+++ b/clang/utils/TableGen/TableGen.cpp
@@ -63,6 +63,7 @@ enum ActionType {
GenClangCommentCommandInfo,
GenClangCommentCommandList,
GenClangOpenCLBuiltins,
+ GenClangOpenCLBuiltinTests,
GenArmNeon,
GenArmFP16,
GenArmBF16,
@@ -83,6 +84,9 @@ enum ActionType {
GenArmCdeBuiltinSema,
GenArmCdeBuiltinCG,
GenArmCdeBuiltinAliases,
+ GenRISCVVectorHeader,
+ GenRISCVVectorBuiltins,
+ GenRISCVVectorBuiltinCG,
GenAttrDocs,
GenDiagDocs,
GenOptDocs,
@@ -191,6 +195,8 @@ cl::opt<ActionType> Action(
"documentation comments"),
clEnumValN(GenClangOpenCLBuiltins, "gen-clang-opencl-builtins",
"Generate OpenCL builtin declaration handlers"),
+ clEnumValN(GenClangOpenCLBuiltinTests, "gen-clang-opencl-builtin-tests",
+ "Generate OpenCL builtin declaration tests"),
clEnumValN(GenArmNeon, "gen-arm-neon", "Generate arm_neon.h for clang"),
clEnumValN(GenArmFP16, "gen-arm-fp16", "Generate arm_fp16.h for clang"),
clEnumValN(GenArmBF16, "gen-arm-bf16", "Generate arm_bf16.h for clang"),
@@ -228,6 +234,12 @@ cl::opt<ActionType> Action(
"Generate ARM CDE builtin code-generator for clang"),
clEnumValN(GenArmCdeBuiltinAliases, "gen-arm-cde-builtin-aliases",
"Generate list of valid ARM CDE builtin aliases for clang"),
+ clEnumValN(GenRISCVVectorHeader, "gen-riscv-vector-header",
+ "Generate riscv_vector.h for clang"),
+ clEnumValN(GenRISCVVectorBuiltins, "gen-riscv-vector-builtins",
+ "Generate riscv_vector_builtins.inc for clang"),
+ clEnumValN(GenRISCVVectorBuiltinCG, "gen-riscv-vector-builtin-codegen",
+ "Generate riscv_vector_builtin_cg.inc for clang"),
clEnumValN(GenAttrDocs, "gen-attr-docs",
"Generate attribute documentation"),
clEnumValN(GenDiagDocs, "gen-diag-docs",
@@ -362,6 +374,9 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenClangOpenCLBuiltins:
EmitClangOpenCLBuiltins(Records, OS);
break;
+ case GenClangOpenCLBuiltinTests:
+ EmitClangOpenCLBuiltinTests(Records, OS);
+ break;
case GenClangSyntaxNodeList:
EmitClangSyntaxNodeList(Records, OS);
break;
@@ -428,6 +443,15 @@ bool ClangTableGenMain(raw_ostream &OS, RecordKeeper &Records) {
case GenArmCdeBuiltinAliases:
EmitCdeBuiltinAliases(Records, OS);
break;
+ case GenRISCVVectorHeader:
+ EmitRVVHeader(Records, OS);
+ break;
+ case GenRISCVVectorBuiltins:
+ EmitRVVBuiltins(Records, OS);
+ break;
+ case GenRISCVVectorBuiltinCG:
+ EmitRVVBuiltinCG(Records, OS);
+ break;
case GenAttrDocs:
EmitClangAttrDocs(Records, OS);
break;
diff --git a/clang/utils/TableGen/TableGenBackends.h b/clang/utils/TableGen/TableGenBackends.h
index 33a06bfe4469..bf40c7b1d18f 100644
--- a/clang/utils/TableGen/TableGenBackends.h
+++ b/clang/utils/TableGen/TableGenBackends.h
@@ -106,6 +106,10 @@ void EmitMveBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitMveBuiltinAliases(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltins(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+void EmitRVVBuiltinCG(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
+
void EmitCdeHeader(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeBuiltinDef(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitCdeBuiltinSema(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
@@ -118,6 +122,8 @@ void EmitClangOptDocs(llvm::RecordKeeper &Records, llvm::raw_ostream &OS);
void EmitClangOpenCLBuiltins(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);
+void EmitClangOpenCLBuiltinTests(llvm::RecordKeeper &Records,
+ llvm::raw_ostream &OS);
void EmitClangDataCollectors(llvm::RecordKeeper &Records,
llvm::raw_ostream &OS);